repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
Endle/mtg_buyer | resolve.py | 1 | 1942 |
from main import Item
import logging
from bs4 import BeautifulSoup
import copy
def _deeper(i, sons=1, pos=None):
'''Dig down just one level of the tag tree.'''
c = [k for k in i.contents if str(k).strip()]
assert(len(c) == sons)
if(sons == 1):
return c[0]
else:
return c[pos]
def _dewrap(li):
li = _deeper(li) #<dl class="item">
li = _deeper(li, 2, 1) #<dd class="detail-info">
return li
def _extract(item:Item, e)->Item:
'''e: bs4.element.Tag'''
item = copy.copy(item)
c = [k for k in e.contents if str(k).strip()]
title = c[0]
text = title.text.strip()
item.item_name = text
a = title.a
item.item_link = "https:" + a.attrs['href']
price = c[1]
after_discount = [k for k in price.contents if str(k).strip()][0]
p = None
for i in after_discount:
if 'value' in i.attrs['class']:
p = float(i.text)
assert(p)
item.item_price = p
return item
def resolve(item:Item)->list:
logging.warning("resolve " + item.card)
soup = BeautifulSoup(item.html, 'html.parser')
elements = soup.find_all('li', class_='item-wrap')
elements = tuple(_dewrap(e) for e in elements)
choices = [_extract(item, e) for e in elements]
return choices
def best_choice(item:Item, block=None)->Item:
block = block or []  # avoid sharing a mutable default argument between calls
def legal(s):
for b in block:
if b in s:
return False
return True
r = resolve(item)
for p in r:
logging.info(p)
# e.g. 闪电击 (Lightning Strike) vs. the similarly named 闪电炼击
r = [i for i in r if item.card in i.item_name]
r = [i for i in r if legal(i.item_name)]
if (len(r) == 0):
logging.warning("Find nothing for " + str(item))
return None
r.sort(key=lambda i: i.item_price)
return r[0]
if __name__ == '__main__':
i = Item()
i.html = "beef"
i.card = "文胸"
with open("/Users/lizhenbo/Downloads/mtg/文胸") as fin:
i.html = fin.read()
resolve(i)
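# A minimal usage sketch (the card name, block list and `fetched_page_html` below are
# illustrative, not taken from this repository):
#   it = Item()
#   it.card = "闪电击"
#   it.html = fetched_page_html            # HTML of the shop's search-result page
#   cheapest = best_choice(it, block=["闪电炼击"])
#   if cheapest:
#       print(cheapest.item_name, cheapest.item_price, cheapest.item_link)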
| gpl-3.0 | -1,751,196,985,169,293,600 | 24.72973 | 70 | 0.5625 | false |
jenniew/BigDL | pyspark/bigdl/nn/layer.py | 1 | 166808 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import numpy as np
import six
from bigdl.util.common import JTensor
from bigdl.util.common import JavaValue
from bigdl.util.common import callBigDlFunc
from bigdl.util.common import callJavaFunc
from bigdl.util.common import get_spark_context
from bigdl.util.common import to_list
from bigdl.util.common import INTMAX, INTMIN, DOUBLEMAX
from bigdl.util.common import get_activation_by_name
from bigdl.optim.optimizer import L1Regularizer, L2Regularizer, L1L2Regularizer
from py4j.java_gateway import JavaObject
if sys.version >= '3':
long = int
unicode = str
class Node(JavaValue):
"""
Represent a node in a graph. The connections between nodes are directed.
"""
def __init__(self, jvalue, bigdl_type, *args):
self.value = jvalue if jvalue else callBigDlFunc(
bigdl_type, JavaValue.jvm_class_constructor(self), *args)
self.bigdl_type = bigdl_type
@classmethod
def of(cls, jvalue, bigdl_type="float"):
return Node(jvalue, bigdl_type)
def element(self):
return Layer.of(self.value.element())
class Layer(JavaValue):
"""
Layer is the basic component of a neural network
and it's also the base class of layers.
Layer can connect to others to construct a complex neural network.
"""
def __init__(self, jvalue, bigdl_type, *args):
if (jvalue):
assert(type(jvalue) == JavaObject)
self.value = jvalue
else:
self.value = callBigDlFunc(
bigdl_type, JavaValue.jvm_class_constructor(self), *args)
self.bigdl_type = bigdl_type
def set_running_mean(self, running_mean):
"""
:param running_mean: a ndarray
"""
callBigDlFunc(self.bigdl_type, "setRunningMean",
self.value, JTensor.from_ndarray(running_mean))
return self
def set_running_std(self, running_std):
"""
:param running_mean: a ndarray
"""
callBigDlFunc(self.bigdl_type, "setRunningStd",
self.value, JTensor.from_ndarray(running_std))
return self
def __str__(self):
"""
>>> conv2 = SpatialConvolution(6, 12, 5, 5).set_name("conv2")
creating: createSpatialConvolution
>>> print(conv2)
SpatialConvolution[conv2](6 -> 12, 5 x 5, 1, 1, 0, 0)
"""
return self.value.toString()
def __call__(self, x=None):
"""
Some other modules point to current module
:param x: upstream module nodes. x is either a Node or list of Node.
:return: node containing current module
"""
x = x if x else []
return Node.of(callBigDlFunc(self.bigdl_type,
"createNode",
self,
to_list(x)))
@classmethod
def of(cls, jvalue, bigdl_type="float"):
"""
Create a Python Layer base on the given java value
:param jvalue: Java object create by Py4j
:return: A Python Layer
"""
model = Layer(jvalue, bigdl_type)
return model
def set_name(self, name):
"""
Give this model a name. A generated name consisting of the class name and a UUID
will be used if the user doesn't set one.
"""
callJavaFunc(get_spark_context(), self.value.setName, name)
return self
def name(self):
"""
Name of this layer
"""
return callJavaFunc(get_spark_context(), self.value.getName)
def set_seed(self, seed=123):
"""
You can control the random seed which used to init weights for this model.
:param seed: random seed
:return: Model itself.
"""
callBigDlFunc(self.bigdl_type, "setModelSeed", seed)
return self
def get_dtype(self):
if "float" == self.bigdl_type:
return "float32"
else:
return "float64"
@staticmethod
def check_input(input):
"""
:param input: ndarray or list of ndarray or JTensor or list of JTensor.
:return: (list of JTensor, isTable)
"""
def to_jtensor(i):
if isinstance(i, np.ndarray):
return JTensor.from_ndarray(i)
elif isinstance(i, JTensor):
return i
else:
raise Exception("Error unknown input type %s" % type(i))
if type(input) is list:
if len(input) == 0:
raise Exception('Error when checking: empty input')
return list(map(lambda i: to_jtensor(i), input)), True
else:
return [to_jtensor(input)], False
@staticmethod
def convert_output(output):
if type(output) is JTensor:
return output.to_ndarray()
elif(len(output) == 1):
return output[0].to_ndarray()
else:
return [x.to_ndarray() for x in output]
def forward(self, input):
"""
NB: It's for debug only, please use optimizer.optimize() in production.
Takes an input object, and computes the corresponding output of the module
:param input: ndarray or list of ndarray or JTensor or list of JTensor.
:return: ndarray or list of ndarray
"""
jinput, input_is_table = self.check_input(input)
output = callBigDlFunc(self.bigdl_type,
"modelForward",
self.value,
jinput,
input_is_table)
return self.convert_output(output)
def backward(self, input, grad_output):
"""
NB: It's for debug only, please use optimizer.optimize() in production.
Performs a back-propagation step through the module, with respect to the given input. In
general this method makes the assumption forward(input) has been called before, with the same
input. This is necessary for optimization reasons. If you do not respect this rule, backward()
will compute incorrect gradients.
:param input: ndarray or list of ndarray or JTensor or list of JTensor.
:param grad_output: ndarray or list of ndarray or JTensor or list of JTensor.
:return: ndarray or list of ndarray
"""
jinput, input_is_table = self.check_input(input)
jgrad_output, grad_output_is_table = self.check_input(grad_output)
output = callBigDlFunc(self.bigdl_type,
"modelBackward",
self.value,
jinput,
input_is_table,
jgrad_output,
grad_output_is_table)
return self.convert_output(output)
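# A minimal debugging sketch (layer sizes and shapes are illustrative):
#   layer = Linear(3, 2)
#   x = np.random.rand(4, 3)                        # a batch of 4 samples
#   out = layer.forward(x)                          # shape (4, 2)
#   grad_in = layer.backward(x, np.ones((4, 2)))    # gradient w.r.t. the input, shape (4, 3)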
def zero_grad_parameters(self):
"""
NB: It's for debug only, please use optimizer.optimize() in production.
If the module has parameters, this will zero the accumulation of the gradients with respect
to these parameters. Otherwise, it does nothing.
"""
callJavaFunc(get_spark_context(), self.value.zeroGradParameters)
def update_parameters(self, learning_rate):
"""
NB: It's for debug only, please use optimizer.optimize() in production.
"""
callBigDlFunc(self.bigdl_type,
"updateParameters",
self.value,
learning_rate)
def reset(self):
"""
Initialize the model weights.
"""
callJavaFunc(get_spark_context(), self.value.reset)
return self
def parameters(self):
"""
Get the model parameters, containing: weight, bias, gradBias, gradWeight
:return: dict(layername -> dict(parametername -> ndarray))
"""
name_to_params = callBigDlFunc(self.bigdl_type,
"modelGetParameters",
self.value)
def to_ndarray(params):
return dict((param_name,
np.array(values[0], dtype=self.get_dtype()).reshape(
values[1])) for param_name, values in
params.items())
return dict((layer_name, to_ndarray(params)) for layer_name, params in
name_to_params.items())
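# Example of inspecting the returned dict (layer and parameter names depend on the model):
#   params = model.parameters()
#   for layer_name, p in params.items():
#       print(layer_name, {k: v.shape for k, v in p.items()})  # e.g. 'weight', 'bias', 'gradWeight', 'gradBias'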
def evaluate(self, *args):
"""
No argument passed in:
Evaluate the model to set train = false, useful when doing test/forward
:return: layer itself
Three arguments passed in:
A method to benchmark the model quality.
:param val_rdd: the input data
:param batch_size: batch size
:param val_methods: a list of validation methods. i.e: Top1Accuracy,Top5Accuracy and Loss.
:return:
"""
if len(args) == 0:
callBigDlFunc(self.bigdl_type,
"evaluate", self.value)
return self
elif len(args) == 3:
val_rdd, batch_size, val_methods = args
return callBigDlFunc(self.bigdl_type,
"modelEvaluate",
self.value,
val_rdd, batch_size, val_methods)
else:
raise Exception("Error when calling evaluate(): it takes no argument or exactly three arguments only")
def _to_jtensors(self, x):
x = to_list(x)
if isinstance(x[0], np.ndarray):
return [JTensor.from_ndarray(i) for i in x]
elif isinstance(x[0], JTensor):
return x
else:
raise Exception("Not supported type: %s" % type(x[0]))
def predict_local(self, X):
"""
:param X: X can be a ndarray or list of ndarray if the model has multiple inputs.
The first dimension of X should be batch.
:return: a ndarray as the prediction result.
"""
jresults = callBigDlFunc(self.bigdl_type,
"predictLocal",
self.value,
self._to_jtensors(X))
return np.stack([j.to_ndarray() for j in jresults])
def predict_local_class(self, X):
"""
:param X: X can be a ndarray or list of ndarray if the model has multiple inputs.
The first dimension of X should be batch.
:return: a ndarray as the prediction result.
"""
result = callBigDlFunc(self.bigdl_type,
"predictLocalClass",
self.value,
self._to_jtensors(X))
return np.stack(result)
def predict(self, data_rdd, batch_size=-1, share_buffer=False):
"""
Model inference base on the given data.
You need to invoke collect() to trigger the action, as the returned result is an RDD.
:param data_rdd: the data to be predicted.
:param batch_size: total batchSize for all partitions. If -1, default is 4 * partitionNumber of the dataset.
:param share_buffer: whether to share the same memory for each batch's predict results
:return: An RDD represent the predict result.
"""
result = callBigDlFunc(self.bigdl_type,"modelPredictRDD", self.value, data_rdd, batch_size, share_buffer)
return result.map(lambda data: data.to_ndarray())
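# Sketch of distributed prediction (the data and shapes are illustrative):
#   from bigdl.util.common import Sample
#   rdd = sc.parallelize([Sample.from_ndarray(np.random.rand(28, 28), np.array([1]))])
#   predictions = model.predict(rdd).collect()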
def predict_class(self, data_rdd, batch_size=-1):
"""
module predict, return the predict label
:param data_rdd: the data to be predicted.
:param batch_size: total batchSize for all partitions. If -1, default is 4 * partitionNumber of the dataset
:return: An RDD represent the predict label.
"""
result = callBigDlFunc(self.bigdl_type,
"modelPredictClass", self.value, data_rdd, batch_size)
return result
def set_weights(self, weights):
"""
Set weights for this layer
:param weights: a list of numpy arrays which represent weight and bias
:return:
>>> linear = Linear(3,2)
creating: createLinear
>>> linear.set_weights([np.array([[1,2,3],[4,5,6]]), np.array([7,8])])
>>> weights = linear.get_weights()
>>> weights[0].shape == (2,3)
True
>>> weights[0][0]
array([ 1., 2., 3.], dtype=float32)
>>> weights[1]
array([ 7., 8.], dtype=float32)
>>> relu = ReLU()
creating: createReLU
>>> from py4j.protocol import Py4JJavaError
>>> try:
... relu.set_weights([np.array([[1,2,3],[4,5,6]]), np.array([7,8])])
... except Py4JJavaError as err:
... print(err.java_exception)
...
java.lang.IllegalArgumentException: requirement failed: this layer does not have weight/bias
>>> relu.get_weights()
The layer does not have weight/bias
>>> add = Add(2)
creating: createAdd
>>> try:
... add.set_weights([np.array([7,8]), np.array([1,2])])
... except Py4JJavaError as err:
... print(err.java_exception)
...
java.lang.IllegalArgumentException: requirement failed: the number of input weight/bias is not consistant with number of weight/bias of this layer, number of input 1, number of output 2
>>> cAdd = CAdd([4, 1])
creating: createCAdd
>>> cAdd.set_weights(np.ones([4, 1]))
>>> (cAdd.get_weights()[0] == np.ones([4, 1])).all()
True
"""
tensors = [JTensor.from_ndarray(param, self.bigdl_type) for param in to_list(weights)]
callBigDlFunc(self.bigdl_type, "setWeights", self.value, tensors)
def get_weights(self):
"""
Get weights for this layer
:return: list of numpy arrays which represent weight and bias
"""
tensorWeights = callBigDlFunc(self.bigdl_type,
"getWeights", self.value)
if tensorWeights is not None:
return [tensor.to_ndarray() for tensor in tensorWeights]
else:
print("The layer does not have weight/bias")
return None
def is_with_weights(self):
return callBigDlFunc(self.bigdl_type,
"isWithWeights", self.value)
def save(self, path, over_write = False):
callBigDlFunc(self.bigdl_type, "modelSave", self.value, path,
over_write)
def saveModel(self, path, over_write = False):
callBigDlFunc(self.bigdl_type, "saveBigDLModule", self.value, path,
over_write)
def save_caffe(self, prototxt_path, model_path, use_v2 = True, overwrite = False):
callBigDlFunc(self.bigdl_type, "saveCaffe", self.value, prototxt_path,
model_path, use_v2, overwrite)
def save_tensorflow(self, inputs, path, byte_order="little_endian", data_format="nhwc"):
"""
Save a model to protobuf files so that it can be used in tensorflow inference.
When saving the model, placeholders will be added to the tf model as input nodes. So
you need to pass in the names and shapes of the placeholders. BigDL model doesn't have
such information. The order of the placeholder information should be the same as the inputs
of the graph model.
:param inputs: placeholder information, should be an array of tuples (input_name, shape)
where 'input_name' is a string and shape is an array of integer
:param path: the path to be saved to
:param byte_order: model byte order
:param data_format: model data format, should be "nhwc" or "nchw"
"""
callBigDlFunc(self.bigdl_type, "saveTF", self.value, inputs, path, byte_order, data_format)
def setWRegularizer(self, wRegularizer):
'''
set weight regularizer
:param wRegularizer: weight regularizer
:return:
'''
self.value.wRegularizer = wRegularizer.value
def setBRegularizer(self, bRegularizer):
'''
set bias regularizer
:param wRegularizer: bias regularizer
:return:
'''
self.value.bRegularizer = bRegularizer.value
def freeze(self, names=None):
"""
freeze the module. If names is not None, the layers that match the given names
are frozen
:param names: an array of layer names
:return:
"""
callBigDlFunc(self.bigdl_type, "freeze", self.value, names)
return self
def unfreeze(self, names=None):
"""
unfreeze the module. If names is not None, only the layers that match the given names are unfrozen
:param names: an array of layer names
:return:
"""
callBigDlFunc(self.bigdl_type, "unFreeze", self.value, names)
return self
def training(self, is_training=True):
'''
Set this layer in the training mode, or in prediction mode if is_training=False
'''
if is_training:
callJavaFunc(get_spark_context(), self.value.training)
else:
callJavaFunc(get_spark_context(), self.value.evaluate)
return self
def is_training(self):
'''
:return: Whether this layer is in the training mode
>>> layer = Dropout()
creating: createDropout
>>> layer = layer.evaluate()
>>> layer.is_training()
False
>>> layer = layer.training()
>>> layer.is_training()
True
'''
return callJavaFunc(get_spark_context(), self.value.isTraining)
def quantize(self):
'''
Clone self and quantize it, returning a new quantized model.
:return: A new quantized model.
>>> fc = Linear(4, 2)
creating: createLinear
>>> fc.set_weights([np.ones((2, 4)), np.ones((2,))])
>>> input = np.ones((2, 4))
>>> fc.forward(input)
array([[ 5., 5.],
[ 5., 5.]], dtype=float32)
>>> quantized_fc = fc.quantize()
>>> quantized_fc.forward(input)
array([[ 5., 5.],
[ 5., 5.]], dtype=float32)
>>> assert("quantized.Linear" in quantized_fc.__str__())
>>> conv = SpatialConvolution(1, 2, 3, 3)
creating: createSpatialConvolution
>>> conv.set_weights([np.ones((2, 1, 3, 3)), np.zeros((2,))])
>>> input = np.ones((2, 1, 4, 4))
>>> conv.forward(input)
array([[[[ 9., 9.],
[ 9., 9.]],
<BLANKLINE>
[[ 9., 9.],
[ 9., 9.]]],
<BLANKLINE>
<BLANKLINE>
[[[ 9., 9.],
[ 9., 9.]],
<BLANKLINE>
[[ 9., 9.],
[ 9., 9.]]]], dtype=float32)
>>> quantized_conv = conv.quantize()
>>> quantized_conv.forward(input)
array([[[[ 9., 9.],
[ 9., 9.]],
<BLANKLINE>
[[ 9., 9.],
[ 9., 9.]]],
<BLANKLINE>
<BLANKLINE>
[[[ 9., 9.],
[ 9., 9.]],
<BLANKLINE>
[[ 9., 9.],
[ 9., 9.]]]], dtype=float32)
>>> assert("quantized.SpatialConvolution" in quantized_conv.__str__())
>>> seq = Sequential()
creating: createSequential
>>> seq = seq.add(conv)
>>> seq = seq.add(Reshape([8, 4], False))
creating: createReshape
>>> seq = seq.add(fc)
>>> input = np.ones([1, 1, 6, 6])
>>> seq.forward(input)
array([[ 37., 37.],
[ 37., 37.],
[ 37., 37.],
[ 37., 37.],
[ 37., 37.],
[ 37., 37.],
[ 37., 37.],
[ 37., 37.]], dtype=float32)
>>> quantized_seq = seq.quantize()
>>> quantized_seq.forward(input)
array([[ 37., 37.],
[ 37., 37.],
[ 37., 37.],
[ 37., 37.],
[ 37., 37.],
[ 37., 37.],
[ 37., 37.],
[ 37., 37.]], dtype=float32)
>>> assert("quantized.Linear" in quantized_seq.__str__())
>>> assert("quantized.SpatialConvolution" in quantized_seq.__str__())
'''
quantized_model = callBigDlFunc(self.bigdl_type, "quantize", self.value)
return Layer.of(quantized_model)
class Container(Layer):
'''
[[Container]] is a sub-class of Model that declares methods defined in all containers.
A container usually contains some other modules which can be added through the "add" method
'''
def __init__(self, jvalue, bigdl_type, *args):
super(Container, self).__init__(jvalue, bigdl_type, *args)
def add(self, model):
self.value.add(model.value)
return self
@property
def layers(self):
jlayers = callBigDlFunc(self.bigdl_type, "getContainerModules" , self)
layers = [Layer.of(jlayer) for jlayer in jlayers]
return layers
@property
def flattened_layers(self):
jlayers = callBigDlFunc(self.bigdl_type, "getFlattenModules", self)
layers = [Layer.of(jlayer) for jlayer in jlayers]
return layers
class Model(Container):
"""
A graph container. Each node can have multiple inputs. The output of the node should be a
tensor. The output tensor can be connected to multiple nodes. So the module in each node can
have a tensor or table input, and should have a tensor output.
The graph container can have multiple inputs and multiple outputs. If there's one input,
the input data fed to the graph module should be a tensor. If there are multiple inputs,
the input data fed to the graph module should be a table, which is actually a sequence of
tensors. The order of the input tensors should be the same as the order of the input nodes.
This also applies to the gradient from the module in the back propagation.
If there's one output, the module output is a tensor. If there are multiple outputs, the module
output is a table, which is actually a sequence of tensors. The order of the output tensors is
the same as the order of the output modules. This also applies to the gradient passed to the
module in the back propagation.
All inputs should be able to connect to outputs through some paths in the graph.
It is allowed that some successors of the input nodes are not connected to outputs.
If so, these nodes will be excluded in the computation.
We also support initializing a Graph directly from a tensorflow module. In this case, you should
pass your tensorflow nodes as inputs and outputs and also specify the byte_order parameter ("little_endian"
or "big_endian") and the node_type parameter ("bigdl" or "tensorflow").
"""
def __init__(self,
inputs,
outputs,
jvalue=None,
bigdl_type="float", byte_order="little_endian", model_type="bigdl"):
if jvalue:
self.value = jvalue
self.bigdl_type = bigdl_type
elif model_type == "bigdl":
super(Model, self).__init__(None, bigdl_type,
to_list(inputs),
to_list(outputs))
else:
from bigdl.util.tf_utils import convert
model = convert(to_list(inputs), to_list(outputs), byte_order, bigdl_type)
super(Model, self).__init__(model.value, bigdl_type)
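# Sketch of building a graph model from layer nodes (layer sizes are illustrative):
#   fc1 = Linear(4, 2)()        # calling a layer creates a Node
#   relu = ReLU()(fc1)
#   fc2 = Linear(2, 1)(relu)
#   graph = Model([fc1], [fc2])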
@staticmethod
def from_jvalue(jvalue, bigdl_type="float"):
"""
Create a Python Model base on the given java value
:param jvalue: Java object create by Py4j
:return: A Python Model
"""
model = Model([], [], jvalue=jvalue)
model.value = jvalue
return model
def __str__(self):
return "->".join(self.layers())
@staticmethod
def load(path, bigdl_type="float"):
"""
Load a pre-trained Bigdl model.
:param path: The path containing the pre-trained model.
:return: A pre-trained model.
"""
jmodel = callBigDlFunc(bigdl_type, "loadBigDL", path)
return Layer.of(jmodel)
@staticmethod
def loadModel(path, bigdl_type="float"):
"""
Load a pre-trained Bigdl model.
:param path: The path containing the pre-trained model.
:return: A pre-trained model.
"""
jmodel = callBigDlFunc(bigdl_type, "loadBigDLModule", path)
return Layer.of(jmodel)
@staticmethod
def load_torch(path, bigdl_type="float"):
"""
Load a pre-trained Torch model.
:param path: The path containing the pre-trained model.
:return: A pre-trained model.
"""
jmodel = callBigDlFunc(bigdl_type, "loadTorch", path)
return Layer.of(jmodel)
@staticmethod
def load_keras(def_path, weights_path=None, by_name=False):
"""
Load a pre-trained Keras model.
:param def_path: The json path containing the keras model definition.
:param weights_path: The HDF5 path containing the pre-trained keras model weights.
:return: A pre-trained model.
"""
from bigdl.keras.converter import DefinitionLoader, WeightLoader
if weights_path:
return WeightLoader.load_weights_from_json_hdf5(def_path, weights_path, by_name=by_name)
else:
return DefinitionLoader.from_json_path(def_path)
@staticmethod
def load_caffe(model, defPath, modelPath, match_all=True, bigdl_type="float"):
"""
Load a pre-trained Caffe model.
:param model: A bigdl model definition which is equivalent to the pre-trained caffe model.
:param defPath: The path containing the caffe model definition.
:param modelPath: The path containing the pre-trained caffe model.
:return: A pre-trained model.
"""
jmodel = callBigDlFunc(bigdl_type, "loadCaffe", model, defPath, modelPath, match_all)
return Layer.of(jmodel)
@staticmethod
def load_caffe_model(defPath, modelPath, bigdl_type="float"):
"""
Load a pre-trained Caffe model.
:param defPath: The path containing the caffe model definition.
:param modelPath: The path containing the pre-trained caffe model.
:return: A pre-trained model.
"""
jmodel = callBigDlFunc(bigdl_type, "loadCaffeModel", defPath, modelPath)
return Layer.of(jmodel)
@staticmethod
def load_tensorflow(path, inputs, outputs, byte_order = "little_endian",
bin_file = None, bigdl_type="float"):
"""
Load a pre-trained Tensorflow model.
:param path: The path containing the pre-trained model.
:param inputs: The input node of this graph
:param outputs: The output node of this graph
:param byte_order: byte_order of the file, `little_endian` or `big_endian`
:param bin_file: the optional bin file produced by bigdl dump_model util function to store the weights
:return: A pre-trained model.
"""
jmodel = callBigDlFunc(bigdl_type, "loadTF", path, inputs, outputs, byte_order, bin_file)
return Model.of(jmodel)
@staticmethod
def train(output, data, label, opt_method, criterion, batch_size, end_when, session=None, bigdl_type="float"):
from bigdl.util.tf_utils import get_path
from bigdl.util.common import Sample
output_name = output.name.split(":")[0]
path = get_path(output_name, session)
sc = get_spark_context()
rdd_train_images = sc.parallelize(data)
rdd_train_labels = sc.parallelize(label)
rdd_train_sample = rdd_train_images.zip(rdd_train_labels).map(lambda input:
Sample.from_ndarray(input[0], input[1]))
jmodel = callBigDlFunc(bigdl_type, "trainTF", path, output_name, rdd_train_sample, opt_method, criterion, batch_size, end_when)
return Model.of(jmodel)
def stop_gradient(self, stop_layers, bigdl_type="float"):
"""
stop the input gradient of the layers that match the given ```names```:
their input gradients are not computed,
and they will not contribute to the input gradient computation of the
layers that depend on them.
:param stop_layers: an array of layer names
:param bigdl_type:
:return:
"""
callBigDlFunc(bigdl_type, "setStopGradient", self.value, stop_layers)
return self
def save_graph_topology(self, log_path, bigdl_type="float"):
"""
save the current model graph to a folder, which can be displayed in tensorboard by running
tensorboard --logdir logPath
:param log_path: path to save the model graph
:param bigdl_type:
:return:
"""
callBigDlFunc(bigdl_type, "saveGraphTopology", self.value, log_path)
return self
class Linear(Layer):
'''
The [[Linear]] module applies a linear transformation to the input data,
i.e. `y = Wx + b`. The input given in `forward(input)` must be either
a vector (1D tensor) or matrix (2D tensor). If the input is a vector, it must
have the size of `inputSize`. If it is a matrix, then each row is assumed to be
an input sample of given batch (the number of rows means the batch size and
the number of columns should be equal to the `inputSize`).
:param input_size the size of each input sample
:param output_size the size of the module output of each sample
:param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices.
:param bRegularizer: instance of [[Regularizer]]applied to the bias.
:param init_weight: the optional initial value for the weight
:param init_bias: the optional initial value for the bias
:param init_grad_weight: the optional initial value for the grad_weight
:param init_grad_bias: the optional initial value for the grad_bias
>>> linear = Linear(100, 10, True, L1Regularizer(0.5), L1Regularizer(0.5))
creating: createL1Regularizer
creating: createL1Regularizer
creating: createLinear
>>> import numpy as np
>>> init_weight = np.random.randn(10, 100)
>>> init_bias = np.random.randn(10)
>>> init_grad_weight = np.zeros([10, 100])
>>> init_grad_bias = np.zeros([10])
>>> linear = Linear(100, 10, True, L1Regularizer(0.5), L1Regularizer(0.5), init_weight, init_bias, init_grad_weight, init_grad_bias)
creating: createL1Regularizer
creating: createL1Regularizer
creating: createLinear
'''
def __init__(self, input_size, output_size, with_bias=True, wRegularizer=None, bRegularizer=None,
init_weight=None, init_bias=None, init_grad_weight=None, init_grad_bias=None, bigdl_type="float"):
super(Linear, self).__init__(None, bigdl_type, input_size, output_size,
with_bias, wRegularizer, bRegularizer,
JTensor.from_ndarray(init_weight),
JTensor.from_ndarray(init_bias),
JTensor.from_ndarray(init_grad_weight),
JTensor.from_ndarray(init_grad_bias))
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class SparseLinear(Layer):
'''
SparseLinear is the sparse version of the Linear module. SparseLinear differs from Linear in two ways:
first, SparseLinear's input Tensor is a SparseTensor. Second, SparseLinear doesn't backward the
gradient to the next layer in the backpropagation by default, as the gradInput of SparseLinear is
useless and very big in most cases.
But, considering models like Wide&Deep, we provide backwardStart and backwardLength to backward
part of the gradient to the next layer.
:param input_size the size of each input sample
:param output_size the size of the module output of each sample
:param backwardStart backwardStart index, counting from 1
:param backwardLength backward length
:param withBias whether to include a bias
:param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices.
:param bRegularizer: instance of [[Regularizer]]applied to the bias.
:param init_weight: the optional initial value for the weight
:param init_bias: the optional initial value for the bias
:param init_grad_weight: the optional initial value for the grad_weight
:param init_grad_bias: the optional initial value for the grad_bias
>>> sparselinear = SparseLinear(100, 10, True, wRegularizer=L1Regularizer(0.5), bRegularizer=L1Regularizer(0.5))
creating: createL1Regularizer
creating: createL1Regularizer
creating: createSparseLinear
>>> import numpy as np
>>> init_weight = np.random.randn(10, 100)
>>> init_bias = np.random.randn(10)
>>> init_grad_weight = np.zeros([10, 100])
>>> init_grad_bias = np.zeros([10])
>>> sparselinear = SparseLinear(100, 10, True, 1, 5, L1Regularizer(0.5), L1Regularizer(0.5), init_weight, init_bias, init_grad_weight, init_grad_bias)
creating: createL1Regularizer
creating: createL1Regularizer
creating: createSparseLinear
>>> np.random.seed(123)
>>> init_weight = np.random.randn(5, 1000)
>>> init_bias = np.random.randn(5)
>>> sparselinear = SparseLinear(1000, 5, init_weight=init_weight, init_bias=init_bias)
creating: createSparseLinear
>>> input = JTensor.sparse(np.array([1, 3, 5, 2, 4, 6]), np.array([0, 0, 0, 1, 1, 1, 1, 5, 300, 2, 100, 500]), np.array([2, 1000]))
>>> print(sparselinear.forward(input))
[[ 10.09569263 -10.94844246 -4.1086688 1.02527523 11.80737209]
[ 7.9651413 9.7131443 -10.22719955 0.02345783 -3.74368906]]
'''
def __init__(self, input_size, output_size, with_bias=True, backwardStart=-1, backwardLength=-1,
wRegularizer=None, bRegularizer=None, init_weight=None, init_bias=None,
init_grad_weight=None, init_grad_bias=None, bigdl_type="float"):
super(SparseLinear, self).__init__(None, bigdl_type, input_size, output_size,
with_bias, backwardStart, backwardLength,
wRegularizer, bRegularizer,
JTensor.from_ndarray(init_weight),
JTensor.from_ndarray(init_bias),
JTensor.from_ndarray(init_grad_weight),
JTensor.from_ndarray(init_grad_bias))
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class DenseToSparse(Layer):
'''
Convert DenseTensor to SparseTensor.
>>> DenseToSparse = DenseToSparse()
creating: createDenseToSparse
'''
def __init__(self,
bigdl_type="float"):
super(DenseToSparse, self).__init__(None, bigdl_type)
class ReLU(Layer):
'''
Applies the rectified linear unit (ReLU) function element-wise to the input Tensor,
thus outputting a Tensor of the same dimension.
ReLU is defined as: f(x) = max(0, x)
Can optionally do its operation in-place without using extra state memory
>>> relu = ReLU()
creating: createReLU
'''
def __init__(self, ip=False, bigdl_type="float"):
super(ReLU, self).__init__(None, bigdl_type, ip)
class Tanh(Layer):
'''
Applies the Tanh function element-wise to the input Tensor, thus outputting a Tensor of the same
dimension. Tanh is defined as f(x) = (exp(x)-exp(-x))/(exp(x)+exp(-x)).
>>> tanh = Tanh()
creating: createTanh
'''
def __init__(self, bigdl_type="float"):
super(Tanh, self).__init__(None, bigdl_type)
class Sigmoid(Layer):
'''
Applies the Sigmoid function element-wise to the input Tensor,
thus outputting a Tensor of the same dimension.
>>> sigmoid = Sigmoid()
creating: createSigmoid
'''
def __init__(self,
bigdl_type="float"):
super(Sigmoid, self).__init__(None, bigdl_type)
class Echo(Layer):
'''
This module is for debug purpose, which can print activation and gradient in your model
topology
>>> echo = Echo()
creating: createEcho
'''
def __init__(self, bigdl_type="float"):
super(Echo, self).__init__(None, bigdl_type)
class LogSoftMax(Layer):
'''
Applies the LogSoftMax function to an n-dimensional input Tensor.
LogSoftmax is defined as: f_i(x) = log(exp(x_i) / a)
where a = sum_j[exp(x_j)].
>>> logSoftMax = LogSoftMax()
creating: createLogSoftMax
'''
def __init__(self, bigdl_type="float"):
super(LogSoftMax, self).__init__(None, bigdl_type)
class Sequential(Container):
'''
Sequential provides a means to plug layers together
in a feed-forward fully connected manner.
>>> echo = Echo()
creating: createEcho
>>> s = Sequential()
creating: createSequential
>>> s = s.add(echo)
>>> s = s.add(s)
>>> s = s.add(echo)
'''
def __init__(self, bigdl_type="float"):
super(Sequential, self).__init__(None, bigdl_type)
class TemporalConvolution(Layer):
'''
Applies a 1D convolution over an input sequence composed of nInputFrame frames.
The input tensor in `forward(input)` is expected to be a 2D tensor
(`nInputFrame` x `inputFrameSize`) or a 3D tensor
(`nBatchFrame` x `nInputFrame` x `inputFrameSize`).
:param input_frame_size The input frame size expected in sequences given into `forward()`
:param output_frame_size The output frame size the convolution layer will produce.
:param kernel_w The kernel width of the convolution
:param stride_w The step of the convolution in the width dimension.
:param propagate_back Whether propagate gradient back, default is true.
:param weight_regularizer instance of [[Regularizer]]
(eg. L1 or L2 regularization), applied to the input weights matrices.
:param bias_regularizer instance of [[Regularizer]]
applied to the bias.
:param init_weight Initial weight
:param init_bias Initial bias
:param init_grad_weight Initial gradient weight
:param init_grad_bias Initial gradient bias
>>> temporalConvolution = TemporalConvolution(6, 12, 5, 5)
creating: createTemporalConvolution
>>> temporalConvolution.setWRegularizer(L1Regularizer(0.5))
creating: createL1Regularizer
>>> temporalConvolution.setBRegularizer(L1Regularizer(0.5))
creating: createL1Regularizer
'''
def __init__(self,
input_frame_size,
output_frame_size,
kernel_w,
stride_w=1,
propagate_back=True,
weight_regularizer=None,
bias_regularizer=None,
init_weight=None,
init_bias=None,
init_grad_weight=None,
init_grad_bias=None,
bigdl_type="float"):
super(TemporalConvolution, self).__init__(None, bigdl_type,
input_frame_size,
output_frame_size,
kernel_w,
stride_w,
propagate_back,
weight_regularizer,
bias_regularizer,
JTensor.from_ndarray(init_weight),
JTensor.from_ndarray(init_bias),
JTensor.from_ndarray(init_grad_weight),
JTensor.from_ndarray(init_grad_bias))
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class BinaryTreeLSTM(Layer):
'''
This class is an implementation of Binary TreeLSTM (Constituency Tree LSTM).
:param inputSize input units size
:param hiddenSize hidden units size
:param gateOutput whether to gate the output
:param withGraph whether to create the lstms with [[Graph]]; the default value is true.
>>> treeLSTM = BinaryTreeLSTM(100, 200)
creating: createBinaryTreeLSTM
'''
def __init__(self,
input_size,
hidden_size,
gate_output=True,
with_graph=True,
bigdl_type="float"):
super(BinaryTreeLSTM, self).__init__(None,
bigdl_type,
input_size,
hidden_size,
gate_output,
with_graph)
class SpatialConvolution(Layer):
'''
Applies a 2D convolution over an input image composed of several input planes.
The input tensor in forward(input) is expected to be
a 3D tensor (nInputPlane x height x width).
:param n_input_plane The number of expected input planes in the image given into forward()
:param n_output_plane The number of output planes the convolution layer will produce.
:param kernel_w The kernel width of the convolution
:param kernel_h The kernel height of the convolution
:param stride_w The step of the convolution in the width dimension.
:param stride_h The step of the convolution in the height dimension
:param pad_w The additional zeros added per width to the input planes.
:param pad_h The additional zeros added per height to the input planes.
:param n_group Kernel group number
:param propagate_back Propagate gradient back
:param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices.
:param bRegularizer: instance of [[Regularizer]]applied to the bias.
:param init_weight: the optional initial value for the weight
:param init_bias: the optional initial value for the bias
:param init_grad_weight: the optional initial value for the grad_weight
:param init_grad_bias: the optional initial value for the grad_bias
:param with_bias: the optional initial value for if need bias
:param data_format: a string value of "NHWC" or "NCHW" to specify the input data format of this layer. In "NHWC" format
data is stored in the order of [batch_size, height, width, channels], in "NCHW" format data is stored
in the order of [batch_size, channels, height, width].
>>> spatialConvolution = SpatialConvolution(6, 12, 5, 5)
creating: createSpatialConvolution
>>> spatialConvolution.setWRegularizer(L1Regularizer(0.5))
creating: createL1Regularizer
>>> spatialConvolution.setBRegularizer(L1Regularizer(0.5))
creating: createL1Regularizer
>>> import numpy as np
>>> init_weight = np.random.randn(1, 12, 6, 5, 5)
>>> init_bias = np.random.randn(12)
>>> init_grad_weight = np.zeros([1, 12, 6, 5, 5])
>>> init_grad_bias = np.zeros([12])
>>> spatialConvolution = SpatialConvolution(6, 12, 5, 5, 1, 1, 0, 0, 1, True, L1Regularizer(0.5), L1Regularizer(0.5), init_weight, init_bias, init_grad_weight, init_grad_bias, True, "NCHW")
creating: createL1Regularizer
creating: createL1Regularizer
creating: createSpatialConvolution
'''
def __init__(self,
n_input_plane,
n_output_plane,
kernel_w,
kernel_h,
stride_w=1,
stride_h=1,
pad_w=0,
pad_h=0,
n_group=1,
propagate_back=True,
wRegularizer=None,
bRegularizer=None,
init_weight=None,
init_bias=None,
init_grad_weight=None,
init_grad_bias=None,
with_bias=True,
data_format="NCHW",
bigdl_type="float"):
super(SpatialConvolution, self).__init__(None, bigdl_type,
n_input_plane,
n_output_plane,
kernel_w,
kernel_h,
stride_w,
stride_h,
pad_w,
pad_h,
n_group,
propagate_back,
wRegularizer,
bRegularizer,
JTensor.from_ndarray(init_weight),
JTensor.from_ndarray(init_bias),
JTensor.from_ndarray(init_grad_weight),
JTensor.from_ndarray(init_grad_bias),
with_bias,
data_format)
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class TemporalMaxPooling(Layer):
'''
Applies 1D max-pooling operation in kW regions by step size dW steps.
Input sequence composed of nInputFrame frames.
The input tensor in forward(input) is expected to be a 2D tensor (nInputFrame x inputFrameSize)
or a 3D tensor (nBatchFrame x nInputFrame x inputFrameSize).
If the input sequence is a 2D tensor of dimension nInputFrame x inputFrameSize,
the output sequence will be nOutputFrame x inputFrameSize where
nOutputFrame = (nInputFrame - k_w) / d_w + 1
:param k_w: kernel width
:param d_w: step size in width
>>> temporalMaxPooling = TemporalMaxPooling(2, 2)
creating: createTemporalMaxPooling
'''
def __init__(self,
k_w,
d_w,
bigdl_type="float"):
super(TemporalMaxPooling, self).__init__(None, bigdl_type, k_w,
d_w)
class SpatialMaxPooling(Layer):
'''
Applies 2D max-pooling operation in kWxkH regions by step size dWxdH steps.
The number of output features is equal to the number of input planes.
If the input image is a 3D tensor nInputPlane x height x width,
the output image size will be nOutputPlane x oheight x owidth where
owidth = op((width + 2*padW - kW) / dW + 1)
oheight = op((height + 2*padH - kH) / dH + 1)
op is a rounding operator. By default, it is floor.
It can be changed by calling :ceil() or :floor() methods.
When padW and padH are both -1, we use a padding algorithm similar to the "SAME"
padding of tensorflow. That is
outHeight = Math.ceil(inHeight.toFloat/strideH.toFloat)
outWidth = Math.ceil(inWidth.toFloat/strideW.toFloat)
padAlongHeight = Math.max(0, (outHeight - 1) * strideH + kernelH - inHeight)
padAlongWidth = Math.max(0, (outWidth - 1) * strideW + kernelW - inWidth)
padTop = padAlongHeight / 2
padLeft = padAlongWidth / 2
:param kW: kernel width
:param kH: kernel height
:param dW: step size in width
:param dH: step size in height
:param padW: padding in width
:param padH: padding in height
:param format: "NCHW" or "NHWC", indicating the input data format
>>> spatialMaxPooling = SpatialMaxPooling(2, 2, 2, 2)
creating: createSpatialMaxPooling
>>> spatialMaxPooling = SpatialMaxPooling(2, 2, 2, 2, -1, -1, True, "NHWC")
creating: createSpatialMaxPooling
'''
# to_ceil: call floor() when False; call ceil() when True
def __init__(self, kw,
kh,
dw,
dh,
pad_w=0,
pad_h=0,
to_ceil=False,
format="NCHW",
bigdl_type="float"):
super(SpatialMaxPooling, self).__init__(None, bigdl_type, kw,
kh,
dw,
dh,
pad_w,
pad_h,
to_ceil,
format)
class Select(Layer):
'''
A simple layer that selects an index of the input tensor in the given dimension
:param dimension: the dimension to select
:param index: the index of the dimension to be selected
>>> select = Select(1, 1)
creating: createSelect
'''
def __init__(self, dim, index, bigdl_type="float"):
super(Select, self).__init__(None, bigdl_type, dim, index)
class Recurrent(Container):
'''
Recurrent module is a container of rnn cells.
Different types of rnn cells can be added using the add() function
>>> recurrent = Recurrent()
creating: createRecurrent
'''
def __init__(self, bigdl_type="float"):
super(Recurrent, self).__init__(None, bigdl_type)
def get_hidden_state(self):
"""
get hidden state and cell at last time step.
:return: list of hidden state and cell
"""
state = callBigDlFunc(self.bigdl_type, "getHiddenState", self.value)
for idx, tensor in enumerate(state):
state[idx] = tensor.to_ndarray()
return state
def set_hidden_state(self, states):
"""
set hidden state and cell at first time step.
"""
jstate, state_is_table = self.check_input(states)
callBigDlFunc(self.bigdl_type, "setHiddenState", self.value, jstate, state_is_table)
class RecurrentDecoder(Recurrent):
'''
RecurrentDecoder module is a container of rnn cells which is used to make
a prediction of the next timestep based on the prediction we made from
the previous timestep. Input for RecurrentDecoder is dynamically composed
during training. The input at t(i) is the output at t(i-1), the input at t(0) is the
user input, and the user input has to be batch x stepShape (the shape of the input
at a single time step).
Different types of rnn cells can be added using add() function.
>>> recurrent_decoder = RecurrentDecoder(output_length = 5)
creating: createRecurrentDecoder
'''
def __init__(self, output_length, bigdl_type="float"):
super(Recurrent, self).__init__(None, bigdl_type, output_length)
class LSTM(Layer):
'''
| Long Short Term Memory architecture.
| Ref.
| A.: http://arxiv.org/pdf/1303.5778v1 (blueprint for this module)
| B. http://web.eecs.utk.edu/~itamar/courses/ECE-692/Bobby_paper1.pdf
| C. http://arxiv.org/pdf/1503.04069v1.pdf
| D. https://github.com/wojzaremba/lstm
| E. https://github.com/Element-Research/rnn/blob/master/FastLSTM.lua
:param inputSize: the size of each input vector
:param hiddenSize: Hidden unit size in the LSTM
:param p: is used for [[Dropout]] probability. For more details about RNN dropouts, please refer to [RnnDrop: A Novel Dropout for RNNs in ASR](http://www.stat.berkeley.edu/~tsmoon/files/Conference/asru2015.pdf) and [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](https://arxiv.org/pdf/1512.05287.pdf)
:param activation: activation function, by default to be Tanh if not specified.
It can also be the name of an existing activation as a string.
:param inner_activation: activation function for the inner cells, by default to be Sigmoid if not specified.
It can also be the name of an existing activation as a string.
:param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices.
:param uRegularizer: instance [[Regularizer]](eg. L1 or L2 regularization), applied to the recurrent weights matrices.
:param bRegularizer: instance of [[Regularizer]]applied to the bias.
>>> lstm = LSTM(4, 3, 0.5, 'tanh', Sigmoid(), L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5))
creating: createSigmoid
creating: createL1Regularizer
creating: createL1Regularizer
creating: createL1Regularizer
creating: createTanh
creating: createLSTM
'''
def __init__(self, input_size, hidden_size, p=0.0, activation=None, inner_activation=None,
wRegularizer=None, uRegularizer=None, bRegularizer=None, bigdl_type="float"):
if not activation:
activation = Tanh()
if not inner_activation:
inner_activation = Sigmoid()
if isinstance(activation, six.string_types):
activation = get_activation_by_name(activation)
if isinstance(inner_activation, six.string_types):
inner_activation = get_activation_by_name(inner_activation)
super(LSTM, self).__init__(None, bigdl_type, input_size, hidden_size, p,
activation, inner_activation, wRegularizer, uRegularizer, bRegularizer)
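# Sketch of running an LSTM over a sequence (sizes and shapes are illustrative):
#   model = Sequential().add(Recurrent().add(LSTM(4, 3)))
#   out = model.forward(np.random.rand(2, 5, 4))   # [batch, time, input_size] -> [batch, time, hidden_size]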
class LSTMPeephole(Layer):
'''
| Long Short Term Memory architecture with peephole.
| Ref. A.: http://arxiv.org/pdf/1303.5778v1 (blueprint for this module)
| B. http://web.eecs.utk.edu/~itamar/courses/ECE-692/Bobby_paper1.pdf
| C. http://arxiv.org/pdf/1503.04069v1.pdf
| D. https://github.com/wojzaremba/lstm
| E. https://github.com/Element-Research/rnn/blob/master/LSTM.lua
:param input_size: the size of each input vector
:param hidden_size: Hidden unit size in the LSTM
:param p: is used for [[Dropout]] probability. For more details about RNN dropouts, please refer to [RnnDrop: A Novel Dropout for RNNs in ASR](http://www.stat.berkeley.edu/~tsmoon/files/Conference/asru2015.pdf) and [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](https://arxiv.org/pdf/1512.05287.pdf)
:param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices.
:param uRegularizer: instance [[Regularizer]](eg. L1 or L2 regularization), applied to the recurrent weights matrices.
:param bRegularizer: instance of [[Regularizer]]applied to the bias.
>>> lstm = LSTMPeephole(4, 3, 0.5, L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5))
creating: createL1Regularizer
creating: createL1Regularizer
creating: createL1Regularizer
creating: createLSTMPeephole
'''
def __init__(self, input_size=4, hidden_size=3, p=0.0, wRegularizer=None, uRegularizer=None, bRegularizer=None, bigdl_type="float"):
super(LSTMPeephole, self).__init__(None, bigdl_type, input_size, hidden_size, p, wRegularizer, uRegularizer, bRegularizer)
class GRU(Layer):
'''
Gated Recurrent Units architecture.
The first input in sequence uses zero value for cell and hidden state
| Ref.
| http://www.wildml.com/2015/10/recurrent-neural-network-tutorial-part-4-implementing-a-grulstm-rnn-with-python-and-theano/
| https://github.com/Element-Research/rnn/blob/master/GRU.lua
:param input_size: the size of each input vector
:param hidden_size: Hidden unit size in GRU
:param p: is used for [[Dropout]] probability. For more details about RNN dropouts, please refer to [RnnDrop: A Novel Dropout for RNNs in ASR](http://www.stat.berkeley.edu/~tsmoon/files/Conference/asru2015.pdf) and [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](https://arxiv.org/pdf/1512.05287.pdf)
:param activation: activation function, by default to be Tanh if not specified.
It can also be the name of an existing activation as a string.
:param inner_activation: activation function for the inner cells, by default to be Sigmoid if not specified.
It can also be the name of an existing activation as a string.
:param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices.
:param uRegularizer: instance [[Regularizer]](eg. L1 or L2 regularization), applied to the recurrent weights matrices.
:param bRegularizer: instance of [[Regularizer]]applied to the bias.
>>> gru = GRU(4, 3, 0.5, Tanh(), Sigmoid(), L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5))
creating: createTanh
creating: createSigmoid
creating: createL1Regularizer
creating: createL1Regularizer
creating: createL1Regularizer
creating: createGRU
'''
def __init__(self, input_size, hidden_size, p=0.0, activation=None, inner_activation=None,
wRegularizer=None, uRegularizer=None, bRegularizer=None, bigdl_type="float"):
if not activation:
activation = Tanh()
if not inner_activation:
inner_activation = Sigmoid()
if isinstance(activation, six.string_types):
activation = get_activation_by_name(activation)
if isinstance(inner_activation, six.string_types):
inner_activation = get_activation_by_name(inner_activation)
super(GRU, self).__init__(None, bigdl_type, input_size, hidden_size, p, activation, inner_activation,
wRegularizer, uRegularizer, bRegularizer)
class RnnCell(Layer):
'''
It is a simple RNN. User can pass an activation function to the RNN.
:param input_size: the size of each input vector
:param hidden_size: Hidden unit size in simple RNN
:param activation: activation function. It can also be the name of an existing activation as a string.
:param isInputWithBias: boolean
:param isHiddenWithBias: boolean
:param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices.
:param uRegularizer: instance [[Regularizer]](eg. L1 or L2 regularization), applied to the recurrent weights matrices.
:param bRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the bias.
>>> rnn = RnnCell(4, 3, Tanh(), True, True, L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5))
creating: createTanh
creating: createL1Regularizer
creating: createL1Regularizer
creating: createL1Regularizer
creating: createRnnCell
'''
def __init__(self,
input_size,
hidden_size,
activation,
isInputWithBias=True,
isHiddenWithBias=True,
wRegularizer=None,
uRegularizer=None,
bRegularizer=None,
bigdl_type="float"):
if isinstance(activation, six.string_types):
activation = get_activation_by_name(activation)
super(RnnCell, self).__init__(None, bigdl_type, input_size, hidden_size, activation, isInputWithBias, isHiddenWithBias, wRegularizer, uRegularizer, bRegularizer)
class TimeDistributed(Layer):
'''
This layer is intended to apply the contained layer to each temporal time slice
of the input tensor.
For instance, the TimeDistributed layer can feed each time slice of the input tensor
to the Linear layer.
The input data format is [Batch, Time, Other dims]. For the contained layer, it must not change
the Other dims length.
>>> td = TimeDistributed(Linear(2, 3))
creating: createLinear
creating: createTimeDistributed
'''
def __init__(self, model, bigdl_type="float"):
super(TimeDistributed, self).__init__(None, bigdl_type, model)
class Concat(Container):
'''
Concat concatenates the output of one layer of "parallel"
modules along the provided {@code dimension}: they take the
same inputs, and their output is concatenated.
```
+-----------+
+----> module1 -----+
| | | |
input -----+----> module2 -----+----> output
| | | |
+----> module3 -----+
+-----------+
```
:param dimension: dimension
>>> concat = Concat(2)
creating: createConcat
'''
def __init__(self,
dimension,
bigdl_type="float"):
super(Concat, self).__init__(None, bigdl_type,
dimension)
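# Sketch of concatenating two parallel branches along dimension 2 (sizes are illustrative):
#   branches = Concat(2).add(Linear(3, 4)).add(Linear(3, 5))
#   out = branches.forward(np.random.rand(2, 3))   # output shape (2, 9)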
class SpatialAveragePooling(Layer):
'''
Applies 2D average-pooling operation in kWxkH regions by step size dWxdH steps.
The number of output features is equal to the number of input planes.
When padW and padH are both -1, we use a padding algorithm similar to the "SAME"
padding of tensorflow. That is
outHeight = Math.ceil(inHeight.toFloat/strideH.toFloat)
outWidth = Math.ceil(inWidth.toFloat/strideW.toFloat)
padAlongHeight = Math.max(0, (outHeight - 1) * strideH + kernelH - inHeight)
padAlongWidth = Math.max(0, (outWidth - 1) * strideW + kernelW - inWidth)
padTop = padAlongHeight / 2
padLeft = padAlongWidth / 2
:param kW: kernel width
:param kH: kernel height
:param dW: step width
:param dH: step height
:param padW: padding width
:param padH: padding height
:param global_pooling: If globalPooling then it will pool over the size of the input by doing
kH = input->height and kW = input->width
:param ceilMode: whether the output size is to be ceiled or floored
:param countIncludePad: whether to include padding when dividing the number of elements in the pooling region
:param divide: whether to do the averaging
:param format: "NCHW" or "NHWC", indicating the input data format
>>> spatialAveragePooling = SpatialAveragePooling(7,7)
creating: createSpatialAveragePooling
>>> spatialAveragePooling = SpatialAveragePooling(2, 2, 2, 2, -1, -1, True, format="NHWC")
creating: createSpatialAveragePooling
'''
def __init__(self,
kw,
kh,
dw=1,
dh=1,
pad_w=0,
pad_h=0,
global_pooling=False,
ceil_mode=False,
count_include_pad=True,
divide=True,
format="NCHW",
bigdl_type="float"):
super(SpatialAveragePooling, self).__init__(None, bigdl_type,
kw,
kh,
dw,
dh,
pad_w,
pad_h,
global_pooling,
ceil_mode,
count_include_pad,
divide,
format)
def set_weights(self, weights):
super(SpatialAveragePooling, self).set_weights(weights)
class SpatialBatchNormalization(Layer):
'''
This layer implements Batch Normalization as described in the paper:
"Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift"
by Sergey Ioffe, Christian Szegedy
This implementation is useful for inputs coming from convolution layers.
For non-convolutional layers, see [[BatchNormalization]]
The operation implemented is:
```
( x - mean(x) )
y = -------------------- * gamma + beta
standard-deviation(x)
```
where gamma and beta are learnable parameters.
The learning of gamma and beta is optional.
>>> spatialBatchNormalization = SpatialBatchNormalization(1)
creating: createSpatialBatchNormalization
>>> import numpy as np
>>> init_weight = np.array([1.0])
>>> init_grad_weight = np.array([0.0])
>>> init_bias = np.array([0.0])
>>> init_grad_bias = np.array([0.0])
>>> spatialBatchNormalization = SpatialBatchNormalization(1, 1e-5, 0.1, True, init_weight, init_bias, init_grad_weight, init_grad_bias)
creating: createSpatialBatchNormalization
'''
def __init__(self,
n_output,
eps=1e-5,
momentum=0.1,
affine=True,
init_weight=None,
init_bias=None,
init_grad_weight=None,
init_grad_bias=None,
bigdl_type="float"):
super(SpatialBatchNormalization, self).__init__(None, bigdl_type,
n_output,
eps,
momentum,
affine,
JTensor.from_ndarray(init_weight),
JTensor.from_ndarray(init_bias),
JTensor.from_ndarray(init_grad_weight),
JTensor.from_ndarray(init_grad_bias))
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class SpatialCrossMapLRN(Layer):
'''
Applies Spatial Local Response Normalization between different feature maps.
The operation implemented is:
```
x_f
y_f = -------------------------------------------------
(k+(alpha/size)* sum_{l=l1 to l2} (x_l^2^))^beta^
```
where x_f is the input at spatial locations h,w (not shown for simplicity) and feature map f,
l1 corresponds to max(0,f-ceil(size/2)) and l2 to min(F, f-ceil(size/2) + size).
Here, F is the number of feature maps.
:param size: the number of channels to sum over
:param alpha: the scaling parameter
:param beta: the exponent
:param k: a constant
>>> spatialCrossMapLRN = SpatialCrossMapLRN()
creating: createSpatialCrossMapLRN
'''
def __init__(self,
size=5,
alpha=1.0,
beta=0.75,
k=1.0,
bigdl_type="float"):
super(SpatialCrossMapLRN, self).__init__(None, bigdl_type,
size,
alpha,
beta,
k)
class Dropout(Layer):
'''
Dropout masks (sets to zero) parts of the input using a Bernoulli distribution.
Each input element has a probability initP of being dropped. If scale is
set, the outputs are scaled by a factor of 1/(1-initP) during training.
During evaluation, the output is the same as the input.
:param initP: probability to be dropped
:param inplace: inplace model
:param scale: if scale by a factor of 1/(1-initP)
>>> dropout = Dropout(0.4)
creating: createDropout
'''
def __init__(self,
init_p=0.5,
inplace=False,
scale=True,
bigdl_type="float"):
super(Dropout, self).__init__(None, bigdl_type,
init_p,
inplace,
scale)
class GaussianDropout(Layer):
'''
Apply multiplicative 1-centered Gaussian noise.
The multiplicative noise will have standard deviation `sqrt(rate / (1 - rate))`.
As it is a regularization layer, it is only active at training time.
:param rate: drop probability (as with `Dropout`).
>>> GaussianDropout = GaussianDropout(0.5)
creating: createGaussianDropout
'''
def __init__(self,
rate,
bigdl_type="float"):
super(GaussianDropout, self).__init__(None, bigdl_type,
rate)
class GaussianNoise(Layer):
'''
Apply additive zero-centered Gaussian noise.
This is useful to mitigate overfitting
(you could see it as a form of random data augmentation).
Gaussian Noise (GS) is a natural choice as corruption process for real valued inputs.
As it is a regularization layer, it is only active at training time.
:param stddev: standard deviation of the noise distribution
>>> GaussianNoise = GaussianNoise(0.5)
creating: createGaussianNoise
'''
def __init__(self,
stddev,
bigdl_type="float"):
super(GaussianNoise, self).__init__(None, bigdl_type,
stddev)
class View(Layer):
'''
This module creates a new view of the input tensor using the sizes passed to the constructor.
The method setNumInputDims() allows you to specify the expected number of dimensions of the
inputs of the modules. This makes it possible to use minibatch inputs when using a size -1
for one of the dimensions.
:param sizes: sizes used to create the new view
>>> view = View([1024,2])
creating: createView
'''
def __init__(self,
sizes,
num_input_dims=0,
bigdl_type="float"):
super(View, self).__init__(None, bigdl_type,
sizes,
num_input_dims)
class Abs(Layer):
'''
an element-wise abs operation
>>> abs = Abs()
creating: createAbs
'''
def __init__(self,
bigdl_type="float"):
super(Abs, self).__init__(None, bigdl_type)
class Add(Layer):
'''
adds a bias term to the input data
:param input_size: size of input data
>>> add = Add(1)
creating: createAdd
'''
def __init__(self,
input_size,
bigdl_type="float"):
super(Add, self).__init__(None, bigdl_type,
input_size)
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class AddConstant(Layer):
'''
adding a constant
:param constant_scalar: constant value
:param inplace: Can optionally do its operation in-place without using extra state memory
>>> addConstant = AddConstant(1e-5, True)
creating: createAddConstant
'''
def __init__(self,
constant_scalar,
inplace=False,
bigdl_type="float"):
super(AddConstant, self).__init__(None, bigdl_type,
constant_scalar,
inplace)
class BatchNormalization(Layer):
'''
This layer implements Batch Normalization as described in the paper:
"Batch Normalization: Accelerating Deep Network Training by Reducing Internal
Covariate Shift"
by Sergey Ioffe, Christian Szegedy https://arxiv.org/abs/1502.03167
This implementation is useful for inputs NOT coming from convolution layers. For convolution
layers, use nn.SpatialBatchNormalization.
The operation implemented is:
```
( x - mean(x) )
y = -------------------- * gamma + beta
standard-deviation(x)
```
where gamma and beta are learnable parameters. The learning of gamma and beta is optional.
:param n_output: output feature map number
:param eps: avoid divide zero
:param momentum: momentum for weight update
:param affine: affine operation on output or not
>>> batchNormalization = BatchNormalization(1, 1e-5, 1e-5, True)
creating: createBatchNormalization
>>> import numpy as np
>>> init_weight = np.random.randn(2)
>>> init_grad_weight = np.zeros([2])
>>> init_bias = np.zeros([2])
>>> init_grad_bias = np.zeros([2])
>>> batchNormalization = BatchNormalization(2, 1e-5, 1e-5, True, init_weight, init_bias, init_grad_weight, init_grad_bias)
creating: createBatchNormalization
'''
def __init__(self,
n_output,
eps=1e-5,
momentum=0.1,
affine=True,
init_weight=None,
init_bias=None,
init_grad_weight=None,
init_grad_bias=None,
bigdl_type="float"):
super(BatchNormalization, self).__init__(None, bigdl_type,
n_output,
eps,
momentum,
affine,
JTensor.from_ndarray(init_weight),
JTensor.from_ndarray(init_bias),
JTensor.from_ndarray(init_grad_weight),
JTensor.from_ndarray(init_grad_bias))
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class BifurcateSplitTable(Layer):
'''
Creates a module that takes a Tensor as input and
outputs a table of two tensors, splitting the Tensor along
the specified dimension `dimension`.
The input to this layer is expected to be a tensor, or a batch of tensors;
:param dimension: the dimension along which to split the input
:param T: numeric type. Only support float/double now
>>> bifurcateSplitTable = BifurcateSplitTable(1)
creating: createBifurcateSplitTable
'''
def __init__(self,
dimension,
bigdl_type="float"):
super(BifurcateSplitTable, self).__init__(None, bigdl_type,
dimension)
class Bilinear(Layer):
'''
a bilinear transformation with sparse inputs,
The input tensor given in forward(input) is a table containing both inputs x_1 and x_2,
which are tensors of size N x inputDimension1 and N x inputDimension2, respectively.
:param input_size1: input dimension of x_1
:param input_size2: input dimension of x_2
:param output_size: output dimension
:param bias_res: whether use bias
:param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices.
:param bRegularizer: instance of [[Regularizer]] applied to the bias.
>>> bilinear = Bilinear(1, 1, 1, True, L1Regularizer(0.5))
creating: createL1Regularizer
creating: createBilinear
'''
def __init__(self,
input_size1,
input_size2,
output_size,
bias_res=True,
wRegularizer=None,
bRegularizer=None,
bigdl_type="float"):
super(Bilinear, self).__init__(None, bigdl_type,
input_size1,
input_size2,
output_size,
bias_res,
wRegularizer,
bRegularizer)
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class Bottle(Container):
'''
Bottle allows varying dimensionality input to be forwarded through any module
that accepts input of nInputDim dimensions, and generates output of nOutputDim dimensions.
:param module: transform module
:param n_input_dim: nInputDim dimensions of module
:param n_output_dim1: output of nOutputDim dimensions
>>> bottle = Bottle(Linear(100,10), 1, 1)
creating: createLinear
creating: createBottle
'''
def __init__(self,
module,
n_input_dim=2,
n_output_dim1=INTMAX,
bigdl_type="float"):
super(Bottle, self).__init__(None, bigdl_type,
module,
n_input_dim,
n_output_dim1)
class CAdd(Layer):
'''
This layer has a bias tensor with given size. The bias will be added element wise to the input
tensor. If the element number of the bias tensor matches the input tensor, a simple element-wise
add will be done. Otherwise the bias will be expanded to the same size as the input. The expand
means repeating on the unmatched singleton dimensions (if an unmatched dimension is not a
singleton dimension, an error will be reported). If the input is a batch, a singleton dimension
will be added to the first dimension before the expand.
:param size: the size of the bias
:param bRegularizer: instance of [[Regularizer]] applied to the bias.
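A sketch of the expand behaviour (illustrative only, not run as a doctest), assuming
a 2D input whose second dimension matches the bias size:
```
import numpy as np
bias = CAdd([1, 4])                   # bias of shape 1 x 4
x = np.zeros([3, 4])                  # 3 samples with 4 features each
y = bias.forward(x)                   # the 1 x 4 bias is expanded over the 3 rows
```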
>>> cAdd = CAdd([1,2])
creating: createCAdd
'''
def __init__(self,
size, bRegularizer=None,
bigdl_type="float"):
super(CAdd, self).__init__(None, bigdl_type,
size, bRegularizer)
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class CAddTable(Layer):
'''
Merge the input tensors in the input table by element wise adding them together. The input
table is actually an array of tensors with the same size.
:param inplace: reuse the input memory
>>> cAddTable = CAddTable(True)
creating: createCAddTable
'''
def __init__(self,
inplace=False,
bigdl_type="float"):
super(CAddTable, self).__init__(None, bigdl_type,
inplace)
class CAveTable(Layer):
'''
Merge the input tensors in the input table by element wise taking the average. The input
table is actually an array of tensors with the same size.
:param inplace: reuse the input memory
>>> cAveTable = CAveTable(True)
creating: createCAveTable
'''
def __init__(self,
inplace=False,
bigdl_type="float"):
super(CAveTable, self).__init__(None, bigdl_type,
inplace)
class CDivTable(Layer):
'''
Takes a table with two Tensors and returns the component-wise division between them.
>>> cDivTable = CDivTable()
creating: createCDivTable
'''
def __init__(self,
bigdl_type="float"):
super(CDivTable, self).__init__(None, bigdl_type)
class CMaxTable(Layer):
'''
Takes a table of Tensors and outputs the max of all of them.
>>> cMaxTable = CMaxTable()
creating: createCMaxTable
'''
def __init__(self,
bigdl_type="float"):
super(CMaxTable, self).__init__(None, bigdl_type)
class CMinTable(Layer):
'''
Takes a table of Tensors and outputs the min of all of them.
>>> cMinTable = CMinTable()
creating: createCMinTable
'''
def __init__(self,
bigdl_type="float"):
super(CMinTable, self).__init__(None, bigdl_type)
class CMul(Layer):
'''
Applies a component-wise multiplication to the incoming data
:param size: size of the data
>>> cMul = CMul([1,2])
creating: createCMul
'''
def __init__(self,
size,
wRegularizer=None,
bigdl_type="float"):
super(CMul, self).__init__(None, bigdl_type,
size, wRegularizer)
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class CMulTable(Layer):
'''
Takes a table of Tensors and outputs the multiplication of all of them.
>>> cMulTable = CMulTable()
creating: createCMulTable
'''
def __init__(self,
bigdl_type="float"):
super(CMulTable, self).__init__(None, bigdl_type)
class CSubTable(Layer):
'''
Takes a table with two Tensors and returns the component-wise subtraction between them.
>>> cSubTable = CSubTable()
creating: createCSubTable
'''
def __init__(self,
bigdl_type="float"):
super(CSubTable, self).__init__(None, bigdl_type)
class Clamp(Layer):
'''
Clamps all elements into the range [min_value, max_value].
Output is identical to input in the range,
otherwise elements less than min_value (or greater than max_value)
are saturated to min_value (or max_value).
:param min: the minimum value of the clamping range
:param max: the maximum value of the clamping range
>>> clamp = Clamp(1, 3)
creating: createClamp
'''
def __init__(self,
min,
max,
bigdl_type="float"):
super(Clamp, self).__init__(None, bigdl_type,
min,
max)
class Contiguous(Layer):
'''
used to make the input and grad_output contiguous
>>> contiguous = Contiguous()
creating: createContiguous
'''
def __init__(self,
bigdl_type="float"):
super(Contiguous, self).__init__(None, bigdl_type)
class Cosine(Layer):
'''
Cosine calculates the cosine similarity of the input to k mean centers. The input given in
forward(input) must be either a vector (1D tensor) or matrix (2D tensor). If the input is a
vector, it must have the size of inputSize. If it is a matrix, then each row is assumed to be
an input sample of given batch (the number of rows means the batch size and the number of
columns should be equal to the inputSize).
:param input_size: the size of each input sample
:param output_size: the size of the module output of each sample
>>> cosine = Cosine(2,3)
creating: createCosine
'''
def __init__(self,
input_size,
output_size,
bigdl_type="float"):
super(Cosine, self).__init__(None, bigdl_type,
input_size,
output_size)
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class CosineDistance(Layer):
'''
Outputs the cosine distance between inputs
>>> cosineDistance = CosineDistance()
creating: createCosineDistance
'''
def __init__(self,
bigdl_type="float"):
super(CosineDistance, self).__init__(None, bigdl_type)
class Input(Node):
'''
The Input layer does nothing to the input tensors, just passing them through. It is used as input to
the Graph container (add a link) when the first layer of the graph container accepts multiple
tensors as inputs.
Each input node of the graph container should accept one tensor as input. If you want a module
accepting multiple tensors as input, you should add some Input module before it and connect
the outputs of the Input nodes to it.
Please note that the return is not a layer but a Node containing input layer.
>>> input = Input()
creating: createInput
'''
def __init__(self,
bigdl_type="float"):
super(Input, self).__init__(None, bigdl_type)
class DotProduct(Layer):
'''
This is a simple table layer which takes a table of two tensors as input
and calculate the dot product between them as outputs
>>> dotProduct = DotProduct()
creating: createDotProduct
'''
def __init__(self,
bigdl_type="float"):
super(DotProduct, self).__init__(None, bigdl_type)
class ELU(Layer):
'''
D-A Clevert, Thomas Unterthiner, Sepp Hochreiter
Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
[http://arxiv.org/pdf/1511.07289.pdf]
>>> eLU = ELU(1e-5, True)
creating: createELU
'''
def __init__(self,
alpha=1.0,
inplace=False,
bigdl_type="float"):
super(ELU, self).__init__(None, bigdl_type,
alpha,
inplace)
class Euclidean(Layer):
'''
Outputs the Euclidean distance of the input to outputSize centers
:param inputSize: inputSize
:param outputSize: outputSize
:param T: Numeric type. Only support float/double now
>>> euclidean = Euclidean(1, 1, True)
creating: createEuclidean
'''
def __init__(self,
input_size,
output_size,
fast_backward=True,
bigdl_type="float"):
super(Euclidean, self).__init__(None, bigdl_type,
input_size,
output_size,
fast_backward)
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class Exp(Layer):
'''
Applies element-wise exp to input tensor.
>>> exp = Exp()
creating: createExp
'''
def __init__(self,
bigdl_type="float"):
super(Exp, self).__init__(None, bigdl_type)
class FlattenTable(Layer):
'''
This is a table layer which takes an arbitrarily deep table of Tensors
(potentially nested) as input and a table of Tensors without any nested
table will be produced
>>> flattenTable = FlattenTable()
creating: createFlattenTable
'''
def __init__(self,
bigdl_type="float"):
super(FlattenTable, self).__init__(None, bigdl_type)
class GradientReversal(Layer):
'''
It is a simple module that preserves the input, but takes the
gradient from the subsequent layer, multiplies it by -lambda
and passes it to the preceding layer. This can be used to maximise
an objective function whilst using gradient descent, as described in
["Domain-Adversarial Training of Neural Networks"
(http://arxiv.org/abs/1505.07818)]
:param the_lambda: hyper-parameter lambda, can be set dynamically during training
>>> gradientReversal = GradientReversal(1e-5)
creating: createGradientReversal
>>> gradientReversal = GradientReversal()
creating: createGradientReversal
'''
def __init__(self,
the_lambda=1.0,
bigdl_type="float"):
super(GradientReversal, self).__init__(None, bigdl_type,
the_lambda)
class HardShrink(Layer):
'''
This is a transfer layer which applies the hard shrinkage function
element-wise to the input Tensor. The parameter lambda is set to 0.5
by default
```
x, if x > lambda
f(x) = x, if x < -lambda
0, otherwise
```
:param the_lambda: a threshold value whose default value is 0.5
>>> hardShrink = HardShrink(1e-5)
creating: createHardShrink
'''
def __init__(self,
the_lambda=0.5,
bigdl_type="float"):
super(HardShrink, self).__init__(None, bigdl_type,
the_lambda)
class HardTanh(Layer):
'''
Applies HardTanh to each element of input, HardTanh is defined:
```
| maxValue, if x > maxValue
f(x) = | minValue, if x < minValue
| x, otherwise
```
:param min_value: minValue in f(x), default is -1.
:param max_value: maxValue in f(x), default is 1.
:param inplace: whether to enable in-place mode.
>>> hardTanh = HardTanh(1e-5, 1e5, True)
creating: createHardTanh
>>> hardTanh = HardTanh()
creating: createHardTanh
'''
def __init__(self,
min_value=-1.0,
max_value=1.0,
inplace=False,
bigdl_type="float"):
super(HardTanh, self).__init__(None, bigdl_type,
min_value,
max_value,
inplace)
class Index(Layer):
'''
Applies the Tensor index operation along the given dimension.
:param dimension: the dimension to be indexed
>>> index = Index(1)
creating: createIndex
'''
def __init__(self,
dimension,
bigdl_type="float"):
super(Index, self).__init__(None, bigdl_type,
dimension)
class InferReshape(Layer):
'''
Reshape the input tensor with automatic size inference support.
Positive numbers in the `size` argument are used to reshape the input to the
corresponding dimension size.
There are also two special values allowed in `size`:
a. `0` means keep the corresponding dimension size of the input unchanged.
i.e., if the 1st dimension size of the input is 2,
the 1st dimension size of output will be set as 2 as well.
b. `-1` means infer this dimension size from other dimensions.
This dimension size is calculated by keeping the amount of output elements
consistent with the input.
Only one `-1` is allowable in `size`.
For example,
Input tensor with size: (4, 5, 6, 7)
-> InferReshape(Array(4, 0, 3, -1))
Output tensor with size: (4, 5, 3, 14)
The 1st and 3rd dim are set to given sizes, keep the 2nd dim unchanged,
and inferred the last dim as 14.
:param size: the target tensor size
:param batch_mode: whether in batch mode
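The example above as a sketch (illustrative only, not run as a doctest):
```
import numpy as np
reshape = InferReshape([4, 0, 3, -1])
x = np.random.rand(4, 5, 6, 7)        # 840 elements in total
y = reshape.forward(x)                # shape (4, 5, 3, 14); dim 2 kept, last dim inferred
```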
>>> inferReshape = InferReshape([4, 0, 3, -1], False)
creating: createInferReshape
'''
def __init__(self,
size,
batch_mode=False,
bigdl_type="float"):
super(InferReshape, self).__init__(None, bigdl_type,
size,
batch_mode)
class JoinTable(Layer):
'''
It is a table module which takes a table of Tensors as input and
outputs a Tensor by joining them together along the dimension `dimension`.
The input to this layer is expected to be a tensor, or a batch of tensors;
when using mini-batch, a batch of sample tensors will be passed to the layer and
the user needs to specify the number of dimensions of each sample tensor in the
batch using `nInputDims`.
:param dimension: to be join in this dimension
:param nInputDims: specify the number of dimensions that this module will receive. If it is more than the dimension of the input tensors, the first dimension would be considered as batch size
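A sketch of joining two 2D tensors along their second dimension (illustrative only,
not run as a doctest):
```
import numpy as np
join = JoinTable(2, 2)                # join along dim 2; each sample tensor has 2 dims
a = np.random.rand(4, 3)
b = np.random.rand(4, 5)
y = join.forward([a, b])              # shape (4, 8)
```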
>>> joinTable = JoinTable(1, 1)
creating: createJoinTable
'''
def __init__(self,
dimension,
n_input_dims,
bigdl_type="float"):
super(JoinTable, self).__init__(None, bigdl_type,
dimension,
n_input_dims)
class SparseJoinTable(Layer):
'''
:: Experimental ::
Sparse version of JoinTable. Backward just pass the origin gradOutput back to
the next layers without splitting. So this layer may only work in Wide&Deep-like models.
:param dimension: to be join in this dimension
>>> joinTable = SparseJoinTable(1)
creating: createSparseJoinTable
'''
def __init__(self,
dimension,
bigdl_type="float"):
super(SparseJoinTable, self).__init__(None, bigdl_type,
dimension)
class L1Penalty(Layer):
'''
adds an L1 penalty to an input (for sparsity).
L1Penalty is an inline module that in its forward propagation copies the input Tensor
directly to the output, and computes an L1 loss of the latent state (input) and stores
it in the module's loss field. During backward propagation: gradInput = gradOutput + gradLoss.
:param l1weight:
:param sizeAverage:
:param provideOutput:
>>> l1Penalty = L1Penalty(1, True, True)
creating: createL1Penalty
'''
def __init__(self,
l1weight,
size_average=False,
provide_output=True,
bigdl_type="float"):
super(L1Penalty, self).__init__(None, bigdl_type,
l1weight,
size_average,
provide_output)
class LeakyReLU(Layer):
'''
It is a transfer module that applies LeakyReLU, whose parameter negval sets the slope of the
negative part. LeakyReLU is defined as: f(x) = max(0, x) + negval * min(0, x)
:param negval: sets the slope of the negative part
:param inplace: if it is true, doing the operation in-place without using extra state memory
>>> leakyReLU = LeakyReLU(1e-5, True)
creating: createLeakyReLU
'''
def __init__(self,
negval=0.01,
inplace=False,
bigdl_type="float"):
super(LeakyReLU, self).__init__(None, bigdl_type,
negval,
inplace)
class Log(Layer):
'''
Applies the log function element-wise to the input Tensor,
thus outputting a Tensor of the same dimension.
>>> log = Log()
creating: createLog
'''
def __init__(self,
bigdl_type="float"):
super(Log, self).__init__(None, bigdl_type)
class LogSigmoid(Layer):
'''
This class is a transform layer corresponding to the sigmoid function:
f(x) = Log(1 / (1 + e ^^ (-x)))
>>> logSigmoid = LogSigmoid()
creating: createLogSigmoid
'''
def __init__(self,
bigdl_type="float"):
super(LogSigmoid, self).__init__(None, bigdl_type)
class LookupTable(Layer):
'''
a convolution of width 1, commonly used for word embeddings
:param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices.
>>> lookupTable = LookupTable(1, 1, 1e-5, 1e-5, 1e-5, True, L1Regularizer(0.5))
creating: createL1Regularizer
creating: createLookupTable
'''
def __init__(self,
n_index,
n_output,
padding_value=0.0,
max_norm=DOUBLEMAX,
norm_type=2.0,
should_scale_grad_by_freq=False,
wRegularizer=None,
bigdl_type="float"):
super(LookupTable, self).__init__(None, bigdl_type,
n_index,
n_output,
padding_value,
max_norm,
norm_type,
should_scale_grad_by_freq,
wRegularizer)
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class MM(Layer):
'''
Module to perform matrix multiplication on two mini-batch inputs, producing a mini-batch.
:param trans_a: whether to transpose the first input matrix
:param trans_b: whether to transpose the second input matrix
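A sketch of batched matrix multiplication (illustrative only, not run as a doctest):
```
import numpy as np
mm = MM()                             # no transposition of either input
a = np.random.rand(2, 3, 4)           # batch of 2 matrices, each 3 x 4
b = np.random.rand(2, 4, 5)           # batch of 2 matrices, each 4 x 5
y = mm.forward([a, b])                # shape (2, 3, 5)
```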
>>> mM = MM(True, True)
creating: createMM
'''
def __init__(self,
trans_a=False,
trans_b=False,
bigdl_type="float"):
super(MM, self).__init__(None, bigdl_type,
trans_a,
trans_b)
class MV(Layer):
'''
It is a module to perform matrix vector multiplication on two mini-batch inputs,
producing a mini-batch.
:param trans: whether make matrix transpose before multiplication
>>> mV = MV(True)
creating: createMV
'''
def __init__(self,
trans=False,
bigdl_type="float"):
super(MV, self).__init__(None, bigdl_type,
trans)
class MapTable(Container):
'''
This class is a container for a single module which will be applied
to all input elements. The member module is cloned as necessary to
process all input elements.
>>> mapTable = MapTable(Linear(100,10))
creating: createLinear
creating: createMapTable
'''
def __init__(self,
module=None,
bigdl_type="float"):
super(MapTable, self).__init__(None, bigdl_type,
module)
class MaskedSelect(Layer):
'''
Performs a torch.MaskedSelect on a Tensor. The mask is supplied as a tabular argument with
the input on the forward and backward passes.
>>> maskedSelect = MaskedSelect()
creating: createMaskedSelect
'''
def __init__(self,
bigdl_type="float"):
super(MaskedSelect, self).__init__(None, bigdl_type)
class Max(Layer):
'''
Applies a max operation over dimension `dim`
:param dim: max along this dimension
:param num_input_dims: Optional. If in a batch model, set to the inputDims.
>>> max = Max(1)
creating: createMax
'''
def __init__(self,
dim,
num_input_dims=INTMIN,
bigdl_type="float"):
super(Max, self).__init__(None, bigdl_type,
dim,
num_input_dims)
class Mean(Layer):
'''
It is a simple layer which applies a mean operation over the given dimension. When nInputDims
is provided, the input will be considered as batches. Then the mean operation will be applied
in (dimension + 1). The input to this layer is expected to be a tensor, or a batch of
tensors; when using mini-batch, a batch of sample tensors will be passed to the layer and the
user needs to specify the number of dimensions of each sample tensor in the batch using
nInputDims.
:param dimension: the dimension to be applied mean operation
:param n_input_dims: specify the number of dimensions that this module will receive. If it is more than the dimension of the input tensors, the first dimension would be considered as batch size
:param squeeze: default is true, which will squeeze the sum dimension; set it to false to keep the sum dimension
>>> mean = Mean(1, 1, True)
creating: createMean
'''
def __init__(self,
dimension=1,
n_input_dims=-1,
squeeze=True,
bigdl_type="float"):
super(Mean, self).__init__(None, bigdl_type,
dimension,
n_input_dims,
squeeze)
class Min(Layer):
'''
Applies a min operation over dimension `dim`.
:param dim: min along this dimension
:param num_input_dims: Optional. If in a batch model, set to the input_dim.
>>> min = Min(1)
creating: createMin
'''
def __init__(self,
dim=1,
num_input_dims=INTMIN,
bigdl_type="float"):
super(Min, self).__init__(None, bigdl_type,
dim,
num_input_dims)
class MixtureTable(Layer):
'''
Creates a module that takes a table {gater, experts} as input and outputs the mixture of experts
(a Tensor or table of Tensors) using a gater Tensor. When dim is provided, it specifies the
dimension of the experts Tensor that will be interpolated (or mixed). Otherwise, the experts
should take the form of a table of Tensors. This Module works for experts of dimension 1D or
more, and for a 1D or 2D gater, i.e. for single examples or mini-batches.
>>> mixtureTable = MixtureTable()
creating: createMixtureTable
>>> mixtureTable = MixtureTable(10)
creating: createMixtureTable
'''
def __init__(self,
dim=INTMAX,
bigdl_type="float"):
super(MixtureTable, self).__init__(None, bigdl_type, dim)
class Mul(Layer):
'''
Multiply a single scalar factor to the incoming data
>>> mul = Mul()
creating: createMul
'''
def __init__(self,
bigdl_type="float"):
super(Mul, self).__init__(None, bigdl_type)
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class MulConstant(Layer):
'''
Multiplies input Tensor by a (non-learnable) scalar constant.
This module is sometimes useful for debugging purposes.
:param scalar: scalar constant
:param inplace: Can optionally do its operation in-place without using extra state memory
>>> mulConstant = MulConstant(2.5)
creating: createMulConstant
'''
def __init__(self,
scalar,
inplace=False,
bigdl_type="float"):
super(MulConstant, self).__init__(None, bigdl_type,
scalar,
inplace)
class Narrow(Layer):
'''
Narrow is an application of the narrow operation in a module.
The module further supports a negative length in order to handle inputs with an unknown size.
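A sketch of slicing along the first dimension (illustrative only, not run as a doctest):
```
import numpy as np
narrow = Narrow(1, 2, 2)              # dimension 1, offset 2, length 2 (1-based)
x = np.random.rand(4, 3)
y = narrow.forward(x)                 # the 2nd and 3rd rows of x, shape (2, 3)
```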
>>> narrow = Narrow(1, 1, 1)
creating: createNarrow
'''
def __init__(self,
dimension,
offset,
length=1,
bigdl_type="float"):
super(Narrow, self).__init__(None, bigdl_type,
dimension,
offset,
length)
class NarrowTable(Layer):
'''
Creates a module that takes a table as input and outputs the subtable starting at index
offset having length elements (defaults to 1 element). The elements can be either
a table or a Tensor. If `length` is negative, the elements from `offset` up to the element
located at abs(`length`) from the last element of the input are selected.
:param offset: the start index of table
:param length: the length want to select
>>> narrowTable = NarrowTable(1, 1)
creating: createNarrowTable
'''
def __init__(self,
offset,
length=1,
bigdl_type="float"):
super(NarrowTable, self).__init__(None, bigdl_type,
offset,
length)
class Normalize(Layer):
'''
Normalizes the input Tensor to have unit L_p norm. The smoothing parameter eps prevents
division by zero when the input contains all zero elements (default = 1e-10).
p can be the max value of double
>>> normalize = Normalize(1e-5, 1e-5)
creating: createNormalize
'''
def __init__(self,
p,
eps=1e-10,
bigdl_type="float"):
super(Normalize, self).__init__(None, bigdl_type,
p,
eps)
class PReLU(Layer):
'''
Applies parametric ReLU, whose parameter varies the slope of the negative part.
PReLU: f(x) = max(0, x) + a * min(0, x)
nOutputPlane's default value is 0, which means PReLU is used in its shared version and has
only one parameter.
Notice: Please don't use weight decay on this.
:param n_output_plane: input map number. Default is 0.
>>> pReLU = PReLU(1)
creating: createPReLU
'''
def __init__(self,
n_output_plane=0,
bigdl_type="float"):
super(PReLU, self).__init__(None, bigdl_type,
n_output_plane)
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class Padding(Layer):
'''
This module adds pad units of padding to dimension dim of the input. If pad is negative,
padding is added to the left, otherwise, it is added to the right of the dimension.
The input to this layer is expected to be a tensor, or a batch of tensors;
when using mini-batch, a batch of sample tensors will be passed to the layer and
the user needs to specify the number of dimensions of each sample tensor in the
batch using n_input_dim.
:param dim: the dimension to be applied padding operation
:param pad: num of the pad units
:param n_input_dim: specify the number of dimensions that this module will receive. If it is more than the dimension of the input tensors, the first dimension would be considered as batch size
:param value: padding value
>>> padding = Padding(1, 1, 1, 1e-5, 1)
creating: createPadding
'''
def __init__(self,
dim,
pad,
n_input_dim,
value=0.0,
n_index=1,
bigdl_type="float"):
super(Padding, self).__init__(None, bigdl_type,
dim,
pad,
n_input_dim,
value,
n_index)
class PairwiseDistance(Layer):
'''
It is a module that takes a table of two vectors as input and outputs
the distance between them using the p-norm.
The input given in `forward(input)` is a [[Table]] that contains two tensors which
must be either a vector (1D tensor) or matrix (2D tensor). If the input is a vector,
it must have the size of `inputSize`. If it is a matrix, then each row is assumed to be
an input sample of the given batch (the number of rows means the batch size and
the number of columns should be equal to the `inputSize`).
:param norm: the norm of distance
>>> pairwiseDistance = PairwiseDistance(2)
creating: createPairwiseDistance
'''
def __init__(self,
norm=2,
bigdl_type="float"):
super(PairwiseDistance, self).__init__(None, bigdl_type,
norm)
class ParallelTable(Container):
'''
It is a container module that applies the i-th member module to the i-th
input, and outputs an output in the form of Table
>>> parallelTable = ParallelTable()
creating: createParallelTable
'''
def __init__(self,
bigdl_type="float"):
super(ParallelTable, self).__init__(None, bigdl_type)
class Power(Layer):
'''
Apply an element-wise power operation with scale and shift.
f(x) = (shift + scale * x)^power^
:param power: the exponent.
:param scale: Default is 1.
:param shift: Default is 0.
>>> power = Power(1e-5)
creating: createPower
'''
def __init__(self,
power,
scale=1.0,
shift=0.0,
bigdl_type="float"):
super(Power, self).__init__(None, bigdl_type,
power,
scale,
shift)
class RReLU(Layer):
'''
Applies the randomized leaky rectified linear unit (RReLU) element-wise to the input Tensor,
thus outputting a Tensor of the same dimension. Informally the RReLU is also known as
'insanity' layer. RReLU is defined as:
```
f(x) = max(0,x) + a * min(0, x) where a ~ U(l, u).
```
In training mode negative inputs are multiplied by a factor drawn from a uniform random
distribution U(l, u).
In evaluation mode a RReLU behaves like a LeakyReLU with a constant mean factor
a = (l + u) / 2.
By default, l = 1/8 and u = 1/3. If l == u a RReLU effectively becomes a LeakyReLU.
Regardless of operating in in-place mode a RReLU will internally allocate an input-sized
noise tensor to store random factors for negative inputs.
The backward() operation assumes that forward() has been called before.
For reference see [Empirical Evaluation of Rectified Activations in Convolutional Network](
http://arxiv.org/abs/1505.00853).
:param lower: lower boundary of uniform random distribution
:param upper: upper boundary of uniform random distribution
:param inplace: optionally do its operation in-place without using extra state memory
>>> rReLU = RReLU(1e-5, 1e5, True)
creating: createRReLU
'''
def __init__(self,
lower=1.0/8,
upper=1.0/3,
inplace=False,
bigdl_type="float"):
super(RReLU, self).__init__(None, bigdl_type,
lower,
upper,
inplace)
class ReLU6(Layer):
'''
Same as ReLU except that the rectifying function f(x) saturates at x = 6
:param inplace: either True = in-place or False = keeping separate state
>>> reLU6 = ReLU6(True)
creating: createReLU6
'''
def __init__(self,
inplace=False,
bigdl_type="float"):
super(ReLU6, self).__init__(None, bigdl_type,
inplace)
class Replicate(Layer):
'''
Replicate repeats input `nFeatures` times along its `dim` dimension.
Notice: No memory copy, it sets the stride along the `dim`-th dimension to zero.
:param n_features: replicate times.
:param dim: dimension to be replicated.
:param n_dim: specify the number of non-batch dimensions.
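A sketch of replication along a new leading dimension (illustrative only, not run as
a doctest):
```
import numpy as np
rep = Replicate(3)                    # repeat 3 times along dim 1
x = np.random.rand(4, 5)
y = rep.forward(x)                    # shape (3, 4, 5); no extra memory is copied
```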
>>> replicate = Replicate(2)
creating: createReplicate
'''
def __init__(self,
n_features,
dim=1,
n_dim=INTMAX,
bigdl_type="float"):
super(Replicate, self).__init__(None, bigdl_type,
n_features,
dim,
n_dim)
class RoiPooling(Layer):
'''
Region of interest pooling
The RoIPooling uses max pooling to convert the features inside any valid region of interest
into a small feature map with a fixed spatial extent of pooledH * pooledW (e.g., 7 * 7)
an RoI is a rectangular window into a conv feature map.
Each RoI is defined by a four-tuple (x1, y1, x2, y2) that specifies its
top-left corner (x1, y1) and its bottom-right corner (x2, y2).
RoI max pooling works by dividing the h * w RoI window into a pooledH * pooledW grid of
sub-windows of approximate size h/H * w/W and then max-pooling the values in each sub-window
into the corresponding output grid cell.
Pooling is applied independently to each feature map channel
:param pooled_w: spatial extent in width
:param pooled_h: spatial extent in height
:param spatial_scale: spatial scale
>>> import numpy as np
>>> input_data = np.random.rand(2,2,6,8)
>>> input_rois = np.array([0, 0, 0, 7, 5, 1, 6, 2, 7, 5, 1, 3, 1, 6, 4, 0, 3, 3, 3, 3],dtype='float64').reshape(4,5)
>>> m = RoiPooling(3,2,1.0)
creating: createRoiPooling
>>> out = m.forward([input_data,input_rois])
'''
def __init__(self,
pooled_w,
pooled_h,
spatial_scale,
bigdl_type="float"):
super(RoiPooling, self).__init__(None, bigdl_type,
pooled_w,
pooled_h,
spatial_scale)
class Scale(Layer):
'''
Scale is the combination of CMul and CAdd
Computes the element-wise product of the input and weight, with the shape of the weight
"expanded" to match the shape of the input.
Similarly, the bias is expanded and an element-wise add is performed.
:param size: size of weight and bias
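A sketch of per-channel scaling of an NCHW input (illustrative only, not run as
a doctest):
```
import numpy as np
scale = Scale([1, 3, 1, 1])           # one weight/bias per channel
x = np.random.rand(2, 3, 4, 4)
y = scale.forward(x)                  # weight and bias are expanded over N, H and W
```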
>>> scale = Scale([1,2])
creating: createScale
'''
def __init__(self,
size,
bigdl_type="float"):
super(Scale, self).__init__(None, bigdl_type,
size)
class SelectTable(Layer):
'''
Creates a module that takes a table as input and outputs the element at index `index`
(positive or negative). This can be either a table or a Tensor.
The gradients of the non-index elements are zeroed Tensors of the same size.
This is true regardless of the depth of the encapsulated Tensor as the function used
internally to do so is recursive.
:param index: the index to be selected
>>> selectTable = SelectTable(1)
creating: createSelectTable
'''
def __init__(self,
index,
bigdl_type="float"):
super(SelectTable, self).__init__(None, bigdl_type,
index)
class SoftMax(Layer):
'''
Applies the SoftMax function to an n-dimensional input Tensor, rescaling them so that the
elements of the n-dimensional output Tensor lie in the range (0, 1) and sum to 1.
Softmax is defined as: f_i(x) = exp(x_i - shift) / sum_j exp(x_j - shift)
where shift = max_i(x_i).
>>> softMax = SoftMax()
creating: createSoftMax
'''
def __init__(self,
bigdl_type="float"):
super(SoftMax, self).__init__(None, bigdl_type)
class SoftMin(Layer):
'''
Applies the SoftMin function to an n-dimensional input Tensor, rescaling them so that the
elements of the n-dimensional output Tensor lie in the range (0,1) and sum to 1.
Softmin is defined as: f_i(x) = exp(-x_i - shift) / sum_j exp(-x_j - shift)
where shift = max_i(-x_i).
>>> softMin = SoftMin()
creating: createSoftMin
'''
def __init__(self,
bigdl_type="float"):
super(SoftMin, self).__init__(None, bigdl_type)
class SoftPlus(Layer):
'''
Apply the SoftPlus function to an n-dimensional input tensor.
SoftPlus function: f_i(x) = 1/beta * log(1 + exp(beta * x_i))
:param beta: Controls sharpness of transfer function
>>> softPlus = SoftPlus(1e-5)
creating: createSoftPlus
'''
def __init__(self,
beta=1.0,
bigdl_type="float"):
super(SoftPlus, self).__init__(None, bigdl_type,
beta)
class SoftShrink(Layer):
'''
Apply the soft shrinkage function element-wise to the input Tensor
SoftShrinkage operator:
```
| x - lambda, if x > lambda
f(x) = | x + lambda, if x < -lambda
| 0, otherwise
```
:param the_lambda: lambda, default is 0.5
>>> softShrink = SoftShrink(1e-5)
creating: createSoftShrink
'''
def __init__(self,
the_lambda=0.5,
bigdl_type="float"):
super(SoftShrink, self).__init__(None, bigdl_type,
the_lambda)
class SoftSign(Layer):
'''
Apply SoftSign function to an n-dimensional input Tensor.
SoftSign function: f_i(x) = x_i / (1+|x_i|)
>>> softSign = SoftSign()
creating: createSoftSign
'''
def __init__(self,
bigdl_type="float"):
super(SoftSign, self).__init__(None, bigdl_type)
class SpatialDilatedConvolution(Layer):
'''
Apply a 2D dilated convolution over an input image.
The input tensor is expected to be a 3D or 4D(with batch) tensor.
If input is a 3D tensor nInputPlane x height x width,
owidth = floor(width + 2 * padW - dilationW * (kW-1) - 1) / dW + 1
oheight = floor(height + 2 * padH - dilationH * (kH-1) - 1) / dH + 1
Reference Paper: Yu F, Koltun V. Multi-scale context aggregation by dilated convolutions[J].
arXiv preprint arXiv:1511.07122, 2015.
:param n_input_plane: The number of expected input planes in the image given into forward().
:param n_output_plane: The number of output planes the convolution layer will produce.
:param kw: The kernel width of the convolution.
:param kh: The kernel height of the convolution.
:param dw: The step of the convolution in the width dimension. Default is 1.
:param dh: The step of the convolution in the height dimension. Default is 1.
:param pad_w: The additional zeros added per width to the input planes. Default is 0.
:param pad_h: The additional zeros added per height to the input planes. Default is 0.
:param dilation_w: The number of pixels to skip. Default is 1.
:param dilation_h: The number of pixels to skip. Default is 1.
:param init_method: Init method, Default, Xavier.
:param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices.
:param bRegularizer: instance of [[Regularizer]] applied to the bias.
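As a worked example of the size formula above, kW = kH = 3, dW = dH = 1,
padW = padH = 0 and dilationW = dilationH = 2 turn a 32 x 32 input into
owidth = oheight = floor((32 + 0 - 2 * (3 - 1) - 1) / 1) + 1 = 28.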
>>> spatialDilatedConvolution = SpatialDilatedConvolution(1, 1, 1, 1)
creating: createSpatialDilatedConvolution
'''
def __init__(self,
n_input_plane,
n_output_plane,
kw,
kh,
dw=1,
dh=1,
pad_w=0,
pad_h=0,
dilation_w=1,
dilation_h=1,
wRegularizer=None,
bRegularizer=None,
bigdl_type="float"):
super(SpatialDilatedConvolution, self).__init__(None, bigdl_type,
n_input_plane,
n_output_plane,
kw,
kh,
dw,
dh,
pad_w,
pad_h,
dilation_w,
dilation_h,
wRegularizer,
bRegularizer)
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class SpatialFullConvolution(Layer):
'''
Apply a 2D full convolution over an input image.
The input tensor is expected to be a 3D or 4D(with batch) tensor. Note that instead
of setting adjW and adjH, SpatialFullConvolution[Table, T] also accepts a table input
with two tensors: T(convInput, sizeTensor) where convInput is the standard input tensor,
and the size of sizeTensor is used to set the size of the output (will ignore the adjW and
adjH values used to construct the module). This module can be used without a bias by setting
parameter noBias = true while constructing the module.
If input is a 3D tensor nInputPlane x height x width,
owidth = (width - 1) * dW - 2*padW + kW + adjW
oheight = (height - 1) * dH - 2*padH + kH + adjH
Other frameworks call this operation "In-network Upsampling", "Fractionally-strided convolution",
"Backwards Convolution," "Deconvolution", or "Upconvolution."
Reference Paper: Long J, Shelhamer E, Darrell T. Fully convolutional networks for semantic
segmentation[C]//Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition.
2015: 3431-3440.
:param nInputPlane: The number of expected input planes in the image given into forward()
:param nOutputPlane: The number of output planes the convolution layer will produce.
:param kW: The kernel width of the convolution.
:param kH: The kernel height of the convolution.
:param dW: The step of the convolution in the width dimension. Default is 1.
:param dH: The step of the convolution in the height dimension. Default is 1.
:param padW: The additional zeros added per width to the input planes. Default is 0.
:param padH: The additional zeros added per height to the input planes. Default is 0.
:param adjW: Extra width to add to the output image. Default is 0.
:param adjH: Extra height to add to the output image. Default is 0.
:param nGroup: Kernel group number.
:param noBias: If bias is needed.
:param initMethod: Init method, Default, Xavier, Bilinear.
:param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices.
:param bRegularizer: instance of [[Regularizer]] applied to the bias.
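As a worked example of the size formula above, kW = kH = 4, dW = dH = 2,
padW = padH = 1 and adjW = adjH = 0 turn a 16 x 16 input into
owidth = oheight = (16 - 1) * 2 - 2 * 1 + 4 + 0 = 32, i.e. a 2x upsampling.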
>>> spatialFullConvolution = SpatialFullConvolution(1, 1, 1, 1)
creating: createSpatialFullConvolution
'''
def __init__(self,
n_input_plane,
n_output_plane,
kw,
kh,
dw=1,
dh=1,
pad_w=0,
pad_h=0,
adj_w=0,
adj_h=0,
n_group=1,
no_bias=False,
wRegularizer=None,
bRegularizer=None,
bigdl_type="float"):
super(SpatialFullConvolution, self).__init__(None, bigdl_type,
n_input_plane,
n_output_plane,
kw,
kh,
dw,
dh,
pad_w,
pad_h,
adj_w,
adj_h,
n_group,
no_bias,
wRegularizer,
bRegularizer)
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class VolumetricFullConvolution(Layer):
'''
Apply a 3D full convolution over a 3D input image, a sequence of images, or a video etc.
The input tensor is expected to be a 4D or 5D(with batch) tensor. Note that instead
of setting adjT, adjW and adjH, `VolumetricFullConvolution` also accepts a table input
with two tensors: T(convInput, sizeTensor) where convInput is the standard input tensor,
and the size of sizeTensor is used to set the size of the output (will ignore the adjT, adjW and
adjH values used to construct the module). This module can be used without a bias by setting
parameter noBias = true while constructing the module.
If input is a 4D tensor nInputPlane x depth x height x width,
odepth = (depth - 1) * dT - 2*padt + kT + adjT
owidth = (width - 1) * dW - 2*padW + kW + adjW
oheight = (height - 1) * dH - 2*padH + kH + adjH
Other frameworks call this operation "In-network Upsampling", "Fractionally-strided convolution",
"Backwards Convolution," "Deconvolution", or "Upconvolution."
Reference Paper: Long J, Shelhamer E, Darrell T. Fully convolutional networks for semantic
segmentation[C]//Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition.
2015: 3431-3440.
:param nInputPlane: The number of expected input planes in the image given into forward()
:param nOutputPlane: The number of output planes the convolution layer will produce.
:param kT: The kernel depth of the convolution.
:param kW: The kernel width of the convolution.
:param kH: The kernel height of the convolution.
:param dT: The step of the convolution in the depth dimension. Default is 1.
:param dW: The step of the convolution in the width dimension. Default is 1.
:param dH: The step of the convolution in the height dimension. Default is 1.
:param padT: The additional zeros added per depth to the input planes. Default is 0.
:param padW: The additional zeros added per width to the input planes. Default is 0.
:param padH: The additional zeros added per height to the input planes. Default is 0.
:param adjT: Extra depth to add to the output image. Default is 0.
:param adjW: Extra width to add to the output image. Default is 0.
:param adjH: Extra height to add to the output image. Default is 0.
:param nGroup: Kernel group number.
:param noBias: If bias is needed.
:param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices.
:param bRegularizer: instance of [[Regularizer]] applied to the bias.
>>> volumetricFullConvolution = VolumetricFullConvolution(1, 1, 1, 1, 1, 1)
creating: createVolumetricFullConvolution
'''
def __init__(self,
n_input_plane,
n_output_plane,
kt,
kw,
kh,
dt=1,
dw=1,
dh=1,
pad_t=0,
pad_w=0,
pad_h=0,
adj_t=0,
adj_w=0,
adj_h=0,
n_group=1,
no_bias=False,
wRegularizer=None,
bRegularizer=None,
bigdl_type="float"):
super(VolumetricFullConvolution, self).__init__(None, bigdl_type,
n_input_plane,
n_output_plane,
kt,
kw,
kh,
dt,
dw,
dh,
pad_t,
pad_w,
pad_h,
adj_t,
adj_w,
adj_h,
n_group,
no_bias,
wRegularizer,
bRegularizer)
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class SpatialShareConvolution(Layer):
'''
>>> spatialShareConvolution = SpatialShareConvolution(1, 1, 1, 1)
creating: createSpatialShareConvolution
>>> import numpy as np
>>> init_weight = np.random.randn(1, 12, 6, 5, 5)
>>> init_bias = np.random.randn(12)
>>> init_grad_weight = np.zeros([1, 12, 6, 5, 5])
>>> init_grad_bias = np.zeros([12])
>>> conv = SpatialShareConvolution(6, 12, 5, 5, 1, 1, 0, 0, 1, True, L1Regularizer(0.5), L1Regularizer(0.5), init_weight, init_bias, init_grad_weight, init_grad_bias)
creating: createL1Regularizer
creating: createL1Regularizer
creating: createSpatialShareConvolution
'''
def __init__(self,
n_input_plane,
n_output_plane,
kernel_w,
kernel_h,
stride_w=1,
stride_h=1,
pad_w=0,
pad_h=0,
n_group=1,
propagate_back=True,
wRegularizer=None,
bRegularizer=None,
init_weight=None,
init_bias=None,
init_grad_weight=None,
init_grad_bias=None,
with_bias=True,
bigdl_type="float"):
super(SpatialShareConvolution, self).__init__(None, bigdl_type,
n_input_plane,
n_output_plane,
kernel_w,
kernel_h,
stride_w,
stride_h,
pad_w,
pad_h,
n_group,
propagate_back,
wRegularizer,
bRegularizer,
JTensor.from_ndarray(init_weight),
JTensor.from_ndarray(init_bias),
JTensor.from_ndarray(init_grad_weight),
JTensor.from_ndarray(init_grad_bias),
with_bias)
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class VolumetricConvolution(Layer):
'''
Applies a 3D convolution over an input image composed of several input planes. The input tensor
in forward(input) is expected to be a 4D tensor (nInputPlane x time x height x width).
:param n_input_plane: The number of expected input planes in the image given into forward()
:param n_output_plane: The number of output planes the convolution layer will produce.
:param k_t: The kernel size of the convolution in time
:param k_w: The kernel width of the convolution
:param k_h: The kernel height of the convolution
:param d_t: The step of the convolution in the time dimension. Default is 1
:param d_w: The step of the convolution in the width dimension. Default is 1
:param d_h: The step of the convolution in the height dimension. Default is 1
:param pad_t: Additional zeros added to the input plane data on both sides of the time axis. Default is 0. (kT-1)/2 is often used here.
:param pad_w: The additional zeros added per width to the input planes.
:param pad_h: The additional zeros added per height to the input planes.
:param with_bias: whether with bias
:param wRegularizer: instance of [[Regularizer]] (eg. L1 or L2 regularization), applied to the input weights matrices.
:param bRegularizer: instance of [[Regularizer]] applied to the bias.
>>> volumetricConvolution = VolumetricConvolution(6, 12, 5, 5, 5, 1, 1, 1)
creating: createVolumetricConvolution
'''
def __init__(self,
n_input_plane,
n_output_plane,
k_t,
k_w,
k_h,
d_t=1,
d_w=1,
d_h=1,
pad_t=0,
pad_w=0,
pad_h=0,
with_bias=True,
wRegularizer=None,
bRegularizer=None,
bigdl_type="float"):
super(VolumetricConvolution, self).__init__(None, bigdl_type,
n_input_plane,
n_output_plane,
k_t,
k_w,
k_h,
d_t,
d_w,
d_h,
pad_t,
pad_w,
pad_h,
with_bias,
wRegularizer,
bRegularizer)
def set_init_method(self, weight_init_method = None, bias_init_method = None):
callBigDlFunc(self.bigdl_type, "setInitMethod", self.value,
weight_init_method, bias_init_method)
return self
class VolumetricMaxPooling(Layer):
'''
Applies 3D max-pooling operation in kTxkWxkH regions by step size dTxdWxdH.
The number of output features is equal to the number of input planes / dT.
The input can optionally be padded with zeros. Padding should be smaller than
half of kernel size. That is, padT < kT/2, padW < kW/2 and padH < kH/2
:param k_t: The kernel size
:param k_w: The kernel width
:param k_h: The kernel height
:param d_t: The step in the time dimension
:param d_w: The step in the width dimension
:param d_h: The step in the height dimension
:param pad_t: The padding in the time dimension
:param pad_w: The padding in the width dimension
:param pad_h: The padding in the height dimension
>>> volumetricMaxPooling = VolumetricMaxPooling(5, 5, 5, 1, 1, 1)
creating: createVolumetricMaxPooling
'''
def __init__(self,
k_t,
k_w,
k_h,
d_t,
d_w,
d_h,
pad_t=0,
pad_w=0,
pad_h=0,
bigdl_type="float"):
super(VolumetricMaxPooling, self).__init__(None, bigdl_type,
k_t,
k_w,
k_h,
d_t,
d_w,
d_h,
pad_t,
pad_w,
pad_h)
class VolumetricAveragePooling(Layer):
'''
Applies 3D average-pooling operation in kTxkWxkH regions by step size dTxdWxdH.
The number of output features is equal to the number of input planes / dT.
The input can optionally be padded with zeros. Padding should be smaller than
half of kernel size. That is, padT < kT/2, padW < kW/2 and padH < kH/2
:param k_t: The kernel size
:param k_w: The kernel width
:param k_h: The kernel height
:param d_t: The step in the time dimension
:param d_w: The step in the width dimension
:param d_h: The step in the height dimension
:param pad_t: The padding in the time dimension
:param pad_w: The padding in the width dimension
:param pad_h: The padding in the height dimension
:param count_include_pad: whether to include padding when dividing the number of elements in pooling region
:param ceil_mode: whether the output size is to be ceiled or floored
>>> volumetricAveragePooling = VolumetricAveragePooling(5, 5, 5, 1, 1, 1)
creating: createVolumetricAveragePooling
'''
def __init__(self,
k_t,
k_w,
k_h,
d_t,
d_w,
d_h,
pad_t=0,
pad_w=0,
pad_h=0,
count_include_pad=True,
ceil_mode=False,
bigdl_type="float"):
super(VolumetricAveragePooling, self).__init__(None, bigdl_type,
k_t,
k_w,
k_h,
d_t,
d_w,
d_h,
pad_t,
pad_w,
pad_h,
count_include_pad,
ceil_mode)
class SpatialZeroPadding(Layer):
'''
Each feature map of a given input is padded with specified number of zeros.
If padding values are negative, then input is cropped.
:param padLeft: pad left position
:param padRight: pad right position
:param padTop: pad top position
:param padBottom: pad bottom position
>>> spatialZeroPadding = SpatialZeroPadding(1, 1, 1, 1)
creating: createSpatialZeroPadding
'''
def __init__(self,
pad_left,
pad_right,
pad_top,
pad_bottom,
bigdl_type="float"):
super(SpatialZeroPadding, self).__init__(None, bigdl_type,
pad_left,
pad_right,
pad_top,
pad_bottom)
class SplitTable(Layer):
'''
Creates a module that takes a Tensor as input and
outputs a table of tensors, splitting the Tensor along
the specified dimension `dimension`. Please note the dimension starts from 1.
The input to this layer is expected to be a tensor, or a batch of tensors;
when using mini-batch, a batch of sample tensors will be passed to the layer and
the user needs to specify the number of dimensions of each sample tensor in a
batch using `nInputDims`.
:param dimension: to be split along this dimension
:param n_input_dims: specify the number of dimensions that this module will receive. If it is more than the dimension of the input tensors, the first dimension would be considered as batch size
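A sketch of splitting along the first dimension (illustrative only, not run as a doctest):
```
import numpy as np
split = SplitTable(1)                 # split along dim 1, no batch handling
x = np.random.rand(3, 5)
y = split.forward(x)                  # a table of 3 tensors, each of shape (5,)
```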
>>> splitTable = SplitTable(1, 1)
creating: createSplitTable
'''
def __init__(self,
dimension,
n_input_dims=-1,
bigdl_type="float"):
super(SplitTable, self).__init__(None, bigdl_type,
dimension,
n_input_dims)
class Sqrt(Layer):
'''
Apply an element-wise sqrt operation.
>>> sqrt = Sqrt()
creating: createSqrt
'''
def __init__(self,
bigdl_type="float"):
super(Sqrt, self).__init__(None, bigdl_type)
class Square(Layer):
'''
Apply an element-wise square operation.
>>> square = Square()
creating: createSquare
'''
def __init__(self,
bigdl_type="float"):
super(Square, self).__init__(None, bigdl_type)
class Squeeze(Layer):
'''
Delete all singleton dimensions or a specific singleton dim.
:param dim: Optional. The dimension to be deleted. Default: delete all singleton dimensions.
:param num_input_dims: Optional. If in a batch model, set to the inputDims.
>>> squeeze = Squeeze(1)
creating: createSqueeze
'''
def __init__(self,
dim,
num_input_dims=INTMIN,
bigdl_type="float"):
super(Squeeze, self).__init__(None, bigdl_type,
dim,
num_input_dims)
class Sum(Layer):
'''
It is a simple layer which applies a sum operation over the given dimension.
When nInputDims is provided, the input will be considered as batches.
Then the sum operation will be applied in (dimension + 1)
The input to this layer is expected to be a tensor, or a batch of tensors;
when using mini-batch, a batch of sample tensors will be passed to the layer and
the user needs to specify the number of dimensions of each sample tensor in the
batch using `nInputDims`.
:param dimension: the dimension to be applied sum operation
:param n_input_dims: specify the number of dimensions that this module will receive. If it is more than the dimension of the input tensors, the first dimension would be considered as batch size
:param size_average: default is false, if it is true, it will return the mean instead
:param squeeze: default is true, which will squeeze the sum dimension; set it to false to keep the sum dimension
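A sketch of summing over the second dimension (illustrative only, not run as a doctest):
```
import numpy as np
s = Sum(2)                            # sum over dim 2, squeeze the result
x = np.ones([3, 4])
y = s.forward(x)                      # shape (3,), every entry equals 4.0
```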
>>> sum = Sum(1, 1, True, True)
creating: createSum
'''
def __init__(self,
dimension=1,
n_input_dims=-1,
size_average=False,
squeeze=True,
bigdl_type="float"):
super(Sum, self).__init__(None, bigdl_type,
dimension,
n_input_dims,
squeeze,
size_average)
class TanhShrink(Layer):
'''
A simple layer for each element of the input tensor, do the following operation
during the forward process:
f(x) = x - tanh(x)
>>> tanhShrink = TanhShrink()
creating: createTanhShrink
'''
def __init__(self,
bigdl_type="float"):
super(TanhShrink, self).__init__(None, bigdl_type)
class Threshold(Layer):
'''
Thresholds the input Tensor.
Values in the Tensor smaller than th are replaced with v.
:param th: the threshold to compare with
:param v: the value to replace with
:param ip: inplace mode
>>> threshold = Threshold(1e-5, 1e-5, True)
creating: createThreshold
'''
def __init__(self,
th=1e-6,
v=0.0,
ip=False,
bigdl_type="float"):
super(Threshold, self).__init__(None, bigdl_type,
th,
v,
ip)
class Negative(Layer):
'''
Create a Negative layer, computing the negative value of each element of the input tensor.
:param inplace: if output tensor reuse input tensor storage. Default value is false
>>> negative = Negative(False)
creating: createNegative
'''
def __init__(self,
inplace = False,
bigdl_type="float"):
super(Negative, self).__init__(None, bigdl_type, inplace)
class Unsqueeze(Layer):
'''
Create an Unsqueeze layer. Insert singleton dim (i.e., dimension 1) at position pos.
For an input with dim = input.dim(),
there are dim + 1 possible positions to insert the singleton dimension.
:param pos: The position will be insert singleton.
:param num_input_dims: Optional. If in a batch model, set to the inputDim
>>> unsqueeze = Unsqueeze(1, 1)
creating: createUnsqueeze
'''
def __init__(self,
pos,
num_input_dims=INTMIN,
bigdl_type="float"):
super(Unsqueeze, self).__init__(None, bigdl_type,
pos,
num_input_dims)
class Reshape(Layer):
'''
The forward(input) reshapes the input tensor into a size(0) * size(1) * ... tensor, taking the
elements row-wise.
:param size: the reshape size
>>> reshape = Reshape([1, 28, 28])
creating: createReshape
>>> reshape = Reshape([1, 28, 28], False)
creating: createReshape
'''
def __init__(self, size, batch_mode=None, bigdl_type="float"):
super(Reshape, self).__init__(None, bigdl_type, size, batch_mode)
class BiRecurrent(Container):
'''
Create a Bidirectional recurrent layer
:param merge: merge layer
>>> biRecurrent = BiRecurrent(CAddTable())
creating: createCAddTable
creating: createBiRecurrent
>>> biRecurrent = BiRecurrent()
creating: createBiRecurrent
'''
def __init__(self,
merge=None,
bigdl_type="float"):
super(BiRecurrent, self).__init__(None, bigdl_type, merge)
class ConcatTable(Container):
'''
ConcatTable is a container module like Concat. It applies the input
to each member module; the input can be a tensor or a table.
ConcatTable usually works with CAddTable and CMulTable to
implement element-wise add/multiply on the outputs of two modules.
>>> concatTable = ConcatTable()
creating: createConcatTable
'''
def __init__(self,
bigdl_type="float"):
super(ConcatTable, self).__init__(None, bigdl_type)
class Identity(Layer):
'''
Identity just forwards the input to the output.
It's useful in a parallel container to obtain the original input.
>>> identity = Identity()
creating: createIdentity
'''
def __init__(self,
bigdl_type="float"):
super(Identity, self).__init__(None, bigdl_type)
class Reverse(Layer):
'''
Reverse the input w.r.t. the given dimension.
The input can be a Tensor or Table.
:param dimension: the dimension along which the input is reversed
>>> reverse = Reverse()
creating: createReverse
>>> reverse = Reverse(1, False)
creating: createReverse
'''
def __init__(self,
dimension=1,
is_inplace=False,
bigdl_type="float"):
super(Reverse, self).__init__(None, bigdl_type,
dimension,
is_inplace)
class Transpose(Layer):
'''
Transpose input along specified dimensions
:param permutations: dimension pairs that need to be swapped
>>> transpose = Transpose([(1,2)])
creating: createTranspose
'''
def __init__(self,
permutations,
bigdl_type="float"):
super(Transpose, self).__init__(None, bigdl_type,
permutations)
class SpatialContrastiveNormalization(Layer):
'''
Subtractive + divisive contrast normalization.
:param n_input_plane: number of input planes, default is 1.
:param kernel: kernel tensor, default is a 9 x 9 tensor.
:param threshold: threshold
:param thresval: threshold value to replace with if data is smaller than threshold
>>> kernel = np.ones([9,9]).astype("float32")
>>> spatialContrastiveNormalization = SpatialContrastiveNormalization(1, kernel)
creating: createSpatialContrastiveNormalization
>>> spatialContrastiveNormalization = SpatialContrastiveNormalization()
creating: createSpatialContrastiveNormalization
'''
def __init__(self,
n_input_plane=1,
kernel=None,
threshold=1e-4,
thresval=1e-4,
bigdl_type="float"):
super(SpatialContrastiveNormalization, self).__init__(None, bigdl_type,
n_input_plane,
JTensor.from_ndarray(kernel),
threshold,
thresval)
class SpatialConvolutionMap(Layer):
'''
This class is a generalization of SpatialConvolution.
It uses a generic connection table between input and output features.
The SpatialConvolution is equivalent to using a full connection table.
When padW and padH are both -1, we use a padding algorithm similar to the "SAME"
padding of tensorflow. That is
outHeight = Math.ceil(inHeight.toFloat/strideH.toFloat)
outWidth = Math.ceil(inWidth.toFloat/strideW.toFloat)
padAlongHeight = Math.max(0, (outHeight - 1) * strideH + kernelH - inHeight)
padAlongWidth = Math.max(0, (outWidth - 1) * strideW + kernelW - inWidth)
padTop = padAlongHeight / 2
padLeft = padAlongWidth / 2
:param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices.
:param bRegularizer: instance of [[Regularizer]]applied to the bias.
>>> ct = np.ones([9,9]).astype("float32")
>>> spatialConvolutionMap = SpatialConvolutionMap(ct, 9, 9)
creating: createSpatialConvolutionMap
'''
def __init__(self,
conn_table,
kw,
kh,
dw=1,
dh=1,
pad_w=0,
pad_h=0,
wRegularizer=None,
bRegularizer=None,
bigdl_type="float"):
super(SpatialConvolutionMap, self).__init__(None, bigdl_type,
JTensor.from_ndarray(conn_table),
kw,
kh,
dw,
dh,
pad_w,
pad_h,
wRegularizer,
bRegularizer)
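# Hedged worked example (not from the original BigDL sources) of the "SAME" padding
# arithmetic quoted in the SpatialConvolutionMap docstring above. Plain Python only,
# no BigDL objects involved; the default sizes are illustrative.
def _same_padding_example(in_height=28, in_width=28, kernel_h=3, kernel_w=3, stride_h=2, stride_w=2):
    import math
    out_height = int(math.ceil(float(in_height) / stride_h))  # ceil(28 / 2) = 14
    out_width = int(math.ceil(float(in_width) / stride_w))    # ceil(28 / 2) = 14
    pad_along_height = max(0, (out_height - 1) * stride_h + kernel_h - in_height)  # 1
    pad_along_width = max(0, (out_width - 1) * stride_w + kernel_w - in_width)     # 1
    # padTop / padLeft take the floor half of the total padding
    return out_height, out_width, pad_along_height // 2, pad_along_width // 2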
class SpatialDivisiveNormalization(Layer):
'''
Applies a spatial division operation on a series of 2D inputs using kernel for
computing the weighted average in a neighborhood. The neighborhood is defined for
a local spatial region that is the same size as the kernel and across all features. For
an input image, since there is only one feature, the region is only spatial. For
an RGB image, the weighted average is taken over RGB channels and a spatial region.
If the kernel is 1D, then it will be used to construct a separable 2D kernel.
The operations will be much more efficient in this case.
The kernel is generally chosen as a gaussian when it is believed that the correlation
of two pixel locations decreases with increasing distance. On the feature dimension,
a uniform average is used since the weighting across features is not known.
:param nInputPlane: number of input plane, default is 1.
:param kernel: kernel tensor, default is a 9 x 9 tensor.
:param threshold: threshold
:param thresval: threshold value to replace with if data is smaller than threshold
>>> kernel = np.ones([9,9]).astype("float32")
>>> spatialDivisiveNormalization = SpatialDivisiveNormalization(2,kernel)
creating: createSpatialDivisiveNormalization
>>> spatialDivisiveNormalization = SpatialDivisiveNormalization()
creating: createSpatialDivisiveNormalization
'''
def __init__(self,
n_input_plane=1,
kernel=None,
threshold=1e-4,
thresval=1e-4,
bigdl_type="float"):
super(SpatialDivisiveNormalization, self).__init__(None, bigdl_type,
n_input_plane,
JTensor.from_ndarray(kernel),
threshold,
thresval)
class SpatialSubtractiveNormalization(Layer):
'''
Applies a spatial subtraction operation on a series of 2D inputs using kernel for
computing the weighted average in a neighborhood. The neighborhood is defined for
a local spatial region that is the same size as the kernel and across all features. For
an input image, since there is only one feature, the region is only spatial. For
an RGB image, the weighted average is taken over RGB channels and a spatial region.
If the kernel is 1D, then it will be used to construct a separable 2D kernel.
The operations will be much more efficient in this case.
The kernel is generally chosen as a gaussian when it is believed that the correlation
of two pixel locations decreases with increasing distance. On the feature dimension,
a uniform average is used since the weighting across features is not known.
:param n_input_plane: number of input plane, default is 1.
:param kernel: kernel tensor, default is a 9 x 9 tensor.
>>> kernel = np.ones([9,9]).astype("float32")
>>> spatialSubtractiveNormalization = SpatialSubtractiveNormalization(2,kernel)
creating: createSpatialSubtractiveNormalization
>>> spatialSubtractiveNormalization = SpatialSubtractiveNormalization()
creating: createSpatialSubtractiveNormalization
'''
def __init__(self,
n_input_plane=1,
kernel=None,
bigdl_type="float"):
super(SpatialSubtractiveNormalization, self).__init__(None, bigdl_type,
n_input_plane,
JTensor.from_ndarray(kernel))
class SpatialWithinChannelLRN(Layer):
'''
The local response normalization layer performs a kind of lateral inhibition
by normalizing over local input regions. the local regions extend spatially,
in separate channels (i.e., they have shape 1 x local_size x local_size).
:param size: the side length of the square region to sum over
:param alpha: the scaling parameter
:param beta: the exponent
>>> layer = SpatialWithinChannelLRN()
creating: createSpatialWithinChannelLRN
'''
def __init__(self,
size=5,
alpha=1.0,
beta=0.75,
bigdl_type="float"):
super(SpatialWithinChannelLRN, self).__init__(None, bigdl_type,
size,
alpha,
beta)
class Pack(Layer):
'''
Stacks a list of n-dimensional tensors into one (n+1)-dimensional tensor.
>>> layer = Pack(1)
creating: createPack
'''
def __init__(self, dimension, bigdl_type="float"):
super(Pack, self).__init__(None, bigdl_type, dimension)
class ConvLSTMPeephole(Layer):
'''
| Convolution Long Short Term Memory architecture with peephole.
| Ref. A.: https://arxiv.org/abs/1506.04214 (blueprint for this module)
| B. https://github.com/viorik/ConvLSTM
:param input_size: number of input planes in the image given into forward()
:param output_size: number of output planes the convolution layer will produce
:param kernel_i: Convolutional filter size to convolve input
:param kernel_c: Convolutional filter size to convolve cell
:param stride: The step of the convolution, default is 1
:param padding: The additional zeros added, default is -1
:param activation: activation function, by default to be Tanh if not specified.
It can also be the name of an existing activation as a string.
:param inner_activation: activation function for the inner cells, by default to be Sigmoid if not specified.
It can also be the name of an existing activation as a string.
:param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices
:param uRegularizer: instance [[Regularizer]](eg. L1 or L2 regularization), applied to the recurrent weights matrices
:param bRegularizer: instance of [[Regularizer]]applied to the bias.
:param cRegularizer: instance of [[Regularizer]]applied to peephole.
:param with_peephole: whether to use the last cell status to control a gate.
>>> convlstm = ConvLSTMPeephole(4, 3, 3, 3, 1, -1, Tanh(), HardSigmoid(), L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5))
creating: createTanh
creating: createHardSigmoid
creating: createL1Regularizer
creating: createL1Regularizer
creating: createL1Regularizer
creating: createL1Regularizer
creating: createConvLSTMPeephole
'''
def __init__(self, input_size, output_size, kernel_i, kernel_c, stride=1, padding=-1,
activation=None, inner_activation=None,
wRegularizer=None, uRegularizer=None, bRegularizer=None, cRegularizer=None,
with_peephole=True, bigdl_type="float"):
if not activation:
activation = Tanh()
if not inner_activation:
inner_activation = Sigmoid()
if isinstance(activation, six.string_types):
activation = get_activation_by_name(activation)
if isinstance(inner_activation, six.string_types):
inner_activation = get_activation_by_name(inner_activation)
super(ConvLSTMPeephole, self).__init__(None, bigdl_type, input_size, output_size, kernel_i, kernel_c,
stride, padding, activation, inner_activation,
wRegularizer, uRegularizer, bRegularizer, cRegularizer, with_peephole)
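# Hedged usage sketch (not from the original BigDL sources): activation and
# inner_activation may also be given as strings, which the constructor above resolves
# through get_activation_by_name. A running BigDL JVM gateway is assumed and the
# sizes are illustrative.
def _conv_lstm_peephole_example():
    return ConvLSTMPeephole(input_size=4, output_size=3, kernel_i=3, kernel_c=3,
                            stride=1, padding=-1,
                            activation="tanh", inner_activation="sigmoid")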
class Tile(Layer):
'''
Replicate the input 'copies' times along the 'dim' dimension
>>> layer = Tile(1, 2)
creating: createTile
'''
def __init__(self, dim = 1, copies = 2, bigdl_type="float"):
super(Tile, self).__init__(None, bigdl_type, dim, copies)
class BinaryThreshold(Layer):
'''
Binary threshold, 1 if value > th, 0 otherwise
>>> layer = BinaryThreshold(0.1, False)
creating: createBinaryThreshold
'''
def __init__(self, th=1e-6, ip = False, bigdl_type="float"):
super(BinaryThreshold, self).__init__(None, bigdl_type, th, ip)
class ConvLSTMPeephole3D(Layer):
'''
:param input_size: number of input planes in the image given into forward()
:param output_size: number of output planes the convolution layer will produce
:param kernel_i: Convolutional filter size to convolve input
:param kernel_c: Convolutional filter size to convolve cell
:param stride: The step of the convolution
:param padding: The additional zeros added
:param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices
:param uRegularizer: instance [[Regularizer]](eg. L1 or L2 regularization), applied to the recurrent weights matrices
:param bRegularizer: instance of [[Regularizer]]applied to the bias.
:param cRegularizer: instance of [[Regularizer]]applied to peephole.
:param with_peephole: whether to use the last cell status to control a gate.
>>> convlstm = ConvLSTMPeephole3D(4, 3, 3, 3, 1, -1, L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5), L1Regularizer(0.5))
creating: createL1Regularizer
creating: createL1Regularizer
creating: createL1Regularizer
creating: createL1Regularizer
creating: createConvLSTMPeephole3D
'''
def __init__(self, input_size, output_size, kernel_i, kernel_c, stride=1, padding=-1, wRegularizer=None, uRegularizer=None,
bRegularizer=None, cRegularizer=None, with_peephole=True, bigdl_type="float"):
super(ConvLSTMPeephole3D, self).__init__(None, bigdl_type, input_size, output_size, kernel_i, kernel_c, stride,
padding, wRegularizer, uRegularizer, bRegularizer, cRegularizer, with_peephole)
class ResizeBilinear(Layer):
"""
Resize the input image with bilinear interpolation. The input image must be a float tensor with
NHWC layout
:param output_height: output height
:param output_width: output width
:param align_corner: align corner or not
>>> resizeBilinear = ResizeBilinear(10, 20, False)
creating: createResizeBilinear
"""
def __init__(self, output_height, output_width, align_corner, bigdl_type="float"):
super(ResizeBilinear, self).__init__(None, bigdl_type, output_height, output_width, align_corner)
class GaussianSampler(Layer):
"""
Takes {mean, log_variance} as input and samples from the Gaussian distribution
>>> sampler = GaussianSampler()
creating: createGaussianSampler
"""
def __init__(self, bigdl_type="float"):
super(GaussianSampler, self).__init__(None, bigdl_type)
class HardSigmoid(Layer):
"""
Apply Hard-sigmoid function
```
| 0, if x < -2.5
f(x) = | 1, if x > 2.5
| 0.2 * x + 0.5, otherwise
```
>>> hardSigmoid = HardSigmoid()
creating: createHardSigmoid
"""
def __init__(self, bigdl_type="float"):
super(HardSigmoid, self).__init__(None, bigdl_type)
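# Hedged plain-Python reference (not from the original BigDL sources) of the
# piecewise definition quoted in the HardSigmoid docstring above, useful for
# sanity-checking outputs without a JVM.
def _hard_sigmoid_reference(x):
    if x < -2.5:
        return 0.0
    if x > 2.5:
        return 1.0
    return 0.2 * x + 0.5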
class Highway(Layer):
"""
Densely connected highway network.
Highway layers are a natural extension of LSTMs to feedforward networks.
:param size: input size
:param with_bias: whether to include a bias
:param activation: name of activation function to use
:param wRegularizer: instance of [[Regularizer]](eg. L1 or L2 regularization), applied to the input weights matrices.
:param bRegularizer: instance of [[Regularizer]]applied to the bias.
>>> highway = Highway(2)
creating: createHighway
"""
def __init__(self, size, with_bias=True, activation = None, wRegularizer=None, bRegularizer=None, bigdl_type="float"):
super(Highway, self).__init__(None, bigdl_type, size, with_bias, activation, wRegularizer, bRegularizer)
class UpSampling3D(Layer):
"""
Upsampling layer for 3D inputs.
Repeats the 1st, 2nd and 3rd dimensions
of the data by size[0], size[1] and size[2] respectively.
The input data is assumed to be of the form `minibatch x channels x depth x height x width`.
:param size: Repeats the depth, height and width dimensions of the data by size[0], size[1] and size[2] respectively
>>> upsample3d = UpSampling3D([1, 2, 3])
creating: createUpSampling3D
"""
def __init__(self, size, bigdl_type="float"):
super(UpSampling3D, self).__init__(None, bigdl_type, size)
def _test():
import doctest
from pyspark import SparkContext
from bigdl.nn import layer
from bigdl.util.common import init_engine
from bigdl.util.common import create_spark_conf
globs = layer.__dict__.copy()
sc = SparkContext(master="local[4]", appName="test layer",
conf=create_spark_conf())
globs['sc'] = sc
init_engine()
(failure_count, test_count) = doctest.testmod(globs=globs,
optionflags=doctest.ELLIPSIS)
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 | -3,741,591,936,649,356,000 | 34.911302 | 329 | 0.56794 | false |
sudasana/armcom2 | xp_loader.py | 2 | 9469 | # Changed slightly to be compatible with Python 3
from sys import platform
if platform == 'darwin':
import tcod as libtcod
elif platform == 'linux':
import libtcodpy_local as libtcod
else:
import libtcodpy as libtcod
import binascii
##################################
# In-memory XP format is as follows:
# Returned structure is a dictionary with the keys version, layer_count, width, height, and layer_data
## Version is stored in case it's useful for someone, but as mentioned in the format description it probably won't be unless format changes happen
## layer_count is a full 32 bit int, though right now REXPaint only exports or manages up to 4 layers
## Width and height are extracted from the layer with largest width and height - this value will hold true for all layers for now as per the format description
## layer_data is a list of individual layers, which are stored in the following format
### Each layer is a dictionary with keys width, height (see above), and cells.
### Cells is a row major 2d array of, again, dictionaries with the values 'keycode' (ascii keycode), 'fore_r/g/b', and 'back_r/g/b' (technically ints but in value 0-255)
##################################
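##################################
# Hedged navigation sketch (not part of the original loader): given xp = load_xp_string(data),
# the cell at layer 0, column x, row y is reached as
# xp['layer_data'][0]['cells'][x][y]['keycode'] (plus 'fore_r/g/b' and 'back_r/g/b'),
# and xp['width'] / xp['height'] give the largest layer dimensions.
##################################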
##################################
# Used primarily internally to parse the data, feel free to reference them externally if it's useful.
# Changing these programmatically will, of course, screw up the parsing (unless the format changes and you're using an old copy of this file)
##################################
version_bytes = 4
layer_count_bytes = 4
layer_width_bytes = 4
layer_height_bytes = 4
layer_keycode_bytes = 4
layer_fore_rgb_bytes = 3
layer_back_rgb_bytes = 3
layer_cell_bytes = layer_keycode_bytes + layer_fore_rgb_bytes + layer_back_rgb_bytes
##################################
# REXPaint color key for transparent background colors. Not directly used here, but you should reference this when calling libtcod's console_set_key_color on offscreen consoles.
##################################
transparent_cell_back_r = 255
transparent_cell_back_g = 0
transparent_cell_back_b = 255
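# Hedged usage sketch (not part of the original loader): marking that color as the
# transparent key on an offscreen console, as the note above suggests. width/height
# are placeholders.
# con = libtcod.console_new(width, height)
# libtcod.console_set_key_color(con, libtcod.Color(transparent_cell_back_r,
#                                                  transparent_cell_back_g,
#                                                  transparent_cell_back_b))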
####################################################################
# START LIBTCOD SPECIFIC CODE
##################################
# Used primarily internally to parse the data, feel free to reference them externally if it's useful.
# Changing these programmatically will, of course, screw up the parsing (unless the format changes and you're using an old copy of this file)
##################################
#the solid square character
poskey_tile_character = 219
#some or all of the below may appear in libtcod's color definitions; and in fact, you can use libtcod colors as you please for position keys.
#These are merely the colors provided in the accompanying palette.
poskey_color_red = libtcod.Color(255, 0, 0)
poskey_color_lightpurple = libtcod.Color(254, 0, 255) # specifically 254 as 255, 0, 255 is considered a transparent key color in REXPaint
poskey_color_orange = libtcod.Color(255, 128, 0)
poskey_color_pink = libtcod.Color(255, 0, 128)
poskey_color_green = libtcod.Color(0, 255, 0)
poskey_color_teal = libtcod.Color(0, 255, 255)
poskey_color_yellow = libtcod.Color(255, 255, 0)
poskey_color_blue = libtcod.Color(0, 0, 255)
poskey_color_lightblue = libtcod.Color(0, 128, 255)
poskey_color_purple = libtcod.Color(128, 0, 255)
poskey_color_white = libtcod.Color(255, 255, 255)
##################################
# please note - this function writes the contents of transparent cells to the provided console.
# If you're building an offscreen console and want to use the default (or some other) color for transparency, please call libtcod's console.set_key_color(color)
##################################
def load_layer_to_console(console, xp_file_layer):
if not xp_file_layer['width'] or not xp_file_layer['height']:
raise AttributeError('Attempted to call load_layer_to_console on data that didn\'t have a width or height key, check your data')
for x in range(xp_file_layer['width']):
for y in range(xp_file_layer['height']):
cell_data = xp_file_layer['cells'][x][y]
fore_color = libtcod.Color(cell_data['fore_r'], cell_data['fore_g'], cell_data['fore_b'])
back_color = libtcod.Color(cell_data['back_r'], cell_data['back_g'], cell_data['back_b'])
libtcod.console_put_char_ex(console, x, y, cell_data['keycode'], fore_color, back_color)
def get_position_key_xy(xp_file_layer, poskey_color):
for x in range(xp_file_layer['width']):
for y in range(xp_file_layer['height']):
cell_data = xp_file_layer['cells'][x][y]
if cell_data['keycode'] == poskey_tile_character:
fore_color_matches = cell_data['fore_r'] == poskey_color.r and cell_data['fore_g'] == poskey_color.g and cell_data['fore_b'] == poskey_color.b
back_color_matches = cell_data['back_r'] == poskey_color.r and cell_data['back_g'] == poskey_color.g and cell_data['back_b'] == poskey_color.b
if fore_color_matches or back_color_matches:
return (x, y)
raise LookupError('No position key was specified for color ' + str(poskey_color) + ', check your .xp file and/or the input color')
# END LIBTCOD SPECIFIC CODE
####################################################################
##################################
# loads in an xp file from an unzipped string (gained from opening a .xp file with gzip and calling .read())
# reverse_endian controls whether the slices containing data for things like layer width, height, number of layers, etc. is reversed
# so far as I can tell Python is doing int conversions in big-endian, while the .xp format stores them in little-endian
# I may just not be aware of it being unneeded, but have it there in case
##################################
def load_xp_string(file_string, reverse_endian=True):
offset = 0
version = file_string[offset : offset + version_bytes]
offset += version_bytes
layer_count = file_string[offset : offset + layer_count_bytes]
offset += layer_count_bytes
if reverse_endian:
version = version[::-1]
layer_count = layer_count[::-1]
# hex-encodes the numbers then converts them to an int
version = int(binascii.b2a_hex(version), 16)
layer_count = int(binascii.b2a_hex(layer_count), 16)
layers = []
current_largest_width = 0
current_largest_height = 0
for layer in range(layer_count):
#slight lookahead to figure out how much data to feed load_layer
this_layer_width = file_string[offset:offset + layer_width_bytes]
this_layer_height = file_string[offset + layer_width_bytes:offset + layer_width_bytes + layer_height_bytes]
if reverse_endian:
this_layer_width = this_layer_width[::-1]
this_layer_height = this_layer_height[::-1]
this_layer_width = int(binascii.b2a_hex(this_layer_width), 16)
this_layer_height = int(binascii.b2a_hex(this_layer_height), 16)
current_largest_width = max(current_largest_width, this_layer_width)
current_largest_height = max(current_largest_height, this_layer_height)
layer_data_size = layer_width_bytes + layer_height_bytes + (layer_cell_bytes * this_layer_width * this_layer_height)
layer_data_raw = file_string[offset:offset + layer_data_size]
layer_data = parse_layer(file_string[offset:offset + layer_data_size], reverse_endian)
layers.append(layer_data)
offset += layer_data_size
return {
'version':version,
'layer_count':layer_count,
'width':current_largest_width,
'height':current_largest_height,
'layer_data':layers
}
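# Hedged usage sketch (not part of the original loader): .xp files are gzip-compressed,
# so a typical load path looks like the following. The file name is illustrative.
def _example_load(path='art.xp'):
    import gzip
    data = gzip.open(path, 'rb').read()
    xp = load_xp_string(data)
    con = libtcod.console_new(xp['width'], xp['height'])
    load_layer_to_console(con, xp['layer_data'][0])
    return con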
##################################
# Takes a single layer's data and returns the format listed at the top of the file for a single layer.
##################################
def parse_layer(layer_string, reverse_endian=True):
offset = 0
width = layer_string[offset:offset + layer_width_bytes]
offset += layer_width_bytes
height = layer_string[offset:offset + layer_height_bytes]
offset += layer_height_bytes
if reverse_endian:
width = width[::-1]
height = height[::-1]
width = int(binascii.b2a_hex(width), 16)
height = int(binascii.b2a_hex(height), 16)
cells = []
for x in range(width):
row = []
for y in range(height):
cell_data_raw = layer_string[offset:offset + layer_cell_bytes]
cell_data = parse_individual_cell(cell_data_raw, reverse_endian)
row.append(cell_data)
offset += layer_cell_bytes
cells.append(row)
return {
'width':width,
'height':height,
'cells':cells
}
##################################
# Pulls out the keycode and the foreground/background RGB values from a single cell's data, returning them in the format listed at the top of this file for a single cell.
##################################
def parse_individual_cell(cell_string, reverse_endian=True):
offset = 0
keycode = cell_string[offset:offset + layer_keycode_bytes]
if reverse_endian:
keycode = keycode[::-1]
keycode = int(binascii.b2a_hex(keycode), 16)
offset += layer_keycode_bytes
fore_r = int(binascii.b2a_hex(cell_string[offset:offset+1]), 16)
offset += 1
fore_g = int(binascii.b2a_hex(cell_string[offset:offset+1]), 16)
offset += 1
fore_b = int(binascii.b2a_hex(cell_string[offset:offset+1]), 16)
offset += 1
back_r = int(binascii.b2a_hex(cell_string[offset:offset+1]), 16)
offset += 1
back_g = int(binascii.b2a_hex(cell_string[offset:offset+1]), 16)
offset += 1
back_b = int(binascii.b2a_hex(cell_string[offset:offset+1]), 16)
offset += 1
return {
'keycode':keycode,
'fore_r':fore_r,
'fore_g':fore_g,
'fore_b':fore_b,
'back_r':back_r,
'back_g':back_g,
'back_b':back_b,
} | gpl-3.0 | -3,345,553,247,127,047,700 | 37.971193 | 177 | 0.672405 | false |
jaruba/chromium.src | tools/telemetry/telemetry/core/platform/mac_platform_backend_unittest.py | 12 | 1532 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import decorators
from telemetry.core import platform as platform_module
from telemetry.core.platform import platform_backend
class MacPlatformBackendTest(unittest.TestCase):
def testVersionCamparison(self):
self.assertGreater(platform_backend.YOSEMITE,
platform_backend.MAVERICKS)
self.assertGreater(platform_backend.MAVERICKS,
platform_backend.SNOWLEOPARD)
self.assertGreater(platform_backend.LION,
platform_backend.LEOPARD)
self.assertEqual(platform_backend.YOSEMITE, 'yosemite')
self.assertEqual(platform_backend.MAVERICKS, 'mavericks')
self.assertEqual('%s2' % platform_backend.MAVERICKS, 'mavericks2')
self.assertEqual(''.join([platform_backend.MAVERICKS, '2']),
'mavericks2')
self.assertEqual(platform_backend.LION.upper(), 'LION')
@decorators.Enabled('mac')
def testGetCPUStats(self):
platform = platform_module.GetHostPlatform()
backend = platform._platform_backend # pylint: disable=W0212
cpu_stats = backend.GetCpuStats(os.getpid())
self.assertGreater(cpu_stats['CpuProcessTime'], 0)
self.assertTrue(cpu_stats.has_key('ContextSwitches'))
if backend.GetOSVersionName() >= platform_backend.MAVERICKS:
self.assertTrue(cpu_stats.has_key('IdleWakeupCount'))
| bsd-3-clause | 3,043,040,282,875,732,000 | 39.315789 | 72 | 0.722585 | false |
FluffyMortain/heekscnc | nc/hpgl2d_read.py | 28 | 3259 | import num_reader
import sys
import math
class Parser(num_reader.NumReader):
def __init__(self, writer):
num_reader.NumReader.__init__(self, writer)
self.i = 0
self.j = 0
self.x = 0
self.y = 0
self.down_z = 0
self.up_z = 20
self.up = True
self.units_to_mm = 0.01
def ParsePuOrPd(self, up):
self.line_index = self.line_index + 1
x = self.get_number()
if len(x) > 0:
y = self.get_number()
if len(y) > 0:
if up: color = "rapid"
else: color = "feed"
self.add_word(color)
self.writer.begin_path(color)
if up: z = self.up_z
else: z = self.down_z
if self.up != up:
self.writer.add_line(self.x * self.units_to_mm, self.y * self.units_to_mm, z)
self.writer.add_line(int(x) * self.units_to_mm, int(y) * self.units_to_mm, z)
self.writer.end_path()
self.up = up
self.x = int(x)
self.y = int(y)
def ParseAA(self):
self.line_index = self.line_index + 1
cx = self.get_number()
if len(cx) > 0:
cy = self.get_number()
if len(cy) > 0:
a = self.get_number()
if len(a) > 0:
self.add_word("feed")
self.writer.begin_path("feed")
z = self.down_z
if self.up:
self.writer.add_line(self.x * self.units_to_mm, self.y * self.units_to_mm, z)
sdx = self.x - int(cx)
sdy = self.y - int(cy)
start_angle = math.atan2(sdy, sdx)
end_angle = start_angle + int(a) * math.pi/180
radius = math.sqrt(sdx*sdx + sdy*sdy)
ex = int(cx) + radius * math.cos(end_angle)
ey = int(cy) + radius * math.sin(end_angle)
if int(a) > 0: d = 1
else: d = -1
self.writer.add_arc(ex * self.units_to_mm, ey * self.units_to_mm, 0.0, i = int(-sdx) * self.units_to_mm, j = int(-sdy) * self.units_to_mm, d = d)
self.writer.end_path()
self.up = False
self.x = int(ex)
self.y = int(ey)
def ParseFromFirstLetter(self, c):
if c == 'P':
self.line_index = self.line_index + 1
if self.line_index < self.line_length:
c1 = self.line[self.line_index]
self.parse_word += c1
if c1 == 'U': # PU
self.ParsePuOrPd(True)
elif c1 == 'D': # PD
self.ParsePuOrPd(False)
elif c == 'A':
self.line_index = self.line_index + 1
if self.line_index < self.line_length:
c1 = self.line[self.line_index]
self.parse_word += c1
if c1 == 'A': # AA, arc absolute
self.ParseAA()
| bsd-3-clause | 7,434,199,755,518,409,000 | 34.211111 | 165 | 0.419454 | false |
PopCap/GameIdea | Engine/Source/ThirdParty/HTML5/emsdk/emscripten/1.30.0/tools/asm_module.py | 1 | 12650 |
import sys, re, itertools
import shared, js_optimizer
class AsmModule():
def __init__(self, filename):
self.filename = filename
self.js = open(filename).read()
self.start_asm = self.js.find(js_optimizer.start_asm_marker)
self.start_funcs = self.js.find(js_optimizer.start_funcs_marker)
self.end_funcs = self.js.rfind(js_optimizer.end_funcs_marker)
self.end_asm = self.js.rfind(js_optimizer.end_asm_marker)
# pre and asm
self.pre_js = self.js[:self.start_asm]
self.asm_js = self.js[self.start_asm:self.end_asm]
# heap initializer
try:
self.staticbump = int(re.search(shared.JS.memory_staticbump_pattern, self.pre_js).group(1))
except:
self.staticbump = 0
if self.staticbump:
try:
self.mem_init_js = re.search(shared.JS.memory_initializer_pattern, self.pre_js).group(0)
except:
self.mem_init_js = ''
# global initializers
global_inits = re.search(shared.JS.global_initializers_pattern, self.pre_js)
if global_inits:
self.global_inits_js = global_inits.group(0)
self.global_inits = map(lambda init: init.split('{')[2][1:].split('(')[0], global_inits.groups(0)[0].split(','))
else:
self.global_inits_js = ''
self.global_inits = []
# imports (and global variables)
first_var = self.js.find('var ', self.js.find('var ', self.start_asm)+4)
self.pre_imports_js = self.js[self.start_asm:first_var]
self.imports_js = self.js[first_var:self.start_funcs]
self.imports = {}
for i in js_optimizer.import_sig.finditer(self.imports_js):
imp = i.group(0).split('var ')[1][:-1]
if ',' not in imp:
key, value = imp.split('=', 1)
self.imports[key.strip()] = value.strip()
else:
for part in imp.split(','):
assert part.count('(') == part.count(')') # we must not break ',' in func(x, y)!
assert part.count('=') == 1
key, value = part.split('=')
self.imports[key.strip()] = value.strip()
#print >> sys.stderr, 'imports', self.imports
# funcs
self.funcs_js = self.js[self.start_funcs:self.end_funcs]
self.funcs = set([m.group(1) for m in js_optimizer.func_sig.finditer(self.funcs_js)])
#print 'funcs', self.funcs
# tables and exports
post_js = self.js[self.end_funcs:self.end_asm]
ret = post_js.find('return ')
self.tables_js = post_js[:ret]
self.exports_js = post_js[ret:]
self.tables = self.parse_tables(self.tables_js)
self.exports = set([export.strip() for export in self.exports_js[self.exports_js.find('{')+1:self.exports_js.find('}')].split(',')])
# post
self.post_js = self.js[self.end_asm:]
self.sendings = {}
for sending in [sending.strip() for sending in self.post_js[self.post_js.find('}, { ')+5:self.post_js.find(' }, buffer);')].split(',')]:
colon = sending.find(':')
self.sendings[sending[:colon].replace('"', '')] = sending[colon+1:].strip()
self.module_defs = set(re.findall('var [\w\d_$]+ = Module\["[\w\d_$]+"\] = asm\["[\w\d_$]+"\];\n', self.post_js))
self.extra_funcs_js = ''
def set_pre_js(self, staticbump=None, js=None):
if staticbump is None: staticbump = self.staticbump
if js is None: js = self.mem_init_js
self.pre_js = re.sub(shared.JS.memory_staticbump_pattern, 'STATICTOP = STATIC_BASE + %d;\n' % (staticbump,) + js, self.pre_js, count=1)
def relocate_into(self, main):
# heap initializer
if self.staticbump > 0:
new_mem_init = self.mem_init_js[:self.mem_init_js.rfind(', ')] + ', Runtime.GLOBAL_BASE+%d)' % main.staticbump
main.set_pre_js(main.staticbump + self.staticbump, new_mem_init)
# Find function name replacements TODO: do not rename duplicate names with duplicate contents, just merge them
replacements = {}
for func in self.funcs:
rep = func
while rep in main.funcs:
rep += '_'
replacements[func] = rep
#print >> sys.stderr, 'replacements:', replacements
# sendings: add invokes for new tables
all_sendings = main.sendings
added_sending = False
for table in self.tables:
if table not in main.tables:
sig = table[table.rfind('_')+1:]
func = 'invoke_%s' % sig
all_sendings[func] = func
main.pre_js += 'var %s = %s;\n' % (func, shared.JS.make_invoke(sig, named=False))
added_sending = True
# imports
all_imports = main.imports
for key, value in self.imports.iteritems():
if key in self.funcs or key in main.funcs: continue # external function in one module, implemented in the other
value_concrete = '.' not in value # env.key means it is an import, an external value, and not a concrete one
main_value = main.imports.get(key)
main_value_concrete = main_value and '.' not in main_value
if value_concrete and main_value_concrete: continue # standard global var
if not main_value or value_concrete:
if '+' in value:
# relocate
value = value.replace('(', '').replace(')', '').replace('| 0', '').replace('|0', '').replace(' ', '')
left, right = value.split('+')
assert left == 'H_BASE'
value = str(main.staticbump + int(right))
all_imports[key] = value
if (value_concrete or main_value_concrete) and key in all_sendings:
del all_sendings[key] # import of external value no longer needed
for key in all_imports.keys():
if key in self.funcs:
del all_imports[key] # import in main, provided in side
main.imports_js = '\n'.join(['var %s = %s;' % (key, value) for key, value in all_imports.iteritems()]) + '\n'
# check for undefined references to global variables
def check_import(key, value):
if value.startswith('+') or value.endswith('|0'): # ignore functions
if key not in all_sendings:
print >> sys.stderr, 'warning: external variable %s is still not defined after linking' % key
all_sendings[key] = '0'
for key, value in all_imports.iteritems(): check_import(key, value)
if added_sending:
sendings_js = ', '.join(['%s: %s' % (key, value) for key, value in all_sendings.iteritems()])
sendings_start = main.post_js.find('}, { ')+5
sendings_end = main.post_js.find(' }, buffer);')
main.post_js = main.post_js[:sendings_start] + sendings_js + main.post_js[sendings_end:]
# tables
f_bases = {}
f_sizes = {}
for table, data in self.tables.iteritems():
main.tables[table] = self.merge_tables(table, main.tables.get(table), data, replacements, f_bases, f_sizes)
main.combine_tables()
#print >> sys.stderr, 'f bases', f_bases
# relocate
temp = shared.Building.js_optimizer(self.filename, ['asm', 'relocate', 'last'], extra_info={
'replacements': replacements,
'fBases': f_bases,
'hBase': main.staticbump
})
#print >> sys.stderr, 'relocated side into', temp
relocated_funcs = AsmModule(temp)
shared.try_delete(temp)
main.extra_funcs_js = relocated_funcs.funcs_js.replace(js_optimizer.start_funcs_marker, '\n')
# update function table uses
ft_marker = 'FUNCTION_TABLE_'
def update_fts(what):
updates = []
i = 1 # avoid seeing marker in recursion
while 1:
i = what.find(ft_marker, i)
if i < 0: break;
start = i
end = what.find('[', start)
table = what[i:end]
if table not in f_sizes:
# table was not modified
i += len(ft_marker)
continue
nesting = 1
while nesting > 0:
next = what.find(']', end+1)
nesting -= 1
nesting += what.count('[', end+1, next)
end = next
assert end > 0
mask = what.rfind('&', start, end)
assert mask > 0 and end - mask <= 13
fixed = update_fts(what[start:mask+1] + str(f_sizes[table]-1) + ']')
updates.append((start, end, fixed))
i = end # additional function table uses were done by recursion
# apply updates
if len(updates) == 0: return what
parts = []
so_far = 0
for i in range(len(updates)):
start, end, fixed = updates[i]
parts.append(what[so_far:start])
parts.append(fixed)
so_far = end+1
parts.append(what[so_far:])
return ''.join(parts)
main.funcs_js = update_fts(main.funcs_js)
main.extra_funcs_js = update_fts(main.extra_funcs_js)
# global initializers
if self.global_inits:
my_global_inits = map(lambda init: replacements[init] if init in replacements else init, self.global_inits)
all_global_inits = map(lambda init: '{ func: function() { %s() } }' % init, main.global_inits + my_global_inits)
all_global_inits_js = '/* global initializers */ __ATINIT__.push(' + ','.join(all_global_inits) + ');'
if main.global_inits:
target = main.global_inits_js
else:
target = '// === Body ===\n'
all_global_inits_js = target + all_global_inits_js
main.pre_js = main.pre_js.replace(target, all_global_inits_js)
# exports
def rep_exp(export):
key, value = export.split(':')
if key in replacements:
repped = replacements[key]
return repped + ': ' + repped
return export
my_exports = map(rep_exp, self.exports)
exports = main.exports.union(my_exports)
main.exports_js = 'return {' + ','.join(list(exports)) + '};\n})\n'
# post
def rep_def(deff):
key = deff.split(' ')[1]
if key in replacements:
rep = replacements[key]
return 'var %s = Module["%s"] = asm["%s"];\n' % (rep, rep, rep)
return deff
my_module_defs = map(rep_def, self.module_defs)
new_module_defs = set(my_module_defs).difference(main.module_defs)
if len(new_module_defs) > 0:
position = main.post_js.find('Runtime.') # Runtime is the start of the hardcoded ones
main.post_js = main.post_js[:position] + ''.join(list(new_module_defs)) + '\n' + main.post_js[position:]
def write(self, out):
f = open(out, 'w')
f.write(self.pre_js)
f.write(self.pre_imports_js)
f.write(self.imports_js)
f.write(self.funcs_js)
f.write(self.extra_funcs_js)
f.write(self.tables_js)
f.write(self.exports_js)
f.write(self.post_js)
f.close()
# Utilities
def parse_tables(self, js):
tables = {}
parts = js.split(';')
for part in parts:
if '=' not in part: continue
part = part.split('var ')[1]
name, data = part.split('=')
tables[name.strip()] = data.strip()
return tables
def merge_tables(self, table, main, side, replacements, f_bases, f_sizes):
sig = table.split('_')[-1]
side = side[1:-1].split(',')
side = map(lambda s: s.strip(), side)
side = map(lambda f: replacements[f] if f in replacements else f, side)
if not main:
f_bases[sig] = 0
f_sizes[table] = len(side)
return '[' + ','.join(side) + ']'
main = main[1:-1].split(',')
main = map(lambda m: m.strip(), main)
# TODO: handle non-aliasing case too
assert len(main) % 2 == 0
f_bases[sig] = len(main)
ret = main + side
size = 2
while size < len(ret): size *= 2
aborter = ret[1] # we can assume odd indexes have an aborting function with the right signature
ret = ret + [aborter]*(size - len(ret))
assert len(ret) == size
f_sizes[table] = size
return '[' + ','.join(ret) + ']'
def combine_tables(self):
self.tables_js = '// EMSCRIPTEN_END_FUNCS\n'
for table, data in self.tables.iteritems():
self.tables_js += 'var %s = %s;\n' % (table, data)
def get_table_funcs(self):
return set(itertools.chain.from_iterable(map(lambda x: map(lambda y: y.strip(), x[1:-1].split(',')), self.tables.values())))
def get_funcs_map(self):
funcs = js_optimizer.split_funcs(self.funcs_js)
ret = {}
for name, content in funcs:
ret[name] = content
return ret
def apply_funcs_map(self, funcs_map): # assumes self.funcs is the set of funcs, in the right order
jses = []
for f in self.funcs:
if f in funcs_map: # TODO: fix
jses.append(funcs_map[f])
self.funcs_js = '\n'.join(jses)
def get_import_type(self, imp):
def is_int(x):
try:
int(x)
return True
except:
return False
def is_float(x):
try:
float(x)
return True
except:
return False
if '|0' in imp or '| 0' in imp or (is_int(imp) and not '.0' in imp or '+' in imp):
return 'i'
elif '.0' in imp or '+' in imp or is_float(imp):
return 'd'
else:
return '?'
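# Hedged usage sketch (not part of the original emscripten tool): linking a side
# module into a main module with the AsmModule API above. File names are illustrative.
def _example_link(main_js='main.asm.js', side_js='side.asm.js', out_js='linked.asm.js'):
    main = AsmModule(main_js)
    side = AsmModule(side_js)
    side.relocate_into(main)
    main.write(out_js)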
| bsd-2-clause | 3,567,093,845,589,277,700 | 36.315634 | 140 | 0.599368 | false |
rep/certificate-transparency | python/ct/crypto/verify.py | 1 | 9218 | """Verify CT log statements."""
import hashlib
import io
import struct
from ct.crypto import error
from ct.crypto import merkle
from ct.crypto import pem
from ct.proto import client_pb2
from ct.proto import ct_pb2
from ct.serialization import tls_message
import ecdsa
def decode_signature(signature):
"""Decode the TLS-encoded serialized signature.
Args:
signature: TLS-encoded signature.
Returns:
a tuple of (hash algorithm, signature algorithm, signature data)
Raises:
ct.crypto.error.EncodingError: invalid TLS encoding.
"""
sig_stream = io.BytesIO(signature)
sig_prefix = sig_stream.read(2)
if len(sig_prefix) != 2:
raise error.EncodingError("Invalid algorithm prefix %s" %
sig_prefix.encode("hex"))
hash_algo, sig_algo = struct.unpack(">BB", sig_prefix)
if (hash_algo != ct_pb2.DigitallySigned.SHA256 or
sig_algo != ct_pb2.DigitallySigned.ECDSA):
raise error.EncodingError("Invalid algorithm(s) %d, %d" %
(hash_algo, sig_algo))
length_prefix = sig_stream.read(2)
if len(length_prefix) != 2:
raise error.EncodingError("Invalid signature length prefix %s" %
length_prefix.encode("hex"))
sig_length, = struct.unpack(">H", length_prefix)
remaining = sig_stream.read()
if len(remaining) != sig_length:
raise error.EncodingError("Invalid signature length %d for "
"signature %s with length %d" %
(sig_length, remaining.encode("hex"),
len(remaining)))
return (hash_algo, sig_algo, remaining)
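# Hedged round-trip sketch (not part of the original module): building a TLS-encoded
# signature blob with the same struct layout the parser above expects, then decoding
# it. The DER bytes are placeholders, not a real ECDSA signature.
def _example_decode(sig_der='\x30\x06\x02\x01\x01\x02\x01\x01'):
    blob = struct.pack(">BBH", ct_pb2.DigitallySigned.SHA256,
                       ct_pb2.DigitallySigned.ECDSA, len(sig_der)) + sig_der
    return decode_signature(blob)  # -> (SHA256, ECDSA, sig_der)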
class LogVerifier(object):
"""CT log verifier."""
__ECDSA_READ_MARKERS = ("PUBLIC KEY", "ECDSA PUBLIC KEY")
__ECDSA_WRITE_MARKER = "ECDSA PUBLIC KEY"
def __init__(self, key_info, merkle_verifier=merkle.MerkleVerifier()):
"""Initialize from KeyInfo protocol buffer and a MerkleVerifier."""
self.__merkle_verifier = merkle_verifier
if key_info.type != client_pb2.KeyInfo.ECDSA:
raise error.UnsupportedAlgorithmError("Key type %d not supported" %
key_info.type)
# Will raise a PemError on invalid encoding
self.__der, _ = pem.from_pem(key_info.pem_key,
LogVerifier.__ECDSA_READ_MARKERS)
try:
self.__pubkey = ecdsa.VerifyingKey.from_der(self.__der)
except ecdsa.der.UnexpectedDER as e:
raise error.EncodingError(e)
def __repr__(self):
return "%r(public key: %r)" % (self.__class__.__name__,
pem.to_pem(self.__der,
self.__ECDSA_WRITE_MARKER))
def __str__(self):
return "%s(public key: %s)" % (self.__class__.__name__,
pem.to_pem(self.__der,
self.__ECDSA_WRITE_MARKER))
def _encode_sth_input(self, sth_response):
if len(sth_response.sha256_root_hash) != 32:
raise error.EncodingError("Wrong hash length: expected 32, got %d" %
len(sth_response.sha256_root_hash))
return struct.pack(">BBQQ32s", ct_pb2.V1, ct_pb2.TREE_HEAD,
sth_response.timestamp, sth_response.tree_size,
sth_response.sha256_root_hash)
def _verify(self, signature_input, signature):
try:
return self.__pubkey.verify(signature, signature_input,
hashfunc=hashlib.sha256,
sigdecode=ecdsa.util.sigdecode_der)
except ecdsa.der.UnexpectedDER:
raise error.EncodingError("Invalid DER encoding for signature %s",
signature.encode("hex"))
except ecdsa.keys.BadSignatureError:
raise error.SignatureError("Signature did not verify: %s",
signature.encode("hex"))
@error.returns_true_or_raises
def verify_sth(self, sth_response):
"""Verify the STH Response.
Args:
sth_response: client_pb2.SthResponse proto. The response must have
all fields present.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ct.crypto.error.EncodingError: failed to encode signature input,
or decode the signature.
ct.crypto.error.SignatureError: invalid signature.
"""
signature_input = self._encode_sth_input(sth_response)
#TODO(eranm): Pass the actual hash and signature algorithms to the
# verify method.
(_, _, signature) = decode_signature(sth_response.tree_head_signature)
return self._verify(signature_input, signature)
@staticmethod
@error.returns_true_or_raises
def verify_sth_temporal_consistency(old_sth, new_sth):
"""Verify the temporal consistency for two STH responses.
For two STHs, verify that the newer STH has bigger tree size.
Does not verify STH signatures or consistency of hashes.
Args:
old_sth: client_pb2.SthResponse proto. The STH with the older
timestamp must be supplied first.
new_sth: client_pb2.SthResponse proto.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ct.crypto.error.ConsistencyError: STHs are inconsistent
ValueError: "Older" STH is not older.
"""
if old_sth.timestamp > new_sth.timestamp:
raise ValueError("Older STH has newer timestamp (%d vs %d), did "
"you supply inputs in the wrong order?" %
(old_sth.timestamp, new_sth.timestamp))
if (old_sth.timestamp == new_sth.timestamp and
old_sth.tree_size != new_sth.tree_size):
# Issuing two different STHs for the same timestamp is illegal,
# even if they are otherwise consistent.
raise error.ConsistencyError("Inconsistency: different tree sizes "
"for the same timestamp")
if (old_sth.timestamp < new_sth.timestamp and
old_sth.tree_size > new_sth.tree_size):
raise error.ConsistencyError("Inconsistency: older tree has bigger "
"size")
return True
@error.returns_true_or_raises
def verify_sth_consistency(self, old_sth, new_sth, proof):
"""Verify consistency of two STHs.
Verify the temporal consistency and consistency proof for two STH
responses. Does not verify STH signatures.
Args:
old_sth: client_pb2.SthResponse() proto. The STH with the older
timestamp must be supplied first.
new_sth: client_pb2.SthResponse() proto.
proof: a list of SHA256 audit nodes.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ConsistencyError: STHs are inconsistent
ProofError: proof is invalid
ValueError: "Older" STH is not older.
"""
self.verify_sth_temporal_consistency(old_sth, new_sth)
self.__merkle_verifier.verify_tree_consistency(
old_sth.tree_size, new_sth.tree_size, old_sth.sha256_root_hash,
new_sth.sha256_root_hash, proof)
return True
@error.returns_true_or_raises
def verify_sct(self, sct, certificate):
"""Verify the SCT over the X.509 certificate provided
Not suitable for Precertificates.
Args:
sct: client_pb2.SignedCertificateTimestamp proto. Must have
all fields present.
certificate: cert.Certificate instance.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ct.crypto.error.EncodingError: failed to encode signature input,
or decode the signature.
ct.crypto.error.SignatureError: invalid signature.
"""
if sct.version != ct_pb2.V1:
raise error.UnsupportedVersionError("Cannot handle version: %s" %
sct.version)
dsentry = client_pb2.DigitallySignedTimestampedEntry()
dsentry.sct_version = ct_pb2.V1
dsentry.signature_type = client_pb2.CERTIFICATE_TIMESTAMP
dsentry.timestamp = sct.timestamp
dsentry.entry_type = client_pb2.X509_ENTRY
dsentry.asn1_cert = certificate.to_der()
dsentry.ct_extensions = sct.extensions
signature_input = tls_message.encode(dsentry)
return self._verify(signature_input, sct.signature.signature)
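# Hedged usage sketch (not part of the original module): wiring a PEM-encoded log key
# into a LogVerifier and checking an STH response. The PEM content is a placeholder.
def _example_verify_sth(pem_key, sth_response):
    key_info = client_pb2.KeyInfo()
    key_info.type = client_pb2.KeyInfo.ECDSA
    key_info.pem_key = pem_key
    verifier = LogVerifier(key_info)
    return verifier.verify_sth(sth_response)  # raises SignatureError on a bad signature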
| apache-2.0 | 4,790,613,992,014,680,000 | 39.429825 | 80 | 0.580495 | false |
thunsaker/cloudpebble | ide/migrations/0001_initial.py | 3 | 12056 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Project'
db.create_table(u'ide_project', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('version_def_name', self.gf('django.db.models.fields.CharField')(default='APP_RESOURCES', max_length=50)),
))
db.send_create_signal(u'ide', ['Project'])
# Adding unique constraint on 'Project', fields ['owner', 'name']
db.create_unique(u'ide_project', ['owner_id', 'name'])
# Adding model 'TemplateProject'
db.create_table(u'ide_templateproject', (
(u'project_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['ide.Project'], unique=True, primary_key=True)),
('template_kind', self.gf('django.db.models.fields.IntegerField')(db_index=True)),
))
db.send_create_signal(u'ide', ['TemplateProject'])
# Adding model 'BuildResult'
db.create_table(u'ide_buildresult', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='builds', to=orm['ide.Project'])),
('uuid', self.gf('django.db.models.fields.CharField')(default='8277f892d4d84a69ba21c3989a02c61c', max_length=32)),
('state', self.gf('django.db.models.fields.IntegerField')(default=1)),
('started', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('finished', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal(u'ide', ['BuildResult'])
# Adding model 'ResourceFile'
db.create_table(u'ide_resourcefile', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='resources', to=orm['ide.Project'])),
('file_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('kind', self.gf('django.db.models.fields.CharField')(max_length=9)),
))
db.send_create_signal(u'ide', ['ResourceFile'])
# Adding unique constraint on 'ResourceFile', fields ['project', 'file_name']
db.create_unique(u'ide_resourcefile', ['project_id', 'file_name'])
# Adding model 'ResourceIdentifier'
db.create_table(u'ide_resourceidentifier', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('resource_file', self.gf('django.db.models.fields.related.ForeignKey')(related_name='identifiers', to=orm['ide.ResourceFile'])),
('resource_id', self.gf('django.db.models.fields.CharField')(max_length=100)),
('character_regex', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
))
db.send_create_signal(u'ide', ['ResourceIdentifier'])
# Adding unique constraint on 'ResourceIdentifier', fields ['resource_file', 'resource_id']
db.create_unique(u'ide_resourceidentifier', ['resource_file_id', 'resource_id'])
# Adding model 'SourceFile'
db.create_table(u'ide_sourcefile', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='source_files', to=orm['ide.Project'])),
('file_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'ide', ['SourceFile'])
# Adding unique constraint on 'SourceFile', fields ['project', 'file_name']
db.create_unique(u'ide_sourcefile', ['project_id', 'file_name'])
def backwards(self, orm):
# Removing unique constraint on 'SourceFile', fields ['project', 'file_name']
db.delete_unique(u'ide_sourcefile', ['project_id', 'file_name'])
# Removing unique constraint on 'ResourceIdentifier', fields ['resource_file', 'resource_id']
db.delete_unique(u'ide_resourceidentifier', ['resource_file_id', 'resource_id'])
# Removing unique constraint on 'ResourceFile', fields ['project', 'file_name']
db.delete_unique(u'ide_resourcefile', ['project_id', 'file_name'])
# Removing unique constraint on 'Project', fields ['owner', 'name']
db.delete_unique(u'ide_project', ['owner_id', 'name'])
# Deleting model 'Project'
db.delete_table(u'ide_project')
# Deleting model 'TemplateProject'
db.delete_table(u'ide_templateproject')
# Deleting model 'BuildResult'
db.delete_table(u'ide_buildresult')
# Deleting model 'ResourceFile'
db.delete_table(u'ide_resourcefile')
# Deleting model 'ResourceIdentifier'
db.delete_table(u'ide_resourceidentifier')
# Deleting model 'SourceFile'
db.delete_table(u'ide_sourcefile')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ide.buildresult': {
'Meta': {'object_name': 'BuildResult'},
'finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'builds'", 'to': u"orm['ide.Project']"}),
'started': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'7d2901ebedec4f708e706c6424a71e73'", 'max_length': '32'})
},
u'ide.project': {
'Meta': {'unique_together': "(('owner', 'name'),)", 'object_name': 'Project'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'version_def_name': ('django.db.models.fields.CharField', [], {'default': "'APP_RESOURCES'", 'max_length': '50'})
},
u'ide.resourcefile': {
'Meta': {'unique_together': "(('project', 'file_name'),)", 'object_name': 'ResourceFile'},
'file_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resources'", 'to': u"orm['ide.Project']"})
},
u'ide.resourceidentifier': {
'Meta': {'unique_together': "(('resource_file', 'resource_id'),)", 'object_name': 'ResourceIdentifier'},
'character_regex': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource_file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'identifiers'", 'to': u"orm['ide.ResourceFile']"}),
'resource_id': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ide.sourcefile': {
'Meta': {'unique_together': "(('project', 'file_name'),)", 'object_name': 'SourceFile'},
'file_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'source_files'", 'to': u"orm['ide.Project']"})
},
u'ide.templateproject': {
'Meta': {'object_name': 'TemplateProject', '_ormbases': [u'ide.Project']},
u'project_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['ide.Project']", 'unique': 'True', 'primary_key': 'True'}),
'template_kind': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
}
}
complete_apps = ['ide'] | mit | -1,705,941,945,434,616,000 | 62.125654 | 187 | 0.584439 | false |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/bx_python-0.7.2-py2.7-linux-x86_64-ucs4.egg/bx/align/tools/fuse.py | 7 | 2888 | """
Tools for fusing contiguous alignment blocks together.
"""
from itertools import *
from copy import deepcopy
def fuse_list( mafs ):
"""
Try to fuse a list of blocks by progressively fusing each adjacent pair.
"""
last = None
for m in mafs:
if last is None:
last = m
else:
fused = fuse( last, m )
if fused:
last = fused
else:
yield last
last = m
if last:
yield last
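# --- Added example (not part of the original bx-python module) ---------------
# A minimal sketch of how fuse_list() is typically used: stream blocks from a
# MAF reader and write the fused blocks back out.  The file names below are
# hypothetical placeholders.
def _example_fuse_maf( in_path="in.maf", out_path="out.maf" ):
    import bx.align.maf
    reader = bx.align.maf.Reader( open( in_path ) )
    writer = bx.align.maf.Writer( open( out_path, "w" ) )
    for block in fuse_list( reader ):
        writer.write( block )
    writer.close()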
def fuse( m1, m2 ):
"""
Attempt to fuse two blocks. If they can be fused returns a new block,
otherwise returns None.
Example:
>>> import bx.align.maf
>>> block1 = bx.align.maf.from_string( '''
... a score=0.0
... s hg18.chr10 52686 44 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGC
... s panTro1.chrUn_random 208115356 44 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGT
... ''' )
>>> block2 = bx.align.maf.from_string( '''
... a score=0.0
... s hg18.chr10 52730 69 + 135374737 GCAGGTACAATTCATCAAGAAAGGAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTTTGAC
... s panTro1.chrUn_random 208115400 69 - 240967748 GCAGCTACTATTCATCAAGAAAGGGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTTTGAT
... ''' )
>>> fused = fuse( block1, block2 )
>>> print fused
a score=0.0
s hg18.chr10 52686 113 + 135374737 GTGCTAACTTACTGCTCCACAGAAAACATCAATTCTGCTCATGCGCAGGTACAATTCATCAAGAAAGGAATTACAACTTCAGAAATGTGTTCAAAATATATCCATACTTTGAC
s panTro1.chrUn_random 208115356 113 - 240967748 GTGCTAACTGACTGCTCCAGAGAAAACATCAATTCTGTTCATGTGCAGCTACTATTCATCAAGAAAGGGATTACAACTTCAGAAATGTGTTCAAAGTGTATCCATACTTTGAT
<BLANKLINE>
"""
# Check if the blocks are adjacent, return none if not.
if len( m1.components ) != len( m2.components ): return None
for c1, c2 in izip( m1.components, m2.components ):
if c1.src != c2.src: return None
if c1.strand != c2.strand: return None
if c1.end != c2.start: return None
# Try to fuse:
n = deepcopy( m1 )
for c1, c2 in izip( n.components, m2.components ):
c1.text += c2.text
c1.size += c2.size
n.text_size = len( n.components[0].text )
return n
class FusingAlignmentWriter( object ):
"""
Wrapper for an alignment Writer which attempts to fuse adjacent blocks
"""
def __init__( self, maf_writer ):
self.maf_writer = maf_writer
self.last = None
def write( self, m ):
if not self.last:
self.last = m
else:
fused = fuse( self.last, m )
if fused:
self.last = fused
else:
self.maf_writer.write( self.last )
self.last = m
def close( self ):
if self.last: self.maf_writer.write( self.last )
self.maf_writer.close()
| gpl-3.0 | 2,240,081,035,608,953,300 | 31.818182 | 166 | 0.614612 | false |
derekchiang/keystone | keystone/openstack/common/gettextutils.py | 2 | 16240 | # Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from keystone.openstack.common.gettextutils import _
"""
import copy
import gettext
import locale
from logging import handlers
import os
import re
from babel import localedata
import six
_localedir = os.environ.get('keystone'.upper() + '_LOCALEDIR')
_t = gettext.translation('keystone', localedir=_localedir, fallback=True)
_AVAILABLE_LANGUAGES = {}
USE_LAZY = False
def enable_lazy():
"""Convenience function for configuring _() to use lazy gettext
Call this at the start of execution to enable the gettextutils._
function to use lazy gettext functionality. This is useful if
your project is importing _ directly instead of using the
gettextutils.install() way of importing the _ function.
"""
global USE_LAZY
USE_LAZY = True
def _(msg):
if USE_LAZY:
return Message(msg, domain='keystone')
else:
if six.PY3:
return _t.gettext(msg)
return _t.ugettext(msg)
def install(domain, lazy=False):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
install() function.
The main difference from gettext.install() is that we allow
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
:param domain: the translation domain
:param lazy: indicates whether or not to install the lazy _() function.
The lazy _() introduces a way to do deferred translation
of messages by installing a _ that builds Message objects,
instead of strings, which can then be lazily translated into
any available locale.
"""
if lazy:
# NOTE(mrodden): Lazy gettext functionality.
#
# The following introduces a deferred way to do translations on
# messages in OpenStack. We override the standard _() function
# and % (format string) operation to build Message objects that can
# later be translated when we have more information.
def _lazy_gettext(msg):
"""Create and return a Message object.
Lazy gettext function for a given domain, it is a factory method
for a project/module to get a lazy gettext function for its own
translation domain (i.e. nova, glance, cinder, etc.)
Message encapsulates a string so that we can translate
it later when needed.
"""
return Message(msg, domain=domain)
from six import moves
moves.builtins.__dict__['_'] = _lazy_gettext
else:
localedir = '%s_LOCALEDIR' % domain.upper()
if six.PY3:
gettext.install(domain,
localedir=os.environ.get(localedir))
else:
gettext.install(domain,
localedir=os.environ.get(localedir),
unicode=True)
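# --- Added usage sketch (illustrative; 'myproject' is a hypothetical domain) --
# Typical wiring at program start-up:
#
#     from keystone.openstack.common import gettextutils
#     gettextutils.install('myproject', lazy=True)   # installs a builtin _()
#
# or, when a project imports _ directly from this module instead:
#     gettextutils.enable_lazy()
#     print(_('Hello, %s') % 'world')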
class Message(six.text_type):
"""A Message object is a unicode object that can be translated.
Translation of Message is done explicitly using the translate() method.
For all non-translation intents and purposes, a Message is simply unicode,
and can be treated as such.
"""
def __new__(cls, msgid, msgtext=None, params=None, domain='keystone', *args):
"""Create a new Message object.
In order for translation to work gettext requires a message ID, this
msgid will be used as the base unicode text. It is also possible
for the msgid and the base unicode text to be different by passing
the msgtext parameter.
"""
# If the base msgtext is not given, we use the default translation
# of the msgid (which is in English) just in case the system locale is
# not English, so that the base text will be in that locale by default.
if not msgtext:
msgtext = Message._translate_msgid(msgid, domain)
# We want to initialize the parent unicode with the actual object that
# would have been plain unicode if 'Message' was not enabled.
msg = super(Message, cls).__new__(cls, msgtext)
msg.msgid = msgid
msg.domain = domain
msg.params = params
return msg
def translate(self, desired_locale=None):
"""Translate this message to the desired locale.
:param desired_locale: The desired locale to translate the message to,
if no locale is provided the message will be
translated to the system's default locale.
:returns: the translated message in unicode
"""
translated_message = Message._translate_msgid(self.msgid,
self.domain,
desired_locale)
if self.params is None:
# No need for more translation
return translated_message
# This Message object may have been formatted with one or more
# Message objects as substitution arguments, given either as a single
# argument, part of a tuple, or as one or more values in a dictionary.
# When translating this Message we need to translate those Messages too
translated_params = _translate_args(self.params, desired_locale)
translated_message = translated_message % translated_params
return translated_message
@staticmethod
def _translate_msgid(msgid, domain, desired_locale=None):
if not desired_locale:
system_locale = locale.getdefaultlocale()
# If the system locale is not available to the runtime use English
if not system_locale[0]:
desired_locale = 'en_US'
else:
desired_locale = system_locale[0]
locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
lang = gettext.translation(domain,
localedir=locale_dir,
languages=[desired_locale],
fallback=True)
if six.PY3:
translator = lang.gettext
else:
translator = lang.ugettext
translated_message = translator(msgid)
return translated_message
def __mod__(self, other):
# When we mod a Message we want the actual operation to be performed
# by the parent class (i.e. unicode()), the only thing we do here is
# save the original msgid and the parameters in case of a translation
unicode_mod = super(Message, self).__mod__(other)
modded = Message(self.msgid,
msgtext=unicode_mod,
params=self._sanitize_mod_params(other),
domain=self.domain)
return modded
def _sanitize_mod_params(self, other):
"""Sanitize the object being modded with this Message.
- Add support for modding 'None' so translation supports it
- Trim the modded object, which can be a large dictionary, to only
those keys that would actually be used in a translation
- Snapshot the object being modded, in case the message is
translated, it will be used as it was when the Message was created
"""
if other is None:
params = (other,)
elif isinstance(other, dict):
params = self._trim_dictionary_parameters(other)
else:
params = self._copy_param(other)
return params
def _trim_dictionary_parameters(self, dict_param):
"""Return a dict that only has matching entries in the msgid."""
# NOTE(luisg): Here we trim down the dictionary passed as parameters
# to avoid carrying a lot of unnecessary weight around in the message
# object, for example if someone passes in Message() % locals() but
# only some params are used, and additionally we prevent errors for
# non-deepcopyable objects by unicoding() them.
# Look for %(param) keys in msgid;
# Skip %% and deal with the case where % is first character on the line
keys = re.findall('(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)
# If we don't find any %(param) keys but have a %s
if not keys and re.findall('(?:[^%]|^)%[a-z]', self.msgid):
# Apparently the full dictionary is the parameter
params = self._copy_param(dict_param)
else:
params = {}
for key in keys:
params[key] = self._copy_param(dict_param[key])
return params
def _copy_param(self, param):
try:
return copy.deepcopy(param)
except TypeError:
# Fallback to casting to unicode this will handle the
# python code-like objects that can't be deep-copied
return six.text_type(param)
def __add__(self, other):
msg = _('Message objects do not support addition.')
raise TypeError(msg)
def __radd__(self, other):
return self.__add__(other)
def __str__(self):
# NOTE(luisg): Logging in python 2.6 tries to str() log records,
# and it expects specifically a UnicodeError in order to proceed.
msg = _('Message objects do not support str() because they may '
'contain non-ascii characters. '
'Please use unicode() or translate() instead.')
raise UnicodeError(msg)
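# --- Added usage sketch (illustrative only, not part of the original module) --
#     m = _('Volume %(id)s could not be found') % {'id': 'vol-1'}
#     # With lazy mode enabled, m is a Message and translation is deferred:
#     localized = m.translate('es')   # or translate(m, 'es') from this module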
def get_available_languages(domain):
"""Lists the available languages for the given translation domain.
:param domain: the domain to get languages for
"""
if domain in _AVAILABLE_LANGUAGES:
return copy.copy(_AVAILABLE_LANGUAGES[domain])
localedir = '%s_LOCALEDIR' % domain.upper()
find = lambda x: gettext.find(domain,
localedir=os.environ.get(localedir),
languages=[x])
# NOTE(mrodden): en_US should always be available (and first in case
# order matters) since our in-line message strings are en_US
language_list = ['en_US']
# NOTE(luisg): Babel <1.0 used a function called list(), which was
# renamed to locale_identifiers() in >=1.0, the requirements master list
# requires >=0.9.6, uncapped, so defensively work with both. We can remove
# this check when the master list updates to >=1.0, and update all projects
list_identifiers = (getattr(localedata, 'list', None) or
getattr(localedata, 'locale_identifiers'))
locale_identifiers = list_identifiers()
for i in locale_identifiers:
if find(i) is not None:
language_list.append(i)
_AVAILABLE_LANGUAGES[domain] = language_list
return copy.copy(language_list)
def translate(obj, desired_locale=None):
"""Gets the translated unicode representation of the given object.
If the object is not translatable it is returned as-is.
If the locale is None the object is translated to the system locale.
:param obj: the object to translate
:param desired_locale: the locale to translate the message to, if None the
default system locale will be used
:returns: the translated object in unicode, or the original object if
it could not be translated
"""
message = obj
if not isinstance(message, Message):
# If the object to translate is not already translatable,
# let's first get its unicode representation
message = six.text_type(obj)
if isinstance(message, Message):
# Even after unicoding() we still need to check if we are
# running with translatable unicode before translating
return message.translate(desired_locale)
return obj
def _translate_args(args, desired_locale=None):
"""Translates all the translatable elements of the given arguments object.
This method is used for translating the translatable values in method
arguments which include values of tuples or dictionaries.
If the object is not a tuple or a dictionary the object itself is
translated if it is translatable.
If the locale is None the object is translated to the system locale.
:param args: the args to translate
:param desired_locale: the locale to translate the args to, if None the
default system locale will be used
:returns: a new args object with the translated contents of the original
"""
if isinstance(args, tuple):
return tuple(translate(v, desired_locale) for v in args)
if isinstance(args, dict):
translated_dict = {}
for (k, v) in six.iteritems(args):
translated_v = translate(v, desired_locale)
translated_dict[k] = translated_v
return translated_dict
return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
"""Handler that translates records before logging them.
The TranslationHandler takes a locale and a target logging.Handler object
to forward LogRecord objects to after translating them. This handler
depends on Message objects being logged, instead of regular strings.
The handler can be configured declaratively in the logging.conf as follows:
[handlers]
keys = translatedlog, translator
[handler_translatedlog]
class = handlers.WatchedFileHandler
args = ('/var/log/api-localized.log',)
formatter = context
[handler_translator]
class = openstack.common.log.TranslationHandler
target = translatedlog
args = ('zh_CN',)
If the specified locale is not available in the system, the handler will
log in the default locale.
"""
def __init__(self, locale=None, target=None):
"""Initialize a TranslationHandler
:param locale: locale to use for translating messages
:param target: logging.Handler object to forward
LogRecord objects to after translation
"""
# NOTE(luisg): In order to allow this handler to be a wrapper for
# other handlers, such as a FileHandler, and still be able to
# configure it using logging.conf, this handler has to extend
# MemoryHandler because only the MemoryHandlers' logging.conf
# parsing is implemented such that it accepts a target handler.
handlers.MemoryHandler.__init__(self, capacity=0, target=target)
self.locale = locale
def setFormatter(self, fmt):
self.target.setFormatter(fmt)
def emit(self, record):
# We save the message from the original record to restore it
# after translation, so other handlers are not affected by this
original_msg = record.msg
original_args = record.args
try:
self._translate_and_log_record(record)
finally:
record.msg = original_msg
record.args = original_args
def _translate_and_log_record(self, record):
record.msg = translate(record.msg, self.locale)
# In addition to translating the message, we also need to translate
# arguments that were passed to the log method that were not part
# of the main message e.g., log.info(_('Some message %s'), this_one))
record.args = _translate_args(record.args, self.locale)
self.target.emit(record)
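# --- Added wiring sketch (illustrative; handler and locale are assumptions) ---
#     import logging
#     target = logging.StreamHandler()
#     handler = TranslationHandler(locale='zh_CN', target=target)
#     logging.getLogger().addHandler(handler)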
| apache-2.0 | 5,540,851,499,754,961,000 | 38.513382 | 81 | 0.636515 | false |
maxkoryukov/headphones | lib/beetsplug/embedart.py | 14 | 9563 | # This file is part of beets.
# Copyright 2014, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Allows beets to embed album art into file metadata."""
import os.path
import logging
import imghdr
import subprocess
import platform
from tempfile import NamedTemporaryFile
from beets.plugins import BeetsPlugin
from beets import mediafile
from beets import ui
from beets.ui import decargs
from beets.util import syspath, normpath, displayable_path
from beets.util.artresizer import ArtResizer
from beets import config
log = logging.getLogger('beets')
class EmbedCoverArtPlugin(BeetsPlugin):
"""Allows albumart to be embedded into the actual files.
"""
def __init__(self):
super(EmbedCoverArtPlugin, self).__init__()
self.config.add({
'maxwidth': 0,
'auto': True,
'compare_threshold': 0,
'ifempty': False,
})
if self.config['maxwidth'].get(int) and not ArtResizer.shared.local:
self.config['maxwidth'] = 0
log.warn(u"embedart: ImageMagick or PIL not found; "
u"'maxwidth' option ignored")
if self.config['compare_threshold'].get(int) and not \
ArtResizer.shared.can_compare:
self.config['compare_threshold'] = 0
log.warn(u"embedart: ImageMagick 6.8.7 or higher not installed; "
u"'compare_threshold' option ignored")
def commands(self):
# Embed command.
embed_cmd = ui.Subcommand(
'embedart', help='embed image files into file metadata'
)
embed_cmd.parser.add_option(
'-f', '--file', metavar='PATH', help='the image file to embed'
)
maxwidth = config['embedart']['maxwidth'].get(int)
compare_threshold = config['embedart']['compare_threshold'].get(int)
ifempty = config['embedart']['ifempty'].get(bool)
def embed_func(lib, opts, args):
if opts.file:
imagepath = normpath(opts.file)
for item in lib.items(decargs(args)):
embed_item(item, imagepath, maxwidth, None,
compare_threshold, ifempty)
else:
for album in lib.albums(decargs(args)):
embed_album(album, maxwidth)
embed_cmd.func = embed_func
# Extract command.
extract_cmd = ui.Subcommand('extractart',
help='extract an image from file metadata')
extract_cmd.parser.add_option('-o', dest='outpath',
help='image output file')
def extract_func(lib, opts, args):
outpath = normpath(opts.outpath or 'cover')
item = lib.items(decargs(args)).get()
extract(outpath, item)
extract_cmd.func = extract_func
# Clear command.
clear_cmd = ui.Subcommand('clearart',
help='remove images from file metadata')
def clear_func(lib, opts, args):
clear(lib, decargs(args))
clear_cmd.func = clear_func
return [embed_cmd, extract_cmd, clear_cmd]
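    # --- Added usage sketch (not part of the original plugin) -----------------
    # The subcommands registered above are driven from the beets CLI, e.g.:
    #
    #     beet embedart -f cover.jpg <query>    # embed a specific image
    #     beet extractart -o cover <query>      # write embedded art to cover.<ext>
    #     beet clearart <query>                 # strip embedded art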
@EmbedCoverArtPlugin.listen('album_imported')
def album_imported(lib, album):
"""Automatically embed art into imported albums.
"""
if album.artpath and config['embedart']['auto']:
embed_album(album, config['embedart']['maxwidth'].get(int), True)
def embed_item(item, imagepath, maxwidth=None, itempath=None,
compare_threshold=0, ifempty=False, as_album=False):
"""Embed an image into the item's media file.
"""
if compare_threshold:
if not check_art_similarity(item, imagepath, compare_threshold):
log.warn(u'Image not similar; skipping.')
return
if ifempty:
art = get_art(item)
if not art:
pass
else:
log.debug(u'embedart: media file contained art already {0}'.format(
displayable_path(imagepath)
))
return
if maxwidth and not as_album:
imagepath = resize_image(imagepath, maxwidth)
try:
log.debug(u'embedart: embedding {0}'.format(
displayable_path(imagepath)
))
item['images'] = [_mediafile_image(imagepath, maxwidth)]
except IOError as exc:
log.error(u'embedart: could not read image file: {0}'.format(exc))
else:
# We don't want to store the image in the database.
item.try_write(itempath)
del item['images']
def embed_album(album, maxwidth=None, quiet=False):
"""Embed album art into all of the album's items.
"""
imagepath = album.artpath
if not imagepath:
log.info(u'No album art present: {0} - {1}'.
format(album.albumartist, album.album))
return
if not os.path.isfile(syspath(imagepath)):
log.error(u'Album art not found at {0}'
.format(displayable_path(imagepath)))
return
if maxwidth:
imagepath = resize_image(imagepath, maxwidth)
log.log(
logging.DEBUG if quiet else logging.INFO,
u'Embedding album art into {0.albumartist} - {0.album}.'.format(album),
)
for item in album.items():
embed_item(item, imagepath, maxwidth, None,
config['embedart']['compare_threshold'].get(int),
config['embedart']['ifempty'].get(bool), as_album=True)
def resize_image(imagepath, maxwidth):
"""Returns path to an image resized to maxwidth.
"""
log.info(u'Resizing album art to {0} pixels wide'
.format(maxwidth))
imagepath = ArtResizer.shared.resize(maxwidth, syspath(imagepath))
return imagepath
def check_art_similarity(item, imagepath, compare_threshold):
"""A boolean indicating if an image is similar to embedded item art.
"""
with NamedTemporaryFile(delete=True) as f:
art = extract(f.name, item)
if art:
# Converting images to grayscale tends to minimize the weight
# of colors in the diff score
cmd = 'convert {0} {1} -colorspace gray MIFF:- | ' \
'compare -metric PHASH - null:'.format(syspath(imagepath),
syspath(art))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=platform.system() != 'Windows',
shell=True)
stdout, stderr = proc.communicate()
if proc.returncode:
if proc.returncode != 1:
log.warn(u'embedart: IM phashes compare failed for {0}, \
{1}'.format(displayable_path(imagepath),
displayable_path(art)))
return
phashDiff = float(stderr)
else:
phashDiff = float(stdout)
log.info(u'embedart: compare PHASH score is {0}'.format(phashDiff))
if phashDiff > compare_threshold:
return False
return True
def _mediafile_image(image_path, maxwidth=None):
"""Return a `mediafile.Image` object for the path.
"""
with open(syspath(image_path), 'rb') as f:
data = f.read()
return mediafile.Image(data, type=mediafile.ImageType.front)
def get_art(item):
# Extract the art.
try:
mf = mediafile.MediaFile(syspath(item.path))
except mediafile.UnreadableFileError as exc:
log.error(u'Could not extract art from {0}: {1}'.format(
displayable_path(item.path), exc
))
return
return mf.art
# 'extractart' command.
def extract(outpath, item):
if not item:
log.error(u'No item matches query.')
return
art = get_art(item)
if not art:
log.error(u'No album art present in {0} - {1}.'
.format(item.artist, item.title))
return
# Add an extension to the filename.
ext = imghdr.what(None, h=art)
if not ext:
log.error(u'Unknown image type.')
return
outpath += '.' + ext
log.info(u'Extracting album art from: {0.artist} - {0.title} '
u'to: {1}'.format(item, displayable_path(outpath)))
with open(syspath(outpath), 'wb') as f:
f.write(art)
return outpath
# 'clearart' command.
def clear(lib, query):
log.info(u'Clearing album art from items:')
for item in lib.items(query):
log.info(u'{0} - {1}'.format(item.artist, item.title))
try:
mf = mediafile.MediaFile(syspath(item.path),
config['id3v23'].get(bool))
except mediafile.UnreadableFileError as exc:
log.error(u'Could not clear art from {0}: {1}'.format(
displayable_path(item.path), exc
))
continue
del mf.art
mf.save()
| gpl-3.0 | -7,197,453,454,722,766,000 | 33.153571 | 79 | 0.586741 | false |
osoken/sqlite-tensor | doc/conf.py | 1 | 1305 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from sqlite_tensor import __author__, __version__, __package_name__
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinxcontrib.asyncio',
]
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
master_doc = 'index'
project = __package_name__
copyright = '2017, ' + __author__
author = __author__
version = __version__
release = __version__
language = 'ja'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
pygments_style = 'sphinx'
todo_include_todos = True
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
htmlhelp_basename = __package_name__ + 'doc'
latex_elements = {
}
latex_documents = [
(master_doc, __package_name__ + '.tex',
__package_name__ + ' Documentation',
__author__, 'manual'),
]
man_pages = [
(master_doc, __package_name__, __package_name__ + ' Documentation',
[author], 1)
]
texinfo_documents = [
(master_doc, __package_name__, __package_name__ + ' Documentation',
author, __package_name__, 'One line description of project.',
'Miscellaneous'),
]
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
epub_exclude_files = ['search.html']
| mit | -5,994,074,250,059,304,000 | 18.772727 | 71 | 0.622989 | false |
arne-cl/alt-mulig | crf-python/crfutils.py | 1 | 6193 | """
A miscellaneous utility for sequential labeling.
Copyright 2010,2011 Naoaki Okazaki.
"""
import optparse
import sys
def apply_templates(X, templates):
"""
Generate features for an item sequence by applying feature templates.
A feature template consists of a tuple of (name, offset) pairs,
where name and offset specify a field name and offset from which
the template extracts a feature value. Generated features are stored
in the 'F' field of each item in the sequence.
@type X: list of mapping objects
@param X: The item sequence.
    @type templates: list of tuples of (str, int)
    @param templates: The feature templates.
"""
for template in templates:
name = '|'.join(['%s[%d]' % (f, o) for f, o in template])
for t in range(len(X)):
values = []
for field, offset in template:
p = t + offset
if p not in range(len(X)):
values = []
break
values.append(X[p][field])
if values:
X[t]['F'].append('%s=%s' % (name, '|'.join(values)))
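# --- Added example (illustrative; field names follow main()'s defaults) -------
# A small template set in the form apply_templates() expects:
#
#     templates = (
#         (('w', -1),), (('w', 0),), (('w', 1),),
#         (('w', -1), ('w', 0)), (('w', 0), ('w', 1)),
#         (('pos', 0),),
#     )
#
# apply_templates(X, templates) then appends strings such as 'w[0]=dog' or
# 'w[-1]|w[0]=the|dog' to each item's X[t]['F'] feature list.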
def readiter(fi, names, sep=' '):
"""
Return an iterator for item sequences read from a file object.
This function reads a sequence from a file object L{fi}, and
yields the sequence as a list of mapping objects. Each line
(item) from the file object is split by the separator character
L{sep}. Separated values of the item are named by L{names},
and stored in a mapping object. Every item has a field 'F' that
is reserved for storing features.
@type fi: file
@param fi: The file object.
@type names: tuple
@param names: The list of field names.
@type sep: str
@param sep: The separator character.
@rtype list of mapping objects
@return An iterator for sequences.
"""
X = []
for line in fi:
line = line.strip('\n')
if not line:
yield X
X = []
else:
fields = line.split(sep)
if len(fields) < len(names):
raise ValueError(
'Too few fields (%d) for %r\n%s' % (len(fields), names, line))
item = {'F': []} # 'F' is reserved for features.
for i in range(len(names)):
item[names[i]] = fields[i]
X.append(item)
yield X
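# --- Added example (illustrative, not part of the original utility) -----------
# With names=('w', 'pos', 'y') and sep=' ', readiter() consumes CoNLL-style
# input where sequences are separated by blank lines, e.g.:
#
#     Confidence NN B-NP
#     in IN B-PP
#     the DT B-NP
#     pound NN I-NP
#
# and yields one list per sequence:
#     [{'w': 'Confidence', 'pos': 'NN', 'y': 'B-NP', 'F': []}, ...]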
def escape(src):
"""
Escape colon characters from feature names.
@type src: str
@param src: A feature name
@rtype str
@return The feature name escaped.
"""
return src.replace(':', '__COLON__')
def output_features(fo, X, field=''):
"""
Output features (and reference labels) of a sequence in CRFSuite
format. For each item in the sequence, this function writes a
reference label (if L{field} is a non-empty string) and features.
@type fo: file
@param fo: The file object.
@type X: list of mapping objects
@param X: The sequence.
@type field: str
@param field: The field name of reference labels.
"""
for t in range(len(X)):
if field:
fo.write('%s' % X[t][field])
for a in X[t]['F']:
if isinstance(a, str):
fo.write('\t%s' % escape(a))
else:
fo.write('\t%s:%f' % (escape(a[0]), a[1]))
fo.write('\n')
fo.write('\n')
def to_crfsuite(X):
"""
Convert an item sequence into an object compatible with crfsuite
Python module.
@type X: list of mapping objects
@param X: The sequence.
@rtype crfsuite.ItemSequence
@return The same sequence in crfsuite.ItemSequence type.
"""
import crfsuite
xseq = crfsuite.ItemSequence()
for x in X:
item = crfsuite.Item()
for f in x['F']:
if isinstance(f, str):
item.append(crfsuite.Attribute(escape(f)))
else:
item.append(crfsuite.Attribute(escape(f[0]), f[1]))
xseq.append(item)
return xseq
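# --- Added usage sketch (illustrative; the template choice is an assumption) --
# A minimal feature-extractor script built on main() below:
#
#     import crfutils
#     templates = ((('w', -1),), (('w', 0),), (('w', 1),), (('pos', 0),))
#     def feature_extractor(X):
#         crfutils.apply_templates(X, templates)
#         if X:
#             X[0]['F'].append('__BOS__')    # mark sequence boundaries
#             X[-1]['F'].append('__EOS__')
#     if __name__ == '__main__':
#         crfutils.main(feature_extractor, fields='w pos y', sep=' ')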
def main(feature_extractor, fields='w pos y', sep=' '):
fi = sys.stdin
fo = sys.stdout
# Parse the command-line arguments.
parser = optparse.OptionParser(usage="""usage: %prog [options]
This utility reads a data set from STDIN, and outputs attributes to STDOUT.
Each line of a data set must consist of field values separated by SEPARATOR
characters. The names and order of field values can be specified by -f option.
The separator character can be specified with -s option. Instead of outputting
attributes, this utility tags the input data when a model file is specified by
-t option (CRFsuite Python module must be installed)."""
)
parser.add_option(
'-t', dest='model',
help='tag the input using the model (requires "crfsuite" module)'
)
parser.add_option(
'-f', dest='fields', default=fields,
help='specify field names of input data [default: "%default"]'
)
parser.add_option(
'-s', dest='separator', default=sep,
help='specify the separator of columns of input data [default: "%default"]'
)
(options, args) = parser.parse_args()
    # The fields of input: ('w', 'pos', 'y') by default.
F = options.fields.split(' ')
if not options.model:
        # The generator function readiter() reads one sequence at a time
        # from the input stream.
for X in readiter(fi, F, options.separator):
feature_extractor(X)
output_features(fo, X, 'y')
else:
# Create a tagger with an existing model.
import crfsuite
tagger = crfsuite.Tagger()
tagger.open(options.model)
# For each sequence from STDIN.
for X in readiter(fi, F, options.separator):
# Obtain features.
feature_extractor(X)
xseq = to_crfsuite(X)
yseq = tagger.tag(xseq)
for t in range(len(X)):
v = X[t]
fo.write('\t'.join([v[f] for f in F]))
fo.write('\t%s\n' % yseq[t])
fo.write('\n')
| gpl-3.0 | -9,142,735,506,375,005,000 | 33.405556 | 83 | 0.570806 | false |
benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/cs/csvserver_spilloverpolicy_binding.py | 3 | 11328 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class csvserver_spilloverpolicy_binding(base_resource) :
""" Binding class showing the spilloverpolicy that can be bound to csvserver.
"""
def __init__(self) :
self._policyname = ""
self._gotopriorityexpression = ""
self._bindpoint = ""
self._priority = 0
self._name = ""
self._targetlbvserver = ""
self._invoke = False
self._labeltype = ""
self._labelname = ""
self.___count = 0
@property
def priority(self) :
ur"""Priority for the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""Priority for the policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def bindpoint(self) :
ur"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE.
"""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
ur"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def policyname(self) :
ur"""Policies bound to this vserver.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
ur"""Policies bound to this vserver.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labelname(self) :
ur"""Name of the label to be invoked.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
ur"""Name of the label to be invoked.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def name(self) :
ur"""Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def targetlbvserver(self) :
ur"""Name of the Load Balancing virtual server to which the content is switched, if policy rule is evaluated to be TRUE.
Example: bind cs vs cs1 -policyname pol1 -priority 101 -targetLBVserver lb1
Note: Use this parameter only in case of Content Switching policy bind operations to a CS vserver.
"""
try :
return self._targetlbvserver
except Exception as e:
raise e
@targetlbvserver.setter
def targetlbvserver(self, targetlbvserver) :
ur"""Name of the Load Balancing virtual server to which the content is switched, if policy rule is evaluated to be TRUE.
Example: bind cs vs cs1 -policyname pol1 -priority 101 -targetLBVserver lb1
Note: Use this parameter only in case of Content Switching policy bind operations to a CS vserver
"""
try :
self._targetlbvserver = targetlbvserver
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def invoke(self) :
ur"""Invoke a policy label if this policy's rule evaluates to TRUE (valid only for default-syntax policies such as application firewall, transform, integrated cache, rewrite, responder, and content switching).
"""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
ur"""Invoke a policy label if this policy's rule evaluates to TRUE (valid only for default-syntax policies such as application firewall, transform, integrated cache, rewrite, responder, and content switching).
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def labeltype(self) :
ur"""Type of label to be invoked.
"""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
ur"""Type of label to be invoked.
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(csvserver_spilloverpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.csvserver_spilloverpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = csvserver_spilloverpolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.targetlbvserver = resource.targetlbvserver
updateresource.priority = resource.priority
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [csvserver_spilloverpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].targetlbvserver = resource[i].targetlbvserver
updateresources[i].priority = resource[i].priority
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = csvserver_spilloverpolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
deleteresource.priority = resource.priority
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [csvserver_spilloverpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].bindpoint = resource[i].bindpoint
deleteresources[i].priority = resource[i].priority
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
ur""" Use this API to fetch csvserver_spilloverpolicy_binding resources.
"""
try :
obj = csvserver_spilloverpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
ur""" Use this API to fetch filtered set of csvserver_spilloverpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = csvserver_spilloverpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
ur""" Use this API to count csvserver_spilloverpolicy_binding resources configued on NetScaler.
"""
try :
obj = csvserver_spilloverpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
ur""" Use this API to count the filtered set of csvserver_spilloverpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = csvserver_spilloverpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class csvserver_spilloverpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.csvserver_spilloverpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.csvserver_spilloverpolicy_binding = [csvserver_spilloverpolicy_binding() for _ in range(length)]
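# --- Added usage sketch (illustrative; host and credentials are placeholders) -
#     from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#     client = nitro_service("10.0.0.1", "http")
#     client.login("nsroot", "nsroot")
#     binding = csvserver_spilloverpolicy_binding()
#     binding.name = "cs_vs1"                 # CS virtual server
#     binding.policyname = "spillover_pol1"   # spillover policy to bind
#     binding.priority = 10
#     csvserver_spilloverpolicy_binding.add(client, binding)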
| apache-2.0 | 1,937,573,977,754,214,700 | 29.95082 | 211 | 0.716984 | false |
slyphon/pants | tests/python/pants_test/engine/exp/test_addressable.py | 1 | 10250 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pants.engine.exp.addressable import (Exactly, MutationError, NotSerializableError,
SubclassesOf, SuperclassesOf, TypeConstraintError,
addressable, addressable_dict, addressable_list)
from pants.engine.exp.objects import Resolvable, Serializable
class TypeConstraintTestBase(unittest.TestCase):
class A(object):
pass
class B(A):
pass
class C(B):
pass
class BPrime(A):
pass
class SuperclassesOfTest(TypeConstraintTestBase):
def test_none(self):
with self.assertRaises(ValueError):
      SuperclassesOf()
def test_single(self):
self.assertTrue(SuperclassesOf(self.B).satisfied_by(self.A()))
self.assertTrue(SuperclassesOf(self.B).satisfied_by(self.B()))
self.assertFalse(SuperclassesOf(self.B).satisfied_by(self.BPrime()))
self.assertFalse(SuperclassesOf(self.B).satisfied_by(self.C()))
def test_multiple(self):
self.assertTrue(SuperclassesOf(self.A, self.B).satisfied_by(self.A()))
self.assertTrue(SuperclassesOf(self.A, self.B).satisfied_by(self.B()))
self.assertFalse(SuperclassesOf(self.A, self.B).satisfied_by(self.BPrime()))
self.assertFalse(SuperclassesOf(self.A, self.B).satisfied_by(self.C()))
class ExactlyTest(TypeConstraintTestBase):
def test_none(self):
with self.assertRaises(ValueError):
Exactly()
def test_single(self):
self.assertFalse(Exactly(self.B).satisfied_by(self.A()))
self.assertTrue(Exactly(self.B).satisfied_by(self.B()))
self.assertFalse(Exactly(self.B).satisfied_by(self.BPrime()))
self.assertFalse(Exactly(self.B).satisfied_by(self.C()))
def test_multiple(self):
self.assertTrue(Exactly(self.A, self.B).satisfied_by(self.A()))
self.assertTrue(Exactly(self.A, self.B).satisfied_by(self.B()))
self.assertFalse(Exactly(self.A, self.B).satisfied_by(self.BPrime()))
self.assertFalse(Exactly(self.A, self.B).satisfied_by(self.C()))
class SubclassesOfTest(TypeConstraintTestBase):
def test_none(self):
with self.assertRaises(ValueError):
SubclassesOf()
def test_single(self):
self.assertFalse(SubclassesOf(self.B).satisfied_by(self.A()))
self.assertTrue(SubclassesOf(self.B).satisfied_by(self.B()))
self.assertFalse(SubclassesOf(self.B).satisfied_by(self.BPrime()))
self.assertTrue(SubclassesOf(self.B).satisfied_by(self.C()))
def test_multiple(self):
self.assertTrue(SubclassesOf(self.B, self.C).satisfied_by(self.B()))
self.assertTrue(SubclassesOf(self.B, self.C).satisfied_by(self.C()))
self.assertFalse(SubclassesOf(self.B, self.C).satisfied_by(self.BPrime()))
self.assertFalse(SubclassesOf(self.B, self.C).satisfied_by(self.A()))
class SimpleSerializable(Serializable):
def __init__(self, **kwargs):
self._kwargs = kwargs
def _asdict(self):
return self._kwargs
class CountingResolvable(Resolvable):
def __init__(self, address, value):
self._address = address
self._value = value
self._resolutions = 0
@property
def address(self):
return self._address
def resolve(self):
try:
return self._value
finally:
self._resolutions += 1
@property
def resolutions(self):
return self._resolutions
class AddressableDescriptorTest(unittest.TestCase):
def test_inappropriate_application(self):
class NotSerializable(object):
def __init__(self, count):
super(NotSerializable, self).__init__()
self.count = count
@addressable(Exactly(int))
def count(self):
pass
with self.assertRaises(NotSerializableError):
NotSerializable(42)
class AddressableTest(unittest.TestCase):
class Person(SimpleSerializable):
def __init__(self, age):
super(AddressableTest.Person, self).__init__()
self.age = age
@addressable(Exactly(int))
def age(self):
"""Return the person's age in years.
:rtype int
"""
def test_none(self):
person = self.Person(None)
self.assertIsNone(person.age, None)
def test_value(self):
person = self.Person(42)
self.assertEqual(42, person.age)
def test_address(self):
person = self.Person('//:meaning-of-life')
self.assertEqual('//:meaning-of-life', person.age)
def test_resolvable(self):
resolvable_age = CountingResolvable('//:meaning-of-life', 42)
person = self.Person(resolvable_age)
self.assertEqual(0, resolvable_age.resolutions)
self.assertEqual(42, person.age)
self.assertEqual(1, resolvable_age.resolutions)
self.assertEqual(42, person.age)
self.assertEqual(2, resolvable_age.resolutions)
def test_type_mismatch_value(self):
with self.assertRaises(TypeConstraintError):
self.Person(42.0)
def test_type_mismatch_resolvable(self):
resolvable_age = CountingResolvable('//:meaning-of-life', 42.0)
person = self.Person(resolvable_age)
with self.assertRaises(TypeConstraintError):
person.age
def test_single_assignment(self):
person = self.Person(42)
with self.assertRaises(MutationError):
person.age = 37
class AddressableListTest(unittest.TestCase):
class Series(SimpleSerializable):
def __init__(self, values):
super(AddressableListTest.Series, self).__init__()
self.values = values
@addressable_list(Exactly(int, float))
def values(self):
"""Return this series' values.
:rtype list of int or float
"""
def test_none(self):
series = self.Series(None)
self.assertEqual([], series.values)
def test_values(self):
series = self.Series([42, 1 / 137.0])
self.assertEqual([42, 1 / 137.0], series.values)
def test_addresses(self):
series = self.Series(['//:meaning-of-life'])
self.assertEqual(['//:meaning-of-life'], series.values)
def test_resolvables(self):
resolvable_value = CountingResolvable('//:fine-structure-constant', 1 / 137.0)
series = self.Series([resolvable_value])
self.assertEqual([1 / 137.0], series.values)
self.assertEqual(1, resolvable_value.resolutions)
self.assertEqual(1 / 137.0, series.values[0])
self.assertEqual(2, resolvable_value.resolutions)
def test_mixed(self):
resolvable_value = CountingResolvable('//:fine-structure-constant', 1 / 137.0)
series = self.Series([42, '//:meaning-of-life', resolvable_value])
self.assertEqual(0, resolvable_value.resolutions)
self.assertEqual([42, '//:meaning-of-life', 1 / 137.0], series.values)
self.assertEqual(1, resolvable_value.resolutions)
self.assertEqual(1 / 137.0, series.values[2])
self.assertEqual(2, resolvable_value.resolutions)
def test_type_mismatch_container(self):
with self.assertRaises(TypeError):
self.Series({42, 1 / 137.0})
def test_type_mismatch_value(self):
with self.assertRaises(TypeConstraintError):
self.Series([42, False])
def test_type_mismatch_resolvable(self):
resolvable_value = CountingResolvable('//:meaning-of-life', True)
series = self.Series([42, resolvable_value])
with self.assertRaises(TypeConstraintError):
series.values
def test_single_assignment(self):
series = self.Series([42])
with self.assertRaises(MutationError):
series.values = [37]
class AddressableDictTest(unittest.TestCase):
class Varz(SimpleSerializable):
def __init__(self, varz):
super(AddressableDictTest.Varz, self).__init__()
self.varz = varz
@addressable_dict(Exactly(int, float))
def varz(self):
"""Return a snapshot of the current /varz.
:rtype dict of string -> int or float
"""
def test_none(self):
varz = self.Varz(None)
self.assertEqual({}, varz.varz)
def test_values(self):
varz = self.Varz({'meaning of life': 42, 'fine structure constant': 1 / 137.0})
self.assertEqual({'meaning of life': 42, 'fine structure constant': 1 / 137.0}, varz.varz)
def test_addresses(self):
varz = self.Varz({'meaning of life': '//:meaning-of-life'})
self.assertEqual({'meaning of life': '//:meaning-of-life'}, varz.varz)
def test_resolvables(self):
resolvable_value = CountingResolvable('//:fine-structure-constant', 1 / 137.0)
varz = self.Varz({'fine structure constant': resolvable_value})
self.assertEqual({'fine structure constant': 1 / 137.0}, varz.varz)
self.assertEqual(1, resolvable_value.resolutions)
self.assertEqual(1 / 137.0, varz.varz['fine structure constant'])
self.assertEqual(2, resolvable_value.resolutions)
def test_mixed(self):
resolvable_value = CountingResolvable('//:fine-structure-constant', 1 / 137.0)
varz = self.Varz({'prime': 37,
'meaning of life': '//:meaning-of-life',
'fine structure constant': resolvable_value})
self.assertEqual(0, resolvable_value.resolutions)
self.assertEqual({'prime': 37,
'meaning of life': '//:meaning-of-life',
'fine structure constant': 1 / 137.0},
varz.varz)
self.assertEqual(1, resolvable_value.resolutions)
self.assertEqual(1 / 137.0, varz.varz['fine structure constant'])
self.assertEqual(2, resolvable_value.resolutions)
def test_type_mismatch_container(self):
with self.assertRaises(TypeError):
self.Varz([42, 1 / 137.0])
def test_type_mismatch_value(self):
with self.assertRaises(TypeConstraintError):
self.Varz({'meaning of life': 42, 'fine structure constant': False})
def test_type_mismatch_resolvable(self):
resolvable_item = CountingResolvable('//:fine-structure-constant', True)
varz = self.Varz({'meaning of life': 42, 'fine structure constant': resolvable_item})
with self.assertRaises(TypeConstraintError):
varz.varz
def test_single_assignment(self):
varz = self.Varz({'meaning of life': 42})
with self.assertRaises(MutationError):
varz.varz = {'fine structure constant': 1 / 137.0}
| apache-2.0 | 1,215,145,672,911,064,300 | 30.155015 | 94 | 0.678634 | false |
simphony/simphony-common | simphony/io/tests/abc_data_container_table_check.py | 1 | 10963 | import tempfile
import shutil
import os
import random
import uuid
import abc
from contextlib import closing, contextmanager
from collections import OrderedDict
import tables
from numpy.testing import assert_equal
from simphony.core import CUBA
from simphony.core.data_container import DataContainer
from simphony.io.data_container_table import DataContainerTable
from simphony.testing.utils import create_data_container, dummy_cuba_value
class ABCDataContainerTableCheck(object):
__metaclass__ = abc.ABCMeta
@abc.abstractproperty
def record(self):
""" The columns configuration that the table is using """
@property
def saved_keys(self):
""" Return the CUBA keys that are actually stored to be saved.
The default implementation will return the full CUBA keys.
"""
members = {
member.lower(): cuba
for member, cuba in CUBA.__members__.iteritems()}
data_record = self.record.columns['data']
try:
return [members[column] for column in data_record._v_names]
except AttributeError:
return [members[column] for column in data_record.columns]
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.filename = os.path.join(self.temp_dir, '_test_file.cuds')
self.maxDiff = None
def tearDown(self):
shutil.rmtree(self.temp_dir)
@contextmanager
def new_table(self, table_name):
handle = None
try:
handle = tables.open_file(self.filename, mode='w')
root = handle.root
table = DataContainerTable(root, table_name, record=self.record)
self.assertEqual(len(table), 0)
yield table
finally:
if handle is not None:
handle.close()
@contextmanager
def open_table(self, table_name, mode='r'):
handle = None
try:
handle = tables.open_file(self.filename, mode=mode)
root = handle.root
table = DataContainerTable(root, table_name, record=self.record)
yield table
finally:
if handle is not None:
handle.close()
@property
def data_list(self):
data = create_data_container(restrict=self.saved_keys)
full_data = create_data_container()
empty_data = DataContainer()
reduced_data = create_data_container(restrict=self.saved_keys[:-1])
return [data, empty_data, full_data, reduced_data]
def test_creating_a_data_container_table(self):
with closing(tables.open_file(self.filename, mode='w')) as handle:
root = handle.root
table = DataContainerTable(
root, 'my_data_table', record=self.record)
self.assertEqual(len(table), 0)
self.assertIn('my_data_table', root)
self.assertTrue(table.valid)
data_column = root.my_data_table.colinstances['data']
expected_column_names = [
key.name.lower() for key in self.saved_keys]
self.assertItemsEqual(
data_column._v_colnames, expected_column_names)
def test_append_data(self):
with self.new_table('my_data_table') as table:
uids = {table.append(data): data for data in self.data_list}
with self.open_table('my_data_table') as table:
self.assertEqual(len(table), 4)
for uid, data in uids.iteritems():
if len(data) <= len(self.saved_keys):
self.assertDataContainersEqual(table[uid], data)
else:
# special case for custom records since they do not
# store the full set of keys
self.assertDataContainersEqual(
table[uid],
create_data_container(restrict=self.saved_keys))
def test_set_data(self):
with self.new_table('my_data_table') as table:
uids = {uuid.uuid4(): data for data in self.data_list}
for uid, data in uids.iteritems():
table[uid] = data
with self.open_table('my_data_table') as table:
self.assertEqual(len(table), 4)
for uid, data in uids.iteritems():
if len(data) <= len(self.saved_keys):
self.assertDataContainersEqual(table[uid], data)
else:
# special case for custom records since they do not
# store the full set of keys
self.assertDataContainersEqual(
table[uid],
create_data_container(restrict=self.saved_keys))
def test_get_data(self):
saved_keys = self.saved_keys
data = create_data_container(restrict=saved_keys)
data1 = DataContainer(data)
key = saved_keys[0]
data[key] = dummy_cuba_value(key) + dummy_cuba_value(key)
with self.new_table('my_data_table') as table:
uid = table.append(data)
uid1 = uuid.uuid4()
table[uid1] = data1
with self.open_table('my_data_table') as table:
self.assertEqual(len(table), 2)
self.assertDataContainersEqual(table[uid], data)
self.assertDataContainersEqual(table[uid1], data1)
def test_get_with_invalid_uid(self):
saved_keys = self.saved_keys
data = create_data_container(restrict=saved_keys)
with self.new_table('my_data_table') as table:
table.append(data)
with self.open_table('my_data_table') as table:
with self.assertRaises(KeyError):
table[uuid.uuid4()]
def test_update_data(self):
with self.new_table('my_data_table') as table:
uids = OrderedDict()
for data in self.data_list:
uids[table.append(data)] = data
with self.open_table('my_data_table', mode='a') as table:
updated_data = [data for data in reversed(self.data_list)]
for uid in uids:
for data in updated_data:
table[uid] = data
if len(data) <= len(self.saved_keys):
self.assertDataContainersEqual(table[uid], data)
else:
# special case for custom records since they do not
# store the full set of keys
self.assertDataContainersEqual(
table[uid],
create_data_container(restrict=self.saved_keys))
def test_delete_data(self):
saved_keys = self.saved_keys
data = create_data_container(restrict=saved_keys)
with self.new_table('my_data_table') as table:
uid0 = table.append(data)
new_data = DataContainer(data)
key = saved_keys[0]
data[key] = dummy_cuba_value(key) + dummy_cuba_value(key)
uid1 = table.append(new_data)
with self.open_table('my_data_table', mode='a') as table:
del table[uid0]
loaded_data = table[uid1]
self.assertEqual(len(table), 1)
self.assertDataContainersEqual(loaded_data, new_data)
def test_delete_data_with_invalid_uid(self):
saved_keys = self.saved_keys
data = create_data_container(restrict=saved_keys)
with self.new_table('my_data_table') as table:
uid0 = table.append(data)
new_data = DataContainer(data)
key = saved_keys[0]
data[key] = dummy_cuba_value(key) + dummy_cuba_value(key)
table.append(new_data)
with self.open_table('my_data_table', mode='a') as table:
del table[uid0]
with self.assertRaises(KeyError):
del table[uuid.uuid4()]
with self.assertRaises(KeyError):
del table[uid0]
def test_delete_data_to_empty_table(self):
data = create_data_container()
with self.new_table('my_data_table') as table:
uid = table.append(data)
with closing(tables.open_file(self.filename, mode='a')) as handle:
root = handle.root
table = DataContainerTable(
root, 'my_data_table', record=self.record)
del table[uid]
self.assertEqual(len(table), 0)
            # The table is recreated, so we need to make sure that the right
            # record is used.
data_column = root.my_data_table.colinstances['data']
expected_column_names = [
key.name.lower() for key in self.saved_keys]
self.assertItemsEqual(
data_column._v_colnames, expected_column_names)
def test_iteration(self):
# create sample data
data = []
saved_keys = self.saved_keys
for key in saved_keys:
data_container = create_data_container(restrict=saved_keys)
del data_container[key]
data.append(data_container)
# add to data container table
with self.new_table('my_data_table') as table:
for data_container in data:
table.append(data_container)
self.assertEqual(len(table), len(saved_keys))
# Iterate over all the rows
with self.open_table('my_data_table') as table:
for index, loaded_data in enumerate(table):
self.assertDataContainersEqual(loaded_data, data[index])
self.assertEqual(index, len(saved_keys) - 1)
def test_itersequence(self):
# create sample data
data = []
saved_keys = self.saved_keys
for key in saved_keys[:-1]:
data_container = create_data_container(restrict=saved_keys)
del data_container[key]
data.append(data_container)
# add to data container table
with self.open_table('my_data_table', mode='a') as table:
uids = {
table.append(data_container): data_container
for data_container in data}
self.assertEqual(len(table), len(saved_keys) - 1)
# Iterate over a sequence of rows
with self.open_table('my_data_table') as table:
sequence = random.sample(uids, 4)
loaded_data = [
container for container in table.itersequence(sequence)]
self.assertEqual(len(loaded_data), 4)
for index, container in enumerate(loaded_data):
self.assertDataContainersEqual(
container, uids[sequence[index]])
def assertDataContainersEqual(self, data1, data2):
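        """Assert that two DataContainers hold the same keys and equal values."""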
self.assertIsInstance(data1, DataContainer)
self.assertIsInstance(data2, DataContainer)
self.assertEqual(len(data1), len(data2))
for key in data1:
self.assertIn(key, data2)
assert_equal(data1[key], data2[key])
| bsd-2-clause | 884,006,210,559,833,100 | 38.721014 | 76 | 0.580407 | false |
electrumalt/electrum-ixc | lib/tests/test_util.py | 18 | 3460 | import unittest
from lib.util import format_satoshis, parse_URI
class TestUtil(unittest.TestCase):
def test_format_satoshis(self):
result = format_satoshis(1234)
expected = "0.00001234"
self.assertEqual(expected, result)
def test_format_satoshis_diff_positive(self):
result = format_satoshis(1234, is_diff=True)
expected = "+0.00001234"
self.assertEqual(expected, result)
def test_format_satoshis_diff_negative(self):
result = format_satoshis(-1234, is_diff=True)
expected = "-0.00001234"
self.assertEqual(expected, result)
def _do_test_parse_URI(self, uri, expected_address, expected_amount, expected_label, expected_message, expected_request_url):
address, amount, label, message, request_url = parse_URI(uri)
self.assertEqual(expected_address, address)
self.assertEqual(expected_amount, amount)
self.assertEqual(expected_label, label)
self.assertEqual(expected_message, message)
self.assertEqual(expected_request_url, request_url)
def test_parse_URI_address(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', '', '', '', '')
def test_parse_URI_only_address(self):
self._do_test_parse_URI('15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', None, None, None, None)
def test_parse_URI_address_label(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?label=electrum%20test', '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', '', 'electrum test', '', '')
def test_parse_URI_address_message(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?message=electrum%20test', '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', '', '', 'electrum test', '')
def test_parse_URI_address_amount(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?amount=0.0003', '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 30000, '', '', '')
def test_parse_URI_address_request_url(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?r=http://domain.tld/page?h%3D2a8628fc2fbe', '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', '', '', '', 'http://domain.tld/page?h=2a8628fc2fbe')
def test_parse_URI_ignore_args(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?test=test', '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', '', '', '', '')
def test_parse_URI_multiple_args(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?amount=0.00004&label=electrum-test&message=electrum%20test&test=none&r=http://domain.tld/page', '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 4000, 'electrum-test', 'electrum test', 'http://domain.tld/page')
def test_parse_URI_no_address_request_url(self):
self._do_test_parse_URI('bitcoin:?r=http://domain.tld/page?h%3D2a8628fc2fbe', '', '', '', '', 'http://domain.tld/page?h=2a8628fc2fbe')
def test_parse_URI_invalid_address(self):
self.assertRaises(AssertionError, parse_URI, 'bitcoin:invalidaddress')
def test_parse_URI_invalid(self):
self.assertRaises(AssertionError, parse_URI, 'notbitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma')
    def test_parse_URI_parameter_pollution(self):
self.assertRaises(Exception, parse_URI, 'bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?amount=0.0003&label=test&amount=30.0')
| gpl-3.0 | 1,588,218,002,876,656,400 | 52.230769 | 275 | 0.716474 | false |
Vavius/moai-framework | external/compile_layout.py | 1 | 10775 | #!/usr/bin/env python
import re, sys, os
import argparse
import json
import shutil, subprocess
# Lua code templates
bodyLua = """--------------------------------------------------------------------------------
-- %s
--
-- WARNING: Do not edit!
-- This file is auto generated, all changes will be lost.
--------------------------------------------------------------------------------
local Button = Gui.Button
local Sprite = Display.Sprite
local Group = Display.Group
local Label = Display.Label
local LocalizedString = LocalizedString or function(s) return s end
local function layout(layer)
local group = Group {
layer = layer,
children = {
%s
}
}
return group
end
return layout
"""
spriteFuncLua = """Sprite ("%(fileName)s", %(width)s, %(height)s)"""
spriteTableLua = """Sprite {
name = "%(name)s", fileName = "%(fileName)s", %(loc)s
width = %(width)s, height = %(height)s
}"""
buttonLua = """Button {
name = "%(name)s", %(loc)s
%(children)s
}"""
groupLua = """Group {
name = "%(name)s", %(loc)s
children = {
%(children)s
}
}"""
labelLua = """Label {
name = "%(name)s",
string = %(text)s, %(color)s
width = %(width)s, height = %(height)s, %(loc)s
fontName = "%(fontName)s", fontSize = %(fontSize)s,
alignment = { %(alignment)s, MOAITextBox.LEFT_JUSTIFY },
}"""
def makeLoc(x, y):
if x != 0 or y != 0:
return 'loc = {%f, %f, 0},' % (x, y)
return ''
def makeColor(r, g, b, a):
if r != 1 or g != 1 or b != 1 or a != 1:
return 'color = {%f, %f, %f, %f},' % (r, g, b, a)
return ''
class LayoutParser(object):
indentLevel = 3
offsetX = 0
offsetY = 0
fontPathCache = {}
"""LayoutParser"""
def __init__(self, params):
super(LayoutParser, self).__init__()
self.params = params
def reindent(self, s):
s = s.split('\n')
s = [(self.indentLevel * 4 * ' ') + line for line in s]
s = '\n'.join(s)
return s
def affirmFont(self, fontName):
fontPathCache = self.fontPathCache
if not fontName in fontPathCache:
selfDir = os.path.dirname(os.path.realpath(__file__))
path = subprocess.check_output([os.path.join(selfDir, 'fontfinder'), fontName], stderr=subprocess.STDOUT).strip()
if path:
fontPathCache[fontName] = path
outDir = self.params['fontsFolder']
if not os.path.isdir(outDir):
os.makedirs(outDir)
shutil.copyfile(path, os.path.join(outDir, os.path.basename(path)))
else:
print("Font not found in system", fontName)
print("Aborting...")
exit(0)
fileName = os.path.basename(fontPathCache[fontName])
return os.path.join(self.params['fontPrefix'], fileName)
def generateLayout(self, layout, outName):
self.screenWidth = layout['size'][0]
self.screenHeight = layout['size'][1]
children = ""
for obj in layout['layout']:
children = children + self.makeObject(obj) + ','
if obj != layout['layout'][-1]:
children = children + '\n'
return bodyLua % (outName, children)
def makeObject(self, obj):
factory = {
"spr" : self.makeSprite,
"grp" : self.makeGroup,
"lbl" : self.makeLabel,
"btn" : self.makeButton
}
return factory[obj['type']](obj)
def makeSprite(self, obj):
x, y, width, height = self.transformCoords(obj['pos'][0], obj['pos'][1], obj['size'][0], obj['size'][1])
data = {
'name' : obj['name'],
'fileName' : obj['fileName'],
'loc' : makeLoc(x, y),
'width' : width,
'height' : height,
}
if data['name'] == '' and data['loc'] == '':
return self.reindent(spriteFuncLua % data)
else:
return self.reindent(spriteTableLua % data)
def makeGroup(self, obj):
initialIndent = self.indentLevel
initialOffsetX = self.offsetX
initialOffsetY = self.offsetY
x, y, width, height = self.transformCoords(obj['pos'][0], obj['pos'][1], obj['size'][0], obj['size'][1])
data = {
'name' : obj['name'],
'loc' : makeLoc(x, y),
}
children = ""
self.indentLevel = 2
self.offsetX = self.offsetX + x
self.offsetY = self.offsetY + y
# reverse order in group (illustrator exports different ordering in layers and groups)
for child in obj['children'][::-1]:
children = children + self.makeObject(child) + ','
if child != obj['children'][0]:
children = children + '\n'
self.indentLevel = initialIndent
self.offsetX = initialOffsetX
self.offsetY = initialOffsetY
data['children'] = children
return self.reindent(groupLua % data)
def makeLabel(self, obj):
alignmentTypes = {
"center" : "MOAITextBox.CENTER_JUSTIFY",
"left" : "MOAITextBox.LEFT_JUSTIFY",
"right" : "MOAITextBox.RIGHT_JUSTIFY",
}
ry = float(self.params['targetHeight']) / self.screenHeight
x, y, width, height = self.transformCoords(obj['pos'][0], obj['pos'][1], obj['size'][0], obj['size'][1])
text = ('l' in obj['flags']) and 'LocalizedString([=[%s]=])' or '[=[%s]=]'
r, g, b, a = obj['color']
data = {
'name' : obj['name'],
'text' : text % obj['text'],
'fontName' : self.affirmFont(obj['fontName']),
'fontSize' : obj['fontSize'] * ry,
'alignment' : alignmentTypes[obj['alignment']],
'loc' : makeLoc(x, y),
'width' : width,
'height' : height + 0.5 * obj['fontSize'] * ry,
'color' : makeColor(r, g, b, a),
}
return self.reindent(labelLua % data)
def makeButton(self, obj):
initialIndent = self.indentLevel
initialOffsetX = self.offsetX
initialOffsetY = self.offsetY
x, y, width, height = self.transformCoords(obj['pos'][0], obj['pos'][1], obj['size'][0], obj['size'][1])
data = {
'name' : obj['name'],
'loc' : makeLoc(x, y),
}
flags = obj['flags']
file_base, file_ext = os.path.splitext(obj['normalSprite']['fileName'])
self.indentLevel = 1
self.offsetX = self.offsetX + x
self.offsetY = self.offsetY + y
normalSprite = self.makeObject(obj['normalSprite']).split('\n')
normalSprite[0] = normalSprite[0].lstrip()
buttonChildren = " normalSprite = %s," % '\n'.join(normalSprite)
if 'activeSprite' in obj:
activeSprite = self.makeObject(obj['activeSprite']).split('\n')
activeSprite[0] = activeSprite[0].lstrip()
buttonChildren = buttonChildren + "\n activeSprite = %s," % '\n'.join(activeSprite)
elif 'a' in flags:
aDict = obj['normalSprite'].copy()
aDict['fileName'] = file_base + '_active' + file_ext
activeSprite = self.makeObject(aDict).split('\n')
activeSprite[0] = activeSprite[0].lstrip()
buttonChildren = buttonChildren + "\n activeSprite = %s," % '\n'.join(activeSprite)
if 'disabledSprite' in obj:
disabledSprite = self.makeObject(obj['disabledSprite']).split('\n')
disabledSprite[0] = disabledSprite[0].lstrip()
buttonChildren = buttonChildren + "\n disabledSprite = %s," % '\n'.join(disabledSprite)
elif 'd' in flags:
dDict = obj['normalSprite'].copy()
dDict['fileName'] = file_base + '_disabled' + file_ext
disabledSprite = self.makeObject(dDict).split('\n')
disabledSprite[0] = disabledSprite[0].lstrip()
buttonChildren = buttonChildren + "\n disabledSprite = %s," % '\n'.join(disabledSprite)
if 'label' in obj:
label = self.makeObject(obj['label']).split('\n')
label[0] = label[0].lstrip()
buttonChildren = buttonChildren + "\n label = %s," % '\n'.join(label)
children = ''
if 'children' in obj:
self.indentLevel = 2
for child in obj['children']:
children = children + self.makeObject(child) + ','
if child != obj['children'][-1]:
children = children + '\n'
self.indentLevel = 1
buttonChildren = buttonChildren + """\n children = {
%s
}""" % children
data['children'] = buttonChildren
self.indentLevel = initialIndent
self.offsetX = initialOffsetX
self.offsetY = initialOffsetY
return self.reindent(buttonLua % data)
def transformCoords(self, x, y, width, height):
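        """Convert layout pixel coords (top-left origin) to center-origin,
        scaled target coords, applying the current group offsets."""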
x = float(x) - 0.5 * self.screenWidth
y = 0.5 * self.screenHeight - float(y)
rx = float(self.params['targetWidth']) / self.screenWidth
ry = float(self.params['targetHeight']) / self.screenHeight
return -self.offsetX + self.params['offsetX'] + rx * x, -self.offsetY + self.params['offsetY'] + ry * y, rx * float(width), ry * float(height)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--out', help="Output file", default = "out.lua")
parser.add_argument('-width', help="Target width (in virtual coordinates)", default = 320, type=int)
parser.add_argument('-height', help="Target height (in virtual coordinates)", default = 568, type=int)
parser.add_argument('-ox', '--offsetX', help="Canvas offset X (in virtual coordinates)", default = 0, type=int)
parser.add_argument('-oy', '--offsetY', help="Canvas offset Y (in virtual coordinates)", default = 0, type=int)
parser.add_argument('-fonts', help="Output path for font files from layout", default = "fonts", type=str)
parser.add_argument('-fp', '--fontPrefix', help="Font path prefix that will be added to included font file names", default = "", type=str)
parser.add_argument('file', help="Input file")
args = parser.parse_args()
params = {
"offsetX" : args.offsetX,
"offsetY" : args.offsetY,
"targetWidth" : args.width,
"targetHeight" : args.height,
"fontsFolder" : args.fonts,
"fontPrefix" : args.fontPrefix,
}
layoutParser = LayoutParser(params)
with open(args.file) as f_in:
layoutDict = json.load(f_in)
with open(args.out, "w") as f:
f.write(layoutParser.generateLayout(layoutDict, args.out))
if __name__ == '__main__':
main() | mit | -7,365,049,202,083,208,000 | 34.215686 | 150 | 0.547842 | false |
sserrot/champion_relationships | venv/Lib/site-packages/win32/test/test_win32api.py | 1 | 8797 | # General test module for win32api - please add some :)
import unittest
from pywin32_testutil import str2bytes
import win32api, win32con, win32event, winerror
import sys, os
import tempfile
import datetime
class CurrentUserTestCase(unittest.TestCase):
def testGetCurrentUser(self):
name = "%s\\%s" % (win32api.GetDomainName(), win32api.GetUserName())
self.failUnless(name == win32api.GetUserNameEx(win32api.NameSamCompatible))
class TestTime(unittest.TestCase):
def testTimezone(self):
# GetTimeZoneInformation
rc, tzinfo = win32api.GetTimeZoneInformation()
if rc == win32con.TIME_ZONE_ID_DAYLIGHT:
tz_str = tzinfo[4]
tz_time = tzinfo[5]
else:
tz_str = tzinfo[1]
tz_time = tzinfo[2]
# for the sake of code exercise but don't output
tz_str.encode()
if not isinstance(tz_time, datetime.datetime):
tz_time.Format()
def TestDateFormat(self):
DATE_LONGDATE = 2
date_flags = DATE_LONGDATE
win32api.GetDateFormat(0, date_flags, None)
win32api.GetDateFormat(0, date_flags, 0)
win32api.GetDateFormat(0, date_flags, datetime.datetime.now())
win32api.GetDateFormat(0, date_flags, time.time())
def TestTimeFormat(self):
win32api.GetTimeFormat(0, 0, None)
win32api.GetTimeFormat(0, 0, 0)
win32api.GetTimeFormat(0, 0, datetime.datetime.now())
win32api.GetTimeFormat(0, 0, time.time())
class Registry(unittest.TestCase):
key_name = r'PythonTestHarness\Whatever'
def test1(self):
# This used to leave a stale exception behind.
def reg_operation():
hkey = win32api.RegCreateKey(win32con.HKEY_CURRENT_USER, self.key_name)
x = 3/0 # or a statement like: raise 'error'
# do the test
try:
try:
try:
reg_operation()
except:
1/0 # Force exception
finally:
win32api.RegDeleteKey(win32con.HKEY_CURRENT_USER, self.key_name)
except ZeroDivisionError:
pass
def testValues(self):
key_name = r'PythonTestHarness\win32api'
## tuples containing value name, value type, data
values=(
(None, win32con.REG_SZ, 'This is default unnamed value'),
('REG_SZ', win32con.REG_SZ,'REG_SZ text data'),
('REG_EXPAND_SZ', win32con.REG_EXPAND_SZ, '%systemdir%'),
## REG_MULTI_SZ value needs to be a list since strings are returned as a list
('REG_MULTI_SZ', win32con.REG_MULTI_SZ, ['string 1','string 2','string 3','string 4']),
('REG_MULTI_SZ_empty', win32con.REG_MULTI_SZ, []),
('REG_DWORD', win32con.REG_DWORD, 666),
('REG_QWORD_INT', win32con.REG_QWORD, 99),
('REG_QWORD', win32con.REG_QWORD, 2**33),
('REG_BINARY', win32con.REG_BINARY, str2bytes('\x00\x01\x02\x03\x04\x05\x06\x07\x08\x01\x00')),
)
hkey = win32api.RegCreateKey(win32con.HKEY_CURRENT_USER, key_name)
for value_name, reg_type, data in values:
win32api.RegSetValueEx(hkey, value_name, None, reg_type, data)
for value_name, orig_type, orig_data in values:
data, typ=win32api.RegQueryValueEx(hkey, value_name)
self.assertEqual(typ, orig_type)
self.assertEqual(data, orig_data)
def testNotifyChange(self):
def change():
hkey = win32api.RegCreateKey(win32con.HKEY_CURRENT_USER, self.key_name)
try:
win32api.RegSetValue(hkey, None, win32con.REG_SZ, "foo")
finally:
win32api.RegDeleteKey(win32con.HKEY_CURRENT_USER, self.key_name)
evt = win32event.CreateEvent(None,0,0,None)
## REG_NOTIFY_CHANGE_LAST_SET - values
## REG_CHANGE_NOTIFY_NAME - keys
## REG_NOTIFY_CHANGE_SECURITY - security descriptor
## REG_NOTIFY_CHANGE_ATTRIBUTES
win32api.RegNotifyChangeKeyValue(win32con.HKEY_CURRENT_USER,1,win32api.REG_NOTIFY_CHANGE_LAST_SET,evt,True)
ret_code=win32event.WaitForSingleObject(evt,0)
# Should be no change.
self.failUnless(ret_code==win32con.WAIT_TIMEOUT)
change()
# Our event should now be in a signalled state.
ret_code=win32event.WaitForSingleObject(evt,0)
self.failUnless(ret_code==win32con.WAIT_OBJECT_0)
class FileNames(unittest.TestCase):
def testShortLongPathNames(self):
try:
me = __file__
except NameError:
me = sys.argv[0]
fname = os.path.abspath(me).lower()
short_name = win32api.GetShortPathName(fname).lower()
long_name = win32api.GetLongPathName(short_name).lower()
self.failUnless(long_name==fname, \
"Expected long name ('%s') to be original name ('%s')" % (long_name, fname))
self.failUnlessEqual(long_name, win32api.GetLongPathNameW(short_name).lower())
long_name = win32api.GetLongPathNameW(short_name).lower()
self.failUnless(type(long_name)==str, "GetLongPathNameW returned type '%s'" % (type(long_name),))
self.failUnless(long_name==fname, \
"Expected long name ('%s') to be original name ('%s')" % (long_name, fname))
def testShortUnicodeNames(self):
try:
me = __file__
except NameError:
me = sys.argv[0]
fname = os.path.abspath(me).lower()
# passing unicode should cause GetShortPathNameW to be called.
short_name = win32api.GetShortPathName(str(fname)).lower()
self.failUnless(isinstance(short_name, str))
long_name = win32api.GetLongPathName(short_name).lower()
self.failUnless(long_name==fname, \
"Expected long name ('%s') to be original name ('%s')" % (long_name, fname))
self.failUnlessEqual(long_name, win32api.GetLongPathNameW(short_name).lower())
long_name = win32api.GetLongPathNameW(short_name).lower()
self.failUnless(type(long_name)==str, "GetLongPathNameW returned type '%s'" % (type(long_name),))
self.failUnless(long_name==fname, \
"Expected long name ('%s') to be original name ('%s')" % (long_name, fname))
def testLongLongPathNames(self):
# We need filename where the FQN is > 256 - simplest way is to create a
# 250 character directory in the cwd (except - cwd may be on a drive
# not supporting \\\\?\\ (eg, network share) - so use temp.
import win32file
basename = "a" * 250
# but we need to ensure we use the 'long' version of the
# temp dir for later comparison.
long_temp_dir = win32api.GetLongPathNameW(tempfile.gettempdir())
fname = "\\\\?\\" + os.path.join(long_temp_dir, basename)
try:
win32file.CreateDirectoryW(fname, None)
except win32api.error as details:
if details.winerror!=winerror.ERROR_ALREADY_EXISTS:
raise
try:
# GetFileAttributes automatically calls GetFileAttributesW when
# passed unicode
try:
attr = win32api.GetFileAttributes(fname)
except win32api.error as details:
if details.winerror != winerror.ERROR_FILENAME_EXCED_RANGE:
raise
attr = win32api.GetFileAttributes(str(fname))
self.failUnless(attr & win32con.FILE_ATTRIBUTE_DIRECTORY, attr)
long_name = win32api.GetLongPathNameW(fname)
self.failUnlessEqual(long_name.lower(), fname.lower())
finally:
win32file.RemoveDirectory(fname)
class FormatMessage(unittest.TestCase):
def test_FromString(self):
msg = "Hello %1, how are you %2?"
inserts = ["Mark", "today"]
result = win32api.FormatMessage(win32con.FORMAT_MESSAGE_FROM_STRING,
msg, # source
0, # ID
0, # LangID
inserts)
self.assertEqual(result, "Hello Mark, how are you today?")
class Misc(unittest.TestCase):
def test_last_error(self):
for x in (0, 1, -1, winerror.TRUST_E_PROVIDER_UNKNOWN):
win32api.SetLastError(x)
self.failUnlessEqual(x, win32api.GetLastError())
def testVkKeyScan(self):
# hopefully ' ' doesn't depend on the locale!
self.failUnlessEqual(win32api.VkKeyScan(' '), 32)
def testVkKeyScanEx(self):
# hopefully ' ' doesn't depend on the locale!
self.failUnlessEqual(win32api.VkKeyScanEx(' ', 0), 32)
if __name__ == '__main__':
unittest.main()
| mit | 4,030,860,234,814,456,300 | 42.334975 | 115 | 0.605093 | false |
larsjsol/shellpic | setup.py | 1 | 1319 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# Lars Jørgen Solberg <[email protected]> 2014
#
from distutils.core import setup
setup(
name='Shellpic',
version='1.6.2',
author='Lars Jørgen Solberg',
author_email='[email protected]',
packages=['shellpic'],
scripts=['bin/shellpic'],
url='https://github.com/larsjsol/shellpic',
license='GPLv3',
description='Display images using escape codes',
long_description=open('README.rst').read(),
install_requires=[
"Pillow >= 2.6",
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Operating System :: POSIX",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Artistic Software",
"Topic :: Games/Entertainment",
"Topic :: Multimedia :: Graphics",
"Topic :: Multimedia :: Graphics :: Viewers",
"Topic :: Utilities",
],
)
| gpl-3.0 | 2,271,304,520,621,388,500 | 30.357143 | 75 | 0.589218 | false |
djkskqyr3/CSipSimple | jni/pjsip/sources/tests/pjsua/scripts-sipp/transfer-attended.py | 14 | 1239 | # $Id: transfer-attended.py 4188 2012-06-29 09:01:17Z nanang $
#
import inc_const as const
PJSUA = ["--null-audio", # UA0
"--null-audio", # UA1
"--null-audio" # UA2
]
PJSUA_EXPECTS = [
# A calls B
[0, "", "m"],
[0, "", "$PJSUA_URI[1]"],
[0, const.STATE_CALLING, ""],
[1, const.EVENT_INCOMING_CALL, "a"],
[1, "", "200"],
[0, const.STATE_CONFIRMED, ""],
[1, const.STATE_CONFIRMED, ""],
# B holds A
[1, "", "H"],
[0, const.MEDIA_HOLD, ""],
[1, const.MEDIA_HOLD, ""],
# B calls C
[1, "", "m"],
[1, "", "$PJSUA_URI[2]"],
[1, const.STATE_CALLING, ""],
[2, const.EVENT_INCOMING_CALL, "a"],
[2, "", "200"],
[1, const.STATE_CONFIRMED, ""],
[2, const.STATE_CONFIRMED, ""],
# B holds C
[1, "", "]"],
[1, "", "H"],
[2, const.MEDIA_HOLD, ""],
[1, const.MEDIA_HOLD, ""],
[1, "", "]"],
# B transfer A to C
[1, "", "X"],
[1, "", "1"],
[0, "Call .* is being transfered", ""],
[1, "Subscription state .* ACCEPTED", ""],
[0, const.STATE_CALLING, ""],
[2, "Call .* is being replaced", ""],
[1, "call transfered successfully", ""],
[0, const.MEDIA_ACTIVE, ""],
[2, const.MEDIA_ACTIVE, ""],
[1, const.STATE_DISCONNECTED, ""]
]
| lgpl-3.0 | 1,098,058,397,780,436,900 | 22.826923 | 62 | 0.481033 | false |
brake/python-utl | setup.py | 1 | 2357 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# ------------------------------------------------------------------------------
# Name: setup.py
# Package:
# Project: utl
#
# Created: 18.12.16 21:04
# Copyright 2016 © Constantin Roganov
# License: The MIT License
# ------------------------------------------------------------------------------
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
from __future__ import unicode_literals, absolute_import
from setuptools import setup, find_packages
import sys
__author__ = 'Constantin Roganov'
__version__ = '2.0.0'
install_requires = ['future >= 0.16.0'] if sys.version_info[0] == 2 else []
setup(
name='utl',
version=__version__,
packages=find_packages(),
zip_safe=True,
install_requires=install_requires,
author=__author__,
author_email='rccbox at gmail dot com',
description='My Python utilities for every day',
long_description=open('README.md').read(),
license=open('LICENSE.txt').read(),
url='https://github.com/brake/python-utl',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
],
)
| mit | 4,021,895,776,785,961,000 | 38.932203 | 80 | 0.631579 | false |
kchodorow/tensorflow | tensorflow/python/lib/io/tf_record.py | 21 | 3821 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""For reading and writing TFRecords files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import errors
from tensorflow.python.util import compat
class TFRecordCompressionType(object):
"""The type of compression for the record."""
NONE = 0
ZLIB = 1
GZIP = 2
# NOTE(vrv): This will eventually be converted into a proto. to match
# the interface used by the C++ RecordWriter.
class TFRecordOptions(object):
"""Options used for manipulating TFRecord files."""
compression_type_map = {
TFRecordCompressionType.ZLIB: "ZLIB",
TFRecordCompressionType.GZIP: "GZIP",
TFRecordCompressionType.NONE: ""
}
def __init__(self, compression_type):
self.compression_type = compression_type
@classmethod
def get_compression_type_string(cls, options):
if not options:
return ""
return cls.compression_type_map[options.compression_type]
def tf_record_iterator(path, options=None):
"""An iterator that read the records from a TFRecords file.
Args:
path: The path to the TFRecords file.
options: (optional) A TFRecordOptions object.
Yields:
Strings.
Raises:
IOError: If `path` cannot be opened for reading.
"""
compression_type = TFRecordOptions.get_compression_type_string(options)
with errors.raise_exception_on_not_ok_status() as status:
reader = pywrap_tensorflow.PyRecordReader_New(
compat.as_bytes(path), 0, compat.as_bytes(compression_type), status)
if reader is None:
raise IOError("Could not open %s." % path)
while True:
try:
with errors.raise_exception_on_not_ok_status() as status:
reader.GetNext(status)
except errors.OutOfRangeError:
break
yield reader.record()
reader.Close()
class TFRecordWriter(object):
"""A class to write records to a TFRecords file.
This class implements `__enter__` and `__exit__`, and can be used
in `with` blocks like a normal file.
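  Example (illustrative path):
    with TFRecordWriter("/tmp/data.tfrecords") as writer:
      writer.write(record)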
@@__init__
@@write
@@close
"""
# TODO(josh11b): Support appending?
def __init__(self, path, options=None):
"""Opens file `path` and creates a `TFRecordWriter` writing to it.
Args:
path: The path to the TFRecords file.
options: (optional) A TFRecordOptions object.
Raises:
IOError: If `path` cannot be opened for writing.
"""
compression_type = TFRecordOptions.get_compression_type_string(options)
with errors.raise_exception_on_not_ok_status() as status:
self._writer = pywrap_tensorflow.PyRecordWriter_New(
compat.as_bytes(path), compat.as_bytes(compression_type), status)
def __enter__(self):
"""Enter a `with` block."""
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Exit a `with` block, closing the file."""
self.close()
def write(self, record):
"""Write a string record to the file.
Args:
record: str
"""
self._writer.WriteRecord(record)
def close(self):
"""Close the file."""
self._writer.Close()
| apache-2.0 | 4,660,489,711,548,110,000 | 28.392308 | 80 | 0.681497 | false |
TightSquad/HABIP | pi/spi/spi.py | 1 | 2191 | """
author: Connor Goldberg
project: High Altitude Balloon Instrumentation Platform
description: SPI Interface
"""
import spidev
import common
import logger
class spi(object):
"""
Abstract the Pi's SPI interface
"""
def __init__(self, busIndex=0, deviceIndex=0, maxSpeed=150000, logDebug=False):
self.logger = logger.logger("spi")
if not logDebug:
self.logger.changeLevel(logger.logger.INFO) # Don't log DEBUG
self.busIndex = busIndex
self.deviceIndex = deviceIndex
self.maxSpeed = maxSpeed
self.interface = spidev.SpiDev()
try:
self.interface.open(busIndex,deviceIndex)
self.interface.max_speed_hz = maxSpeed
self.interface.cshigh = False
self.isOpen = True
except Exception as e:
self.interface = None
self.isOpen = False
self.logger.log.error("Could not open SPI interface: {}".format(e))
self.logger.log.info("Opened SPI interface: /dev/spidev{}.{}".format(
self.busIndex, self.deviceIndex))
def close(self):
self.logger.log.info("Closed SPI interface: /dev/spidev{}.{}".format(
self.busIndex, self.deviceIndex))
self.interface.close()
def sendString(self, string):
"""
Sends a string over the spi interface
"""
packet = [ord(c) for c in string]
response = []
for byte in packet:
resp = self.sendByte(byte)
if resp:
response.append(resp)
self.logger.log.debug("sent string: {}".format(string))
return response
def sendByte(self, byte):
"""
Sends a single byte over the spi interface
"""
resp = None
try:
resp = self.interface.xfer2([byte])[0]
self.logger.log.debug("sent byte: {}, received byte: {}"
.format(hex(byte), hex(resp)))
except Exception as e:
self.logger.log.error("could not send byte: {}, Exception: {}"
.format(hex(byte), e))
return resp
def readByte(self):
"""
		Sends a don't-care byte and reads the response
"""
return self.sendByte(byte=ord('X'))
def readChar(self):
"""
Reads a byte as an ASCII character
"""
return chr(self.readByte())
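	def readString(self, length):
		"""
		Reads length bytes over the spi interface and returns them as a string
		(convenience counterpart to sendString)
		"""
		return "".join(self.readChar() for _ in range(length))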
########### Testing #############
if __name__ == "__main__":
mySpi = spi()
data = "{00:B4:ZGY}"
mySpi.sendString(data)
	resp = mySpi.readString(len(data))
print resp
mySpi.close()
| gpl-3.0 | -2,193,306,540,012,061,200 | 20.480392 | 80 | 0.666819 | false |
hellhound/dentexchange | dentexchange/apps/membership/tests/test_coupon_validation_view.py | 2 | 2822 | # -*- coding:utf-8 -*-
import unittest
import mock
from ..views import CouponValidationView
class CouponValidationViewTestCase(unittest.TestCase):
@mock.patch('membership.views.Coupon')
@mock.patch('membership.views.JSONResponseMixin.render_json_response')
def test_get_should_call_render_json_response_with_status_ok_and_discount_when_valid_coupon(
self, render_json_response, coupon_class):
# setup
view = CouponValidationView()
coupon_code = 'acouponcode'
discount = mock.Mock()
request = mock.Mock()
request.GET.get.return_value = coupon_code
view.request = request
is_valid = coupon_class.objects.is_valid
is_valid.return_value = True
get_discount = coupon_class.objects.get_discount
get_discount.return_value = discount
# action
response = view.get(request)
# assert
self.assertTupleEqual((coupon_code,), is_valid.call_args[0])
self.assertTupleEqual((coupon_code,), get_discount.call_args[0])
self.assertDictEqual(dict(status='ok', discount=discount),
render_json_response.call_args[0][0])
self.assertEqual(id(render_json_response.return_value), id(response))
@mock.patch('membership.views.Coupon')
@mock.patch('membership.views.JSONResponseMixin.render_json_response')
def test_get_should_call_render_json_response_with_status_error_when_invalid_coupon(
self, render_json_response, coupon_class):
# setup
view = CouponValidationView()
coupon_code = 'aninvalidcoupon'
request = mock.Mock()
request.GET.get.return_value = coupon_code
view.request = request
is_valid = coupon_class.objects.is_valid
is_valid.return_value = False
# action
response = view.get(request)
# assert
self.assertTupleEqual((coupon_code,), is_valid.call_args[0])
self.assertTupleEqual((dict(status='invalid'),),
render_json_response.call_args[0])
self.assertEqual(id(render_json_response.return_value), id(response))
@mock.patch('membership.views.Coupon')
@mock.patch('membership.views.JSONResponseMixin.render_json_response')
def test_get_should_call_render_json_response_with_status_error_when_coupon_code_not_in_request_get(
self, render_json_response, coupon_class):
# setup
view = CouponValidationView()
request = mock.Mock()
request.GET.get.return_value = None
view.request = request
is_valid = coupon_class.objects.is_valid
is_valid.return_value = False
# action
view.get(request)
# assert
self.assertTupleEqual((dict(status='invalid'),),
render_json_response.call_args[0])
| bsd-3-clause | -5,178,683,904,700,829,000 | 37.135135 | 104 | 0.652374 | false |
THEMVFFINMAN/Python-Games | PyneSweeper.py | 2 | 5628 | import curses
from random import randint
# Setting up the curses screen
screen = curses.initscr()
curses.noecho()
curses.curs_set(2)
screen.keypad(1)
def createBoard():
#Clearly the most efficient way to initiate this
board = [ ['+', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '+'],
['A', '_', '_', '_', '_', '_', '_', '_', '_', '_', '_', 'A'],
['B', '_', '_', '_', '_', '_', '_', '_', '_', '_', '_', 'B'],
['C', '_', '_', '_', '_', '_', '_', '_', '_', '_', '_', 'C'],
['D', '_', '_', '_', '_', '_', '_', '_', '_', '_', '_', 'D'],
['E', '_', '_', '_', '_', '_', '_', '_', '_', '_', '_', 'E'],
['F', '_', '_', '_', '_', '_', '_', '_', '_', '_', '_', 'F'],
['G', '_', '_', '_', '_', '_', '_', '_', '_', '_', '_', 'G'],
['H', '_', '_', '_', '_', '_', '_', '_', '_', '_', '_', 'H'],
['I', '_', '_', '_', '_', '_', '_', '_', '_', '_', '_', 'I'],
['J', '_', '_', '_', '_', '_', '_', '_', '_', '_', '_', 'J'],
['+', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '+']
]
return board
def setMines(secretBoard, board, ox, oy):
# Will probably add setting to allow a prespecified mine amount
mines = 0
	# Making it so the screen and array play nice with each other
sy = oy - 6
sx = (ox-2)/2
	# Place the mines (15 in total)
while mines <= 14:
rx = randint(1,10)
ry = randint(1,10)
# If the selected piece isn't a mine and it's not the imputted coordinate
if secretBoard[ry][rx] != "*" and not (sx == rx and sy == ry) :
secretBoard[ry][rx] = "*"
mines = mines + 1
# This goes through and figures the numbering out based on the surrounding mines
for lx in range(1, 11):
for ly in range(1, 11):
closeMines = 0
if secretBoard[lx][ly] != "*":
for lx1 in range (-1, 2):
for ly1 in range (-1, 2):
if secretBoard[lx + lx1][ly + ly1] == "*":
closeMines = closeMines + 1
secretBoard[lx][ly] = str(closeMines)
screen.move(oy, ox)
screen.addstr(secretBoard[sy][sx])
board[sy][sx] = secretBoard[sy][sx]
if secretBoard[sy][sx] == "0":
cleanHouse(board, secretBoard, sx, sy)
screen.move(oy, ox)
return secretBoard, board
def start(board):
# This is the default screen
Menu = " +=====================+\n"
Menu = Menu + " | |\n"
Menu = Menu + " | PyneSweeper |\n"
Menu = Menu + " | |\n"
Menu = Menu + " +=====================+\n\n"
screen.move(0, 0)
screen.addstr(Menu)
printBoard(board)
screen.refresh()
def printBoard(board):
for x in range (len(board)):
row = str(board[x])
prettyRow = " " + row.replace("\'", "").replace(",", "").replace("[", "").replace("]", "") + '\n'
screen.addstr(prettyRow)
#formatting = "\n 'WASD' to move\n\t'E' to Step\n\t'F' to Flag\n\t'Q' to Quit"
#screen.addstr(20, 0, formatting)
def printBoards(board1, board2):
printBoard(board1)
printBoard(board2)
def cleanHouse(board, secretBoard, x, y):
# This is the recursive algorithm that opens up the field when a 0 is hit
# It was much prettier before I added the curses implementation :(
for x1 in range (-1, 2):
for y1 in range(-1, 2):
if board[y + y1][x + x1] == '_' and 0 < y + y1 < 11 and 0 < x + x1 < 11:
sx = ((x + x1) * 2) + 2
sy = (y + y1) + 6
screen.move(sy, sx)
screen.addstr(secretBoard[y + y1][x + x1])
board[y + y1][x + x1] = secretBoard[y + y1][x + x1]
if secretBoard[y + y1][x + x1] == "0":
cleanHouse(board, secretBoard, x + x1, y + y1)
def makeGuess(x, y):
# This part is a test just for the first guess, could probably all be reworked later
if (x != -1 and y != -1):
screen.move(y, x)
else:
coord = curses.getsyx()
y = coord[0]
x = coord[1]
# Simple boundary checking
while True:
event = screen.getch()
if event == ord('w') or event == ord('W'):
if y > 7:
y = y - 1
screen.move(y,x)
elif event == ord('a') or event == ord('A'):
if x > 4:
x = x - 2
screen.move(y,x)
elif event == ord('s') or event == ord('S'):
if y < 16:
y = y + 1
screen.move(y,x)
elif event == ord('d') or event == ord('D'):
if x < 22:
x = x + 2
screen.move(y,x)
elif event == ord('e') or event == ord('E'):
return (x, y, False)
elif event == ord('f') or event == ord('F'):
return (x, y, True)
elif event == ord('q') or event == ord('Q'):
curses.endwin()
quit(1)
def main():
# Sets everything up including the first guess
board = createBoard()
secretBoard = createBoard()
start(board)
x, y, flag = makeGuess(4, 7)
# Ensures you don't guess a bomb first try
secretBoard, board = setMines(secretBoard, board, x, y)
# The main loop
while True:
sx, sy, flag = makeGuess(-1, -1)
x = (sx - 2) / 2
y = sy - 6
if flag and board[y][x] == '_':
board[y][x] = 'F'
screen.addstr('F')
elif secretBoard[y][x] != '*' and not flag:
screen.addstr(secretBoard[y][x])
board[y][x] = secretBoard[y][x]
if secretBoard[y][x] == '0':
cleanHouse(board, secretBoard, x, y)
elif not flag:
lose(secretBoard)
if checkWin(board):
win(secretBoard)
screen.move(sy, sx)
def checkWin(board):
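	# The player wins once no unrevealed ('_') cells remain on the board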
screen.move(20,0)
for x in range(1, 11):
for y in range(1, 11):
if board[y][x] == "_":
return False
return True
def win(board):
screen.move(6, 0)
printBoard(board)
screen.addstr("\n You Win!\n Press any key to quit")
event = screen.getch()
curses.endwin()
exit(1)
def lose(board):
screen.move(6, 0)
printBoard(board)
screen.addstr("\n KABOOM!\n Press any key to quit")
event = screen.getch()
curses.endwin()
exit(1)
if __name__ == "__main__":
main()
| mit | 3,310,621,568,635,824,000 | 25.299065 | 100 | 0.512971 | false |
mythmon/kitsune | kitsune/users/views.py | 3 | 28373 | import os
from ast import literal_eval
from datetime import datetime
from django.conf import settings
from django.contrib import auth, messages
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.http import (HttpResponsePermanentRedirect, HttpResponseRedirect,
Http404, HttpResponseForbidden)
from django.views.decorators.cache import never_cache
from django.views.decorators.http import (require_http_methods, require_GET,
require_POST)
from django.shortcuts import get_object_or_404, render, redirect
from django.utils.http import base36_to_int
from django.utils.translation import ugettext as _
# from axes.decorators import watch_login
from badger.models import Award
from mobility.decorators import mobile_template
from session_csrf import anonymous_csrf
from statsd import statsd
from tidings.models import Watch
from tidings.tasks import claim_watches
from kitsune import users as constants
from kitsune.access.decorators import (
logout_required, login_required, permission_required)
from kitsune.questions.models import Question
from kitsune.questions.utils import (
num_questions, num_answers, num_solutions, mark_content_as_spam)
from kitsune.sumo import email_utils
from kitsune.sumo.decorators import ssl_required, json_view
from kitsune.sumo.templatetags.jinja_helpers import urlparams
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.utils import get_next_url, simple_paginate
from kitsune.upload.tasks import _create_image_thumbnail
from kitsune.users.forms import (
ProfileForm, AvatarForm, EmailConfirmationForm, AuthenticationForm,
EmailChangeForm, SetPasswordForm, PasswordChangeForm, SettingsForm,
ForgotUsernameForm, RegisterForm, PasswordResetForm)
from kitsune.users.templatetags.jinja_helpers import profile_url
from kitsune.users.models import (
CONTRIBUTOR_GROUP, Group, Profile, RegistrationProfile, EmailChange,
Deactivation)
from kitsune.users.utils import (
handle_login, handle_register, try_send_email_with_form, deactivate_user)
from kitsune.wiki.models import (
user_num_documents, user_documents, user_redirects)
@ssl_required
@anonymous_csrf
@logout_required
@require_http_methods(['GET', 'POST'])
def user_auth(request, contributor=False, register_form=None, login_form=None):
"""Try to log the user in, or register a user.
POSTs from these forms do not come back to this view, but instead go to the
login and register views, which may redirect back to this in case of error.
"""
next_url = get_next_url(request) or reverse('home')
if login_form is None:
login_form = AuthenticationForm()
if register_form is None:
register_form = RegisterForm()
return render(request, 'users/auth.html', {
'login_form': login_form,
'register_form': register_form,
'contributor': contributor,
'next_url': next_url})
@ssl_required
@anonymous_csrf
# @watch_login
@mobile_template('users/{mobile/}login.html')
def login(request, template):
"""Try to log the user in."""
if request.method == 'GET' and not request.MOBILE:
url = reverse('users.auth') + '?' + request.GET.urlencode()
return HttpResponsePermanentRedirect(url)
next_url = get_next_url(request) or reverse('home')
only_active = request.POST.get('inactive', '0') != '1'
form = handle_login(request, only_active=only_active)
if request.user.is_authenticated():
# Add a parameter so we know the user just logged in.
# fpa = "first page authed" or something.
next_url = urlparams(next_url, fpa=1)
res = HttpResponseRedirect(next_url)
max_age = (None if settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
else settings.SESSION_COOKIE_AGE)
res.set_cookie(settings.SESSION_EXISTS_COOKIE,
'1',
secure=False,
max_age=max_age)
return res
if request.MOBILE:
return render(request, template, {
'form': form,
'next_url': next_url})
return user_auth(request, login_form=form)
@ssl_required
@require_POST
def logout(request):
"""Log the user out."""
auth.logout(request)
statsd.incr('user.logout')
res = HttpResponseRedirect(get_next_url(request) or reverse('home'))
res.delete_cookie(settings.SESSION_EXISTS_COOKIE)
return res
@ssl_required
@logout_required
@require_http_methods(['GET', 'POST'])
@anonymous_csrf
@mobile_template('users/{mobile/}')
def register(request, template, contributor=False):
"""Register a new user.
:param contributor: If True, this is for registering a new contributor.
"""
if request.method == 'GET' and not request.MOBILE:
url = reverse('users.auth') + '?' + request.GET.urlencode()
return HttpResponsePermanentRedirect(url)
form = handle_register(request)
if form.is_valid():
return render(request, template + 'register_done.html')
if request.MOBILE:
return render(request, template + 'register.html', {
'form': form})
return user_auth(request, register_form=form, contributor=contributor)
def register_contributor(request):
"""Register a new user from the superheroes page."""
return register(request, contributor=True)
@anonymous_csrf # This view renders a login form
@mobile_template('users/{mobile/}activate.html')
def activate(request, template, activation_key, user_id=None):
"""Activate a User account."""
activation_key = activation_key.lower()
if user_id:
user = get_object_or_404(User, id=user_id)
else:
user = RegistrationProfile.objects.get_user(activation_key)
if user and user.is_active:
messages.add_message(
request, messages.INFO,
_(u'Your account is already activated, log in below.'))
return HttpResponseRedirect(reverse('users.login'))
account = RegistrationProfile.objects.activate_user(activation_key,
request)
my_questions = None
form = AuthenticationForm()
if account:
# Claim anonymous watches belonging to this email
statsd.incr('user.activate')
claim_watches.delay(account)
my_questions = Question.objects.filter(creator=account)
# Update created time to current time
for q in my_questions:
q.created = datetime.now()
q.save(update=True)
return render(request, template, {
'account': account, 'questions': my_questions,
'form': form})
@anonymous_csrf
@mobile_template('users/{mobile/}')
def resend_confirmation(request, template):
"""Resend confirmation email."""
if request.method == 'POST':
form = EmailConfirmationForm(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
try:
reg_prof = RegistrationProfile.objects.get(
user__email=email)
if not reg_prof.user.is_active:
form = try_send_email_with_form(
RegistrationProfile.objects.send_confirmation_email,
form, 'email',
reg_prof)
else:
form = try_send_email_with_form(
RegistrationProfile.objects.send_confirmation_email,
form, 'email',
reg_prof,
text_template='users/email/already_activated.ltxt',
html_template='users/email/already_activated.html',
subject=_('Account already activated'))
except RegistrationProfile.DoesNotExist:
# Send already active email if user exists
try:
user = User.objects.get(email=email, is_active=True)
current_site = Site.objects.get_current()
email_kwargs = {'domain': current_site.domain,
'login_url': reverse('users.login')}
subject = _('Account already activated')
@email_utils.safe_translation
def _make_mail(locale):
mail = email_utils.make_mail(
subject=subject,
text_template='users/email/already_activated.ltxt',
html_template='users/email/already_activated.html',
context_vars=email_kwargs,
from_email=settings.DEFAULT_FROM_EMAIL,
to_email=user.email)
return mail
email_utils.send_messages(
[_make_mail(request.LANGUAGE_CODE)])
except User.DoesNotExist:
# Don't leak existence of email addresses.
pass
# Form may now be invalid if email failed to send.
if form.is_valid():
return render(
request, template + 'resend_confirmation_done.html',
{'email': email})
else:
form = EmailConfirmationForm()
return render(request, template + 'resend_confirmation.html', {
'form': form})
@login_required
@require_http_methods(['GET', 'POST'])
@mobile_template('users/{mobile/}')
def change_email(request, template):
"""Change user's email. Send confirmation first."""
if request.method == 'POST':
form = EmailChangeForm(request.user, request.POST)
u = request.user
if form.is_valid() and u.email != form.cleaned_data['email']:
# Delete old registration profiles.
EmailChange.objects.filter(user=request.user).delete()
# Create a new registration profile and send a confirmation email.
email_change = EmailChange.objects.create_profile(
user=request.user, email=form.cleaned_data['email'])
EmailChange.objects.send_confirmation_email(
email_change, form.cleaned_data['email'])
return render(
request, template + 'change_email_done.html',
{'email': form.cleaned_data['email']})
else:
form = EmailChangeForm(request.user,
initial={'email': request.user.email})
return render(request, template + 'change_email.html', {'form': form})
@require_GET
def confirm_change_email(request, activation_key):
"""Confirm the new email for the user."""
activation_key = activation_key.lower()
email_change = get_object_or_404(EmailChange,
activation_key=activation_key)
u = email_change.user
old_email = u.email
# Check that this new email isn't a duplicate in the system.
new_email = email_change.email
duplicate = User.objects.filter(email=new_email).exists()
if not duplicate:
# Update user's email.
u.email = new_email
u.save()
# Delete the activation profile now, we don't need it anymore.
email_change.delete()
return render(request, 'users/change_email_complete.html', {
'old_email': old_email, 'new_email': new_email,
'username': u.username, 'duplicate': duplicate})
@require_GET
@mobile_template('users/{mobile/}profile.html')
def profile(request, template, username):
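    """Display a user's public profile."""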
    # The browser replaces '+' in URLs with ' ', but since we never have ' ' in
    # URLs we can assume every time we see ' ' it was a '+' that was replaced.
    # We do this to deal with legacy usernames that have a '+' in them.
username = username.replace(' ', '+')
user = User.objects.filter(username=username).first()
if not user:
try:
user = get_object_or_404(User, id=username)
except ValueError:
raise Http404('No Profile matches the given query.')
return redirect(reverse('users.profile', args=(user.username,)))
user_profile = get_object_or_404(Profile, user__id=user.id)
if not (request.user.has_perm('users.deactivate_users') or
user_profile.user.is_active):
raise Http404('No Profile matches the given query.')
groups = user_profile.user.groups.all()
return render(request, template, {
'profile': user_profile,
'awards': Award.objects.filter(user=user_profile.user),
'groups': groups,
'num_questions': num_questions(user_profile.user),
'num_answers': num_answers(user_profile.user),
'num_solutions': num_solutions(user_profile.user),
'num_documents': user_num_documents(user_profile.user)})
@login_required
@require_POST
def close_account(request):
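    """Deactivate the requesting user's account and scrub identifying data."""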
# Clear the profile
profile = get_object_or_404(Profile, user__id=request.user.id)
profile.clear()
profile.save()
# Deactivate the user and change key information
request.user.username = 'user%s' % request.user.id
request.user.email = '%[email protected]' % request.user.id
request.user.is_active = False
# Remove from all groups
request.user.groups.clear()
request.user.save()
# Log the user out
auth.logout(request)
return render(request, 'users/close_account.html')
@require_POST
@permission_required('users.deactivate_users')
def deactivate(request, mark_spam=False):
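    """Deactivate the given user and optionally mark their content as spam."""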
user = get_object_or_404(User, id=request.POST['user_id'], is_active=True)
deactivate_user(user, request.user)
if mark_spam:
mark_content_as_spam(user, request.user)
return HttpResponseRedirect(profile_url(user))
@require_GET
@permission_required('users.deactivate_users')
def deactivation_log(request):
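    """Show a paginated log of account deactivations."""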
deactivations_qs = Deactivation.objects.order_by('-date')
deactivations = simple_paginate(request, deactivations_qs,
per_page=constants.DEACTIVATIONS_PER_PAGE)
return render(request, 'users/deactivation_log.html', {
'deactivations': deactivations})
@require_GET
def documents_contributed(request, username):
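    """List the wiki documents and redirects contributed by the given user."""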
user_profile = get_object_or_404(
Profile, user__username=username, user__is_active=True)
return render(request, 'users/documents_contributed.html', {
'profile': user_profile,
'documents': user_documents(user_profile.user),
'redirects': user_redirects(user_profile.user)})
@login_required
@require_http_methods(['GET', 'POST'])
@mobile_template('users/{mobile/}edit_settings.html')
def edit_settings(request, template):
"""Edit user settings"""
if request.method == 'POST':
form = SettingsForm(request.POST)
if form.is_valid():
form.save_for_user(request.user)
messages.add_message(request, messages.INFO,
_(u'Your settings have been saved.'))
return HttpResponseRedirect(reverse('users.edit_settings'))
# Invalid form
return render(request, template, {'form': form})
# Pass the current user's settings as the initial values.
values = request.user.settings.values()
initial = dict()
for v in values:
try:
# Uses ast.literal_eval to convert 'False' => False etc.
# TODO: Make more resilient.
initial[v['name']] = literal_eval(v['value'])
except (SyntaxError, ValueError):
# Attempted to convert the string value to a Python value
# but failed so leave it a string.
initial[v['name']] = v['value']
form = SettingsForm(initial=initial)
return render(request, template, {'form': form})
@login_required
@require_http_methods(['GET', 'POST'])
def edit_watch_list(request):
"""Edit watch list"""
watches = Watch.objects.filter(user=request.user).order_by('content_type')
watch_list = []
for w in watches:
if w.content_object is not None:
if w.content_type.name == 'question':
# Only list questions that are not archived
if not w.content_object.is_archived:
watch_list.append(w)
else:
watch_list.append(w)
if request.method == 'POST':
for w in watch_list:
w.is_active = 'watch_%s' % w.id in request.POST
w.save()
return render(request, 'users/edit_watches.html', {
'watch_list': watch_list})
@login_required
@require_http_methods(['GET', 'POST'])
@mobile_template('users/{mobile/}edit_profile.html')
def edit_profile(request, username=None, template=None):
"""Edit user profile."""
# If a username is specified, we are editing somebody else's profile.
if username is not None and username != request.user.username:
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
raise Http404
# Make sure the auth'd user has permission:
if not request.user.has_perm('users.change_profile'):
return HttpResponseForbidden()
else:
user = request.user
try:
user_profile = Profile.objects.get(user=user)
except Profile.DoesNotExist:
# TODO: Once we do user profile migrations, all users should have a
# a profile. We can remove this fallback.
user_profile = Profile.objects.create(user=user)
if request.method == 'POST':
form = ProfileForm(request.POST, request.FILES, instance=user_profile)
if form.is_valid():
user_profile = form.save()
new_timezone = user_profile.timezone
tz_changed = request.session.get('timezone', None) != new_timezone
if tz_changed and user == request.user:
request.session['timezone'] = new_timezone
return HttpResponseRedirect(reverse('users.profile',
args=[user.username]))
else: # request.method == 'GET'
form = ProfileForm(instance=user_profile)
# TODO: detect timezone automatically from client side, see
# http://rocketscience.itteco.org/2010/03/13/automatic-users-timezone-determination-with-javascript-and-django-timezones/ # noqa
return render(request, template, {
'form': form, 'profile': user_profile})
@login_required
@require_http_methods(['POST'])
def make_contributor(request):
"""Adds the logged in user to the contributor group"""
group = Group.objects.get(name=CONTRIBUTOR_GROUP)
request.user.groups.add(group)
@email_utils.safe_translation
def _make_mail(locale):
mail = email_utils.make_mail(
# L10n: Thank you so much for your translation work! You're
# L10n: the best!
subject=_('Welcome to SUMO!'),
text_template='users/email/contributor.ltxt',
html_template='users/email/contributor.html',
context_vars={'contributor': request.user},
from_email=settings.DEFAULT_FROM_EMAIL,
to_email=request.user.email)
return mail
email_utils.send_messages([_make_mail(request.LANGUAGE_CODE)])
if 'return_to' in request.POST:
return HttpResponseRedirect(request.POST['return_to'])
else:
return HttpResponseRedirect(reverse('landings.get_involved'))
@login_required
@require_http_methods(['GET', 'POST'])
def edit_avatar(request):
"""Edit user avatar."""
try:
user_profile = Profile.objects.get(user=request.user)
except Profile.DoesNotExist:
# TODO: Once we do user profile migrations, all users should have a
# a profile. We can remove this fallback.
user_profile = Profile.objects.create(user=request.user)
if request.method == 'POST':
# Upload new avatar and replace old one.
old_avatar_path = None
if user_profile.avatar and os.path.isfile(user_profile.avatar.path):
# Need to store the path, not the file here, or else django's
# form.is_valid() messes with it.
old_avatar_path = user_profile.avatar.path
form = AvatarForm(request.POST, request.FILES, instance=user_profile)
if form.is_valid():
if old_avatar_path:
os.unlink(old_avatar_path)
user_profile = form.save()
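            # Re-generate the stored image as a padded AVATAR_SIZE thumbnail.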
content = _create_image_thumbnail(user_profile.avatar.path,
settings.AVATAR_SIZE, pad=True)
# We want everything as .png
name = user_profile.avatar.name + ".png"
# Delete uploaded avatar and replace with thumbnail.
user_profile.avatar.delete()
user_profile.avatar.save(name, content, save=True)
return HttpResponseRedirect(reverse('users.edit_my_profile'))
else: # request.method == 'GET'
form = AvatarForm(instance=user_profile)
return render(request, 'users/edit_avatar.html', {
'form': form, 'profile': user_profile})
@login_required
@require_http_methods(['GET', 'POST'])
def delete_avatar(request):
"""Delete user avatar."""
try:
user_profile = Profile.objects.get(user=request.user)
except Profile.DoesNotExist:
        # TODO: Once we do user profile migrations, all users should have
        # a profile. We can remove this fallback.
user_profile = Profile.objects.create(user=request.user)
if request.method == 'POST':
# Delete avatar here
if user_profile.avatar:
user_profile.avatar.delete()
return HttpResponseRedirect(reverse('users.edit_my_profile'))
# else: # request.method == 'GET'
return render(request, 'users/confirm_avatar_delete.html', {
'profile': user_profile})
@anonymous_csrf
@mobile_template('users/{mobile/}pw_reset_form.html')
def password_reset(request, template):
"""Password reset form.
Based on django.contrib.auth.views. This view sends the email.
"""
if request.method == "POST":
form = PasswordResetForm(request.POST)
was_valid = form.is_valid()
if was_valid:
# TODO: We aren't using Jingo anymore, but I'm not sure what
# to do with the below.
#
# TODO: Since we're using Jingo in a way that doesn't
# override the Django template loader, the pw_reset.ltxt
# email template must be a Django template and not a Jinja
# template.
#
# After we switch all the rendering everywhere, we can
# probably change this back. Until then, I'm pretty sure
# this won't get translated.
try_send_email_with_form(
form.save, form, 'email',
use_https=request.is_secure(),
token_generator=default_token_generator,
text_template='users/email/pw_reset.ltxt',
html_template='users/email/pw_reset.html',
subject_template_name='users/email/pw_reset_subject.ltxt')
# Form may now be invalid if email failed to send.
# PasswordResetForm is invalid iff there is no user with the entered
# email address.
        # The condition below ensures we don't leak the existence of an
        # email address _unless_ sending the email fails.
if form.is_valid() or not was_valid:
# Don't leak existence of email addresses.
return HttpResponseRedirect(reverse('users.pw_reset_sent'))
else:
form = PasswordResetForm()
return render(request, template, {'form': form})
@mobile_template('users/{mobile/}pw_reset_sent.html')
def password_reset_sent(request, template):
"""Password reset email sent.
Based on django.contrib.auth.views. This view shows a success message after
email is sent.
"""
return render(request, template)
@ssl_required
@anonymous_csrf
@mobile_template('users/{mobile/}pw_reset_confirm.html')
def password_reset_confirm(request, template, uidb36=None, token=None):
"""View that checks the hash in a password reset link and presents a
form for entering a new password.
Based on django.contrib.auth.views.
"""
try:
uid_int = base36_to_int(uidb36)
except ValueError:
raise Http404
user = get_object_or_404(User, id=uid_int)
context = {}
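    # Only show the new-password form while the reset token is still valid.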
if default_token_generator.check_token(user, token):
context['validlink'] = True
if request.method == 'POST':
form = SetPasswordForm(user, request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('users.pw_reset_complete'))
else:
form = SetPasswordForm(None)
else:
context['validlink'] = False
form = None
context['form'] = form
return render(request, template, context)
@mobile_template('users/{mobile/}pw_reset_complete.html')
def password_reset_complete(request, template):
"""Password reset complete.
Based on django.contrib.auth.views. Show a success message.
"""
form = AuthenticationForm()
return render(request, template, {'form': form})
@login_required
@mobile_template('users/{mobile/}pw_change.html')
def password_change(request, template):
"""Change password form page."""
if request.method == 'POST':
form = PasswordChangeForm(user=request.user, data=request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('users.pw_change_complete'))
else:
form = PasswordChangeForm(user=request.user)
return render(request, template, {'form': form})
@login_required
@mobile_template('users/{mobile/}pw_change_complete.html')
def password_change_complete(request, template):
"""Change password complete page."""
return render(request, template)
@anonymous_csrf
@mobile_template('users/{mobile/}forgot_username.html')
def forgot_username(request, template):
"""Forgot username form page.
On POST, this view sends an email with the username.
"""
if request.method == "POST":
form = ForgotUsernameForm(request.POST)
was_valid = form.is_valid()
if was_valid:
try_send_email_with_form(
form.save, form, 'email',
use_https=request.is_secure())
# Form may now be invalid if email failed to send.
# ForgotUsernameForm is invalid iff there is no user with the entered
# email address.
        # The condition below ensures we don't leak the existence of an
        # email address _unless_ sending the email fails.
if form.is_valid() or not was_valid:
# Don't leak existence of email addresses.
messages.add_message(
request, messages.INFO,
_(u"We've sent an email with the username to any account"
u" using {email}.").format(email=form.data['email']))
return HttpResponseRedirect(reverse('users.login'))
else:
form = ForgotUsernameForm()
return render(request, template, {'form': form})
@require_GET
@never_cache
@json_view
def validate_field(request):
data = {'valid': True}
field = request.GET.get('field')
value = request.GET.get('value')
form = RegisterForm()
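    # Reuse RegisterForm's own field validation for the requested field.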
try:
        form.fields[field].clean(value)
    except ValidationError as e:
data = {
'valid': False,
'error': e.messages[0]
}
except KeyError:
data = {
'valid': False,
'error': _('Invalid field')
}
if data['valid']:
if field == 'username':
if User.objects.filter(username=value).exists():
data = {
'valid': False,
'error': _('This username is already taken!')
}
elif field == 'email':
            if User.objects.filter(email=value).exists():
data = {
'valid': False,
'error': _('This email is already in use!')
}
return data
| bsd-3-clause | 794,880,186,914,389,800 | 35.236271 | 133 | 0.626018 | false |
rackerlabs/quark | quark/tests/plugin_modules/test_ports.py | 1 | 91255 | # Copyright 2013 Rackspace Hosting Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import json
import mock
import netaddr
from neutron.api.v2 import attributes as neutron_attrs
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from quark.db import models
from quark.drivers import registry
from quark import exceptions as q_exc
from quark import network_strategy
from quark.plugin_modules import ports as quark_ports
from quark import plugin_views
from quark import tags
from quark.tests import test_quark_plugin
class TestQuarkGetPorts(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, ports=None, addrs=None):
port_models = []
addr_models = None
if addrs:
addr_models = []
for address in addrs:
a = models.IPAddress(**address)
addr_models.append(a)
if isinstance(ports, list):
for port in ports:
port_model = models.Port(**port)
if addr_models:
port_model.ip_addresses = addr_models
port_models.append(port_model)
elif ports is None:
port_models = None
else:
port_model = models.Port(**ports)
if addr_models:
port_model.ip_addresses = addr_models
port_models = port_model
with contextlib.nested(
mock.patch("quark.db.api.port_find")
) as (port_find,):
port_find.return_value = port_models
yield
def test_port_list_no_ports(self):
with self._stubs(ports=[]):
ports = self.plugin.get_ports(self.context, filters=None,
fields=None)
self.assertEqual(ports, [])
def test_port_list_with_device_owner_dhcp(self):
ip = dict(id=1, address=netaddr.IPAddress("192.168.1.100").value,
address_readable="192.168.1.100", subnet_id=1, network_id=2,
version=4)
filters = {'network_id': ip['network_id'],
'device_owner': 'network:dhcp'}
port = dict(mac_address="AA:BB:CC:DD:EE:FF", network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
bridge="xenbr0", device_owner='network:dhcp')
with self._stubs(ports=[port], addrs=[ip]):
ports = self.plugin.get_ports(self.context, filters=filters,
fields=None)
self.assertEqual(len(ports), 1)
self.assertEqual(ports[0]["device_owner"], "network:dhcp")
def test_port_list_with_ports(self):
ip = dict(id=1, address=netaddr.IPAddress("192.168.1.100").value,
address_readable="192.168.1.100", subnet_id=1, network_id=2,
version=4)
port = dict(mac_address="AA:BB:CC:DD:EE:FF", network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
bridge="xenbr0")
expected = {'status': "ACTIVE",
'device_owner': None,
'mac_address': 'AA:BB:CC:DD:EE:FF',
'network_id': 1,
'bridge': "xenbr0",
'tenant_id': self.context.tenant_id,
'admin_state_up': None,
'device_id': 2}
with self._stubs(ports=[port], addrs=[ip]):
ports = self.plugin.get_ports(self.context, filters=None,
fields=None)
self.assertEqual(len(ports), 1)
fixed_ips = ports[0].pop("fixed_ips")
for key in expected.keys():
self.assertEqual(ports[0][key], expected[key])
self.assertEqual(fixed_ips[0]["subnet_id"], ip["subnet_id"])
self.assertEqual(fixed_ips[0]["ip_address"],
ip["address_readable"])
def test_port_show_with_int_mac(self):
port = dict(mac_address=int('AABBCCDDEEFF', 16), network_id=1,
tenant_id=self.context.tenant_id, device_id=2)
expected = {'status': "ACTIVE",
'device_owner': None,
'mac_address': 'AA:BB:CC:DD:EE:FF',
'network_id': 1,
'tenant_id': self.context.tenant_id,
'admin_state_up': None,
'fixed_ips': [],
'device_id': 2}
with self._stubs(ports=port):
result = self.plugin.get_port(self.context, 1)
for key in expected.keys():
self.assertEqual(result[key], expected[key])
def test_port_show_not_found(self):
with self._stubs(ports=None):
with self.assertRaises(n_exc.PortNotFound):
self.plugin.get_port(self.context, 1)
def test_port_show_vlan_id(self):
"""Prove VLAN IDs are included in port information when available."""
port_tags = [tags.VlanTag().serialize(5)]
port = dict(mac_address=int('AABBCCDDEEFF', 16), network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
tags=port_tags)
expected = {'status': "ACTIVE",
'device_owner': None,
'mac_address': 'AA:BB:CC:DD:EE:FF',
'network_id': 1,
'tenant_id': self.context.tenant_id,
'admin_state_up': None,
'fixed_ips': [],
'device_id': 2,
'vlan_id': '5'}
with self._stubs(ports=port):
result = self.plugin.get_port(self.context, 1)
for key in expected.keys():
self.assertEqual(result[key], expected[key])
def test_port_show_invalid_vlan_id(self):
"""Prove VLAN IDs are included in port information when available."""
port_tags = [tags.VlanTag().serialize('invalid')]
port = dict(mac_address=int('AABBCCDDEEFF', 16), network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
tags=port_tags)
expected = {'status': "ACTIVE",
'device_owner': None,
'mac_address': 'AA:BB:CC:DD:EE:FF',
'network_id': 1,
'tenant_id': self.context.tenant_id,
'admin_state_up': None,
'fixed_ips': [],
'device_id': 2}
with self._stubs(ports=port):
result = self.plugin.get_port(self.context, 1)
for key in expected.keys():
self.assertEqual(result[key], expected[key])
class TestQuarkGetPortsProviderSubnetIds(test_quark_plugin.TestQuarkPlugin):
def setUp(self):
super(TestQuarkGetPortsProviderSubnetIds, self).setUp()
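        # Map provider network "1" to per-IP-version provider subnet ids that
        # can be reported in place of the db subnet ids.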
self.strategy = {
"1": {
"bridge": "publicnet",
"subnets": {
"4": "v4-provider-subnet-id",
"6": "v6-provider-subnet-id"
}
}
}
self.strategy_json = json.dumps(self.strategy)
self.old = plugin_views.STRATEGY
plugin_views.STRATEGY = network_strategy.JSONStrategy(
self.strategy_json)
cfg.CONF.set_override("default_net_strategy", self.strategy_json,
"QUARK")
def tearDown(self):
plugin_views.STRATEGY = self.old
def _port_associate_stub(self, ports, address, **kwargs):
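        # Minimal stand-in for the db-layer port/IP association: link the
        # port and address both ways and enable only "fixed" addresses.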
if not isinstance(ports, list):
ports = [ports]
for port in ports:
assoc = models.PortIpAssociation()
assoc.port_id = port.id
assoc.ip_address_id = address.id
assoc.port = port
assoc.ip_address = address
assoc.enabled = address.address_type == "fixed"
return address
@contextlib.contextmanager
def _stubs(self, ports=None, addrs=None):
port_models = []
addr_models = None
if addrs:
addr_models = []
for address in addrs:
a = models.IPAddress(**address)
addr_models.append(a)
if isinstance(ports, list):
for port in ports:
port_model = models.Port(**port)
if addr_models:
port_model.ip_addresses = addr_models
for addr_model in addr_models:
self._port_associate_stub(
port_model, addr_model)
port_models.append(port_model)
elif ports is None:
port_models = None
else:
port_model = models.Port(**ports)
if addr_models:
port_model.ip_addresses = addr_models
for addr_model in addr_models:
self._port_associate_stub(
port_model, addr_model)
port_models = port_model
with contextlib.nested(
mock.patch("quark.db.api.port_find")
) as (port_find,):
port_find.return_value = port_models
yield
def test_port_show_with_provider_subnet_ids(self):
"""Prove provider subnets ids are shown on the port object."""
ip = dict(id=1, address=netaddr.IPAddress("192.168.1.100").value,
address_readable="192.168.1.100", subnet_id="1",
network_id="1", version=4, address_type="fixed")
port = dict(mac_address=int('AABBCCDDEEFF', 16), network_id="1",
tenant_id=self.context.tenant_id, device_id=2)
expected = {'status': "ACTIVE",
'device_owner': None,
'mac_address': 'AA:BB:CC:DD:EE:FF',
'network_id': "1",
'tenant_id': self.context.tenant_id,
'admin_state_up': None,
'fixed_ips': [
{'subnet_id': 'v4-provider-subnet-id', 'enabled': True,
'ip_address': '192.168.1.100'}
],
'device_id': 2}
with self._stubs(ports=port, addrs=[ip]):
result = self.plugin.get_port(self.context, 1)
for key in expected.keys():
self.assertEqual(result[key], expected[key])
def test_port_show_without_provider_subnet_ids(self):
"""Prove provider subnets ids are shown on the port object."""
cfg.CONF.set_override('show_provider_subnet_ids', False, 'QUARK')
self.addCleanup(
cfg.CONF.clear_override, 'show_provider_subnet_ids', 'QUARK')
ip = dict(id=1, address=netaddr.IPAddress("192.168.1.100").value,
address_readable="192.168.1.100", subnet_id="1",
network_id="1", version=4, address_type="fixed")
port = dict(mac_address=int('AABBCCDDEEFF', 16), network_id="1",
tenant_id=self.context.tenant_id, device_id=2)
expected = {'status': "ACTIVE",
'device_owner': None,
'mac_address': 'AA:BB:CC:DD:EE:FF',
'network_id': "1",
'tenant_id': self.context.tenant_id,
'admin_state_up': None,
'fixed_ips': [
{'subnet_id': '1', 'enabled': True,
'ip_address': '192.168.1.100'}
],
'device_id': 2}
with self._stubs(ports=port, addrs=[ip]):
result = self.plugin.get_port(self.context, 1)
for key in expected.keys():
self.assertEqual(result[key], expected[key])
class TestQuarkGetPortsByIPAddress(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, ports=None, addr=None):
addr_models = []
for port in ports:
ip_mod = models.IPAddress()
ip_mod.update(addr)
port_model = models.Port()
port_model.update(port)
ip_mod.ports = [port_model]
addr_models.append(ip_mod)
with contextlib.nested(
mock.patch("quark.db.api.port_find_by_ip_address")
) as (port_find_by_addr,):
port_find_by_addr.return_value = addr_models
yield
def test_port_list_by_ip_address(self):
ip = dict(id=1, address=netaddr.IPAddress("192.168.1.100").value,
address_readable="192.168.1.100", subnet_id=1, network_id=2,
version=4)
port = dict(mac_address="AA:BB:CC:DD:EE:FF", network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
bridge="xenbr0", device_owner='network:dhcp')
with self._stubs(ports=[port], addr=ip):
admin_ctx = self.context.elevated()
filters = {"ip_address": ["192.168.0.1"]}
ports = self.plugin.get_ports(admin_ctx, filters=filters,
fields=None)
self.assertEqual(len(ports), 1)
self.assertEqual(ports[0]["device_owner"], "network:dhcp")
def test_port_list_by_ip_not_admin_raises(self):
with self._stubs(ports=[]):
filters = {"ip_address": ["192.168.0.1"]}
with self.assertRaises(n_exc.NotAuthorized):
self.plugin.get_ports(self.context, filters=filters,
fields=None)
def test_port_list_malformed_address_bad_request(self):
with self._stubs(ports=[]):
filters = {"ip_address": ["malformed-address-here"]}
admin_ctx = self.context.elevated()
with self.assertRaises(n_exc.BadRequest):
self.plugin.get_ports(admin_ctx, filters=filters, fields=None)
class TestQuarkCreatePortFailure(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, port=None, network=None, addr=None, mac=None):
if network:
network["network_plugin"] = "BASE"
network["ipam_strategy"] = "ANY"
port_model = models.Port()
port_model.update(port)
port_models = port_model
with contextlib.nested(
mock.patch("quark.db.api.port_create"),
mock.patch("quark.db.api.network_find"),
mock.patch("quark.db.api.port_find"),
mock.patch("quark.ipam.QuarkIpam.allocate_ip_address"),
mock.patch("quark.ipam.QuarkIpam.allocate_mac_address"),
mock.patch("quark.db.api.port_count_all"),
) as (port_create, net_find, port_find, alloc_ip, alloc_mac,
port_count):
port_create.return_value = port_models
net_find.return_value = network
port_find.return_value = models.Port()
alloc_ip.return_value = addr
alloc_mac.return_value = mac
port_count.return_value = 0
yield port_create
def test_create_multiple_ports_on_same_net_and_device_id_bad_request(self):
network = dict(id=1, tenant_id=self.context.tenant_id)
ip = dict()
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_1 = dict(port=dict(mac_address="AA:BB:CC:DD:EE:00", network_id=1,
tenant_id=self.context.tenant_id, device_id=1,
name="Fake"))
port_2 = dict(port=dict(mac_address="AA:BB:CC:DD:EE:11", network_id=1,
tenant_id=self.context.tenant_id, device_id=1,
name="Faker"))
with self._stubs(port=port_1, network=network, addr=ip, mac=mac):
with self.assertRaises(n_exc.BadRequest):
self.plugin.create_port(self.context, port_1)
self.plugin.create_port(self.context, port_2)
class TestQuarkCreatePortRM9305(test_quark_plugin.TestQuarkPlugin):
def setUp(self):
super(TestQuarkCreatePortRM9305, self).setUp()
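        # Two shared provider networks (publicnet/servicenet), each with
        # fixed v4/v6 provider subnet ids.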
strategy = {"00000000-0000-0000-0000-000000000000":
{"bridge": "publicnet",
"subnets": {"4": "public_v4",
"6": "public_v6"}},
"11111111-1111-1111-1111-111111111111":
{"bridge": "servicenet",
"subnets": {"4": "private_v4",
"6": "private_v6"}}}
strategy_json = json.dumps(strategy)
quark_ports.STRATEGY = network_strategy.JSONStrategy(strategy_json)
@contextlib.contextmanager
def _stubs(self, port=None, network=None, addr=None, mac=None):
if network:
network["network_plugin"] = "BASE"
network["ipam_strategy"] = "ANY"
port_model = models.Port()
port_model.update(port)
port_models = port_model
db_mod = "quark.db.api"
ipam = "quark.ipam.QuarkIpam"
with contextlib.nested(
mock.patch("%s.port_create" % db_mod),
mock.patch("%s.network_find" % db_mod),
mock.patch("%s.port_find" % db_mod),
mock.patch("%s.allocate_ip_address" % ipam),
mock.patch("%s.allocate_mac_address" % ipam),
mock.patch("%s.port_count_all" % db_mod),
) as (port_create, net_find, port_find, alloc_ip, alloc_mac,
port_count):
port_create.return_value = port_models
net_find.return_value = network
port_find.return_value = None
alloc_ip.return_value = addr
alloc_mac.return_value = mac
port_count.return_value = 0
yield port_create
def test_RM9305_tenant_create_servicenet_port(self):
network_id = "11111111-1111-1111-1111-111111111111"
network = dict(id=network_id,
tenant_id="rackspace")
ip = dict()
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_1 = dict(port=dict(mac_address="AA:BB:CC:DD:EE:00",
network_id=network_id,
tenant_id=self.context.tenant_id, device_id=2,
segment_id="bar",
name="Fake"))
with self._stubs(port=port_1, network=network, addr=ip, mac=mac):
self.plugin.create_port(self.context, port_1)
def test_RM9305_tenant_create_publicnet_port(self):
network_id = "00000000-0000-0000-0000-000000000000"
network = dict(id=network_id,
tenant_id="rackspace")
ip = dict()
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_1 = dict(port=dict(mac_address="AA:BB:CC:DD:EE:00",
network_id=network_id,
tenant_id=self.context.tenant_id, device_id=3,
segment_id="bar",
name="Fake"))
with self._stubs(port=port_1, network=network, addr=ip, mac=mac):
self.plugin.create_port(self.context, port_1)
def test_RM9305_tenant_create_tenants_port(self):
network_id = "foobar"
network = dict(id=network_id,
tenant_id=self.context.tenant_id)
ip = dict()
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_1 = dict(port=dict(mac_address="AA:BB:CC:DD:EE:00",
network_id=network_id,
tenant_id=self.context.tenant_id, device_id=4,
name="Fake"))
with self._stubs(port=port_1, network=network, addr=ip, mac=mac):
self.plugin.create_port(self.context, port_1)
def test_RM9305_tenant_create_other_tenants_port(self):
network_id = "foobar"
network = dict(id=network_id,
tenant_id="other_tenant")
ip = dict()
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_1 = dict(port=dict(mac_address="AA:BB:CC:DD:EE:00",
network_id=network_id,
tenant_id=self.context.tenant_id, device_id=5,
name="Fake"))
with self._stubs(port=port_1, network=network, addr=ip, mac=mac):
with self.assertRaises(n_exc.NotAuthorized):
self.plugin.create_port(self.context, port_1)
class TestQuarkCreatePortsSameDevBadRequest(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, port=None, network=None, addr=None, mac=None,
limit_checks=None, subnet=None):
subnet_model = None
if subnet:
subnet_model = models.Subnet()
subnet_model.update(subnet)
if network:
network["network_plugin"] = "BASE"
network["ipam_strategy"] = "ANY"
def _create_db_port(context, **kwargs):
port_model = models.Port()
port_model.update(kwargs)
return port_model
def _alloc_ip(context, new_ips, *args, **kwargs):
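            # Mimic real IPAM: append the allocated address to the caller's
            # list in place, then fall through to the mock's return value.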
ip_mod = models.IPAddress()
ip_mod.update(addr)
ip_mod.enabled_for_port = lambda x: True
new_ips.extend([ip_mod])
return mock.DEFAULT
with contextlib.nested(
mock.patch("quark.db.api.port_create"),
mock.patch("quark.db.api.network_find"),
mock.patch("quark.ipam.QuarkIpam.allocate_ip_address"),
mock.patch("quark.ipam.QuarkIpam.allocate_mac_address"),
mock.patch("quark.db.api.port_count_all"),
mock.patch("neutron.quota.QuotaEngine.limit_check"),
mock.patch("quark.db.api.subnet_find"),
) as (port_create, net_find, alloc_ip, alloc_mac, port_count,
limit_check, subnet_find):
port_create.side_effect = _create_db_port
net_find.return_value = network
alloc_ip.side_effect = _alloc_ip
alloc_mac.return_value = mac
if subnet:
subnet_find.return_value = [subnet_model]
port_count.return_value = 0
if limit_checks:
limit_check.side_effect = limit_checks
yield port_create
def test_create_port(self):
network = dict(id=1, tenant_id=self.context.tenant_id)
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_name = "foobar"
ip = dict()
port = dict(port=dict(mac_address=mac["address"], network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
name=port_name))
expected = {'status': "ACTIVE",
'name': port_name,
'device_owner': None,
'mac_address': mac["address"],
'network_id': network["id"],
'tenant_id': self.context.tenant_id,
'admin_state_up': None,
'fixed_ips': [],
'device_id': 2}
with self._stubs(port=port["port"], network=network, addr=ip,
mac=mac) as port_create:
result = self.plugin.create_port(self.context, port)
self.assertTrue(port_create.called)
for key in expected.keys():
self.assertEqual(result[key], expected[key])
def test_create_port_segment_id_on_unshared_net_ignored(self):
network = dict(id=1, tenant_id=self.context.tenant_id)
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_name = "foobar"
ip = dict()
port = dict(port=dict(mac_address=mac["address"], network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
segment_id="cell01", name=port_name))
expected = {'status': "ACTIVE",
'name': port_name,
'device_owner': None,
'mac_address': mac["address"],
'network_id': network["id"],
'tenant_id': self.context.tenant_id,
'admin_state_up': None,
'fixed_ips': [],
'device_id': 2}
with self._stubs(port=port["port"], network=network, addr=ip,
mac=mac) as port_create:
result = self.plugin.create_port(self.context, port)
self.assertTrue(port_create.called)
for key in expected.keys():
self.assertEqual(result[key], expected[key])
def test_create_port_mac_address_not_specified(self):
network = dict(id=1, tenant_id=self.context.tenant_id)
mac = dict(address="AA:BB:CC:DD:EE:FF")
ip = dict()
port = dict(port=dict(mac_address=mac["address"], network_id=1,
tenant_id=self.context.tenant_id, device_id=2))
expected = {'status': "ACTIVE",
'device_owner': None,
'mac_address': mac["address"],
'network_id': network["id"],
'tenant_id': self.context.tenant_id,
'admin_state_up': None,
'fixed_ips': [],
'device_id': 2}
with self._stubs(port=port["port"], network=network, addr=ip,
mac=mac) as port_create:
port["port"]["mac_address"] = neutron_attrs.ATTR_NOT_SPECIFIED
result = self.plugin.create_port(self.context, port)
self.assertTrue(port_create.called)
for key in expected.keys():
self.assertEqual(result[key], expected[key])
@mock.patch("quark.network_strategy.JSONStrategy.is_provider_network")
def test_create_providernet_port_fixed_ip_not_authorized(self, is_parent):
is_parent.return_value = True
network = dict(id='1', tenant_id=self.context.tenant_id)
subnet = dict(id=1, network_id=network["id"])
mac = dict(address="AA:BB:CC:DD:EE:FF")
ip = mock.MagicMock()
ip.get = lambda x, *y: 1 if x == "subnet_id" else None
ip.formatted = lambda: "192.168.10.45"
ip.enabled_for_port = lambda x: True
fixed_ips = [dict(subnet_id=1, enabled=True,
ip_address="192.168.10.45")]
port = dict(port=dict(mac_address=mac["address"], network_id='1',
tenant_id=self.context.tenant_id, device_id=2,
fixed_ips=fixed_ips, ip_addresses=[ip],
segment_id="provider_segment"))
with self._stubs(port=port["port"], network=network, addr=ip,
mac=mac, subnet=subnet):
with self.assertRaises(n_exc.NotAuthorized):
self.plugin.create_port(self.context, port)
@mock.patch("quark.network_strategy.JSONStrategy.is_provider_network")
def test_create_providernet_port_fixed_ip_wrong_segment(self, is_parent):
is_parent.return_value = True
network = dict(id='1', tenant_id=self.context.tenant_id)
mac = dict(address="AA:BB:CC:DD:EE:FF")
subnet = dict(id=1, network_id=network["id"])
ip = mock.MagicMock()
ip.get = lambda x, *y: 1 if x == "subnet_id" else None
ip.formatted = lambda: "192.168.10.45"
ip.enabled_for_port = lambda x: True
fixed_ips = [dict(subnet_id=1, enabled=True,
ip_address="192.168.10.45")]
port = dict(port=dict(mac_address=mac["address"], network_id='1',
tenant_id=self.context.tenant_id, device_id=2,
fixed_ips=fixed_ips, ip_addresses=[ip],
segment_id="provider_segment"))
with self._stubs(port=port["port"], network=network, addr=ip,
mac=mac, subnet=subnet):
with self.assertRaises(q_exc.AmbiguousNetworkId):
self.plugin.create_port(self.context.elevated(), port)
def test_create_port_fixed_ip_subnet_not_found(self):
network = dict(id='1', tenant_id=self.context.tenant_id)
mac = dict(address="AA:BB:CC:DD:EE:FF")
ip = mock.MagicMock()
ip.get = lambda x, *y: 1 if x == "subnet_id" else None
ip.formatted = lambda: "192.168.10.45"
ip.enabled_for_port = lambda x: True
fixed_ips = [dict(subnet_id=1, enabled=True,
ip_address="192.168.10.45")]
port = dict(port=dict(mac_address=mac["address"], network_id='1',
tenant_id=self.context.tenant_id, device_id=2,
fixed_ips=fixed_ips, ip_addresses=[ip],
segment_id="provider_segment"))
with self._stubs(port=port["port"], network=network, addr=ip,
mac=mac):
with self.assertRaises(n_exc.NotFound):
self.plugin.create_port(self.context.elevated(), port)
def test_create_port_fixed_ip_subnet_not_in_network(self):
network = dict(id='1', tenant_id=self.context.tenant_id)
mac = dict(address="AA:BB:CC:DD:EE:FF")
subnet = dict(id=1, network_id='2')
ip = mock.MagicMock()
ip.get = lambda x, *y: 1 if x == "subnet_id" else None
ip.formatted = lambda: "192.168.10.45"
ip.enabled_for_port = lambda x: True
fixed_ips = [dict(subnet_id=1, enabled=True,
ip_address="192.168.10.45")]
port = dict(port=dict(mac_address=mac["address"], network_id='1',
tenant_id=self.context.tenant_id, device_id=2,
fixed_ips=fixed_ips, ip_addresses=[ip],
segment_id="provider_segment"))
with self._stubs(port=port["port"], network=network, addr=ip,
mac=mac, subnet=subnet):
with self.assertRaises(n_exc.InvalidInput):
self.plugin.create_port(self.context.elevated(), port)
def test_create_port_fixed_ips_bad_request(self):
network = dict(id=1, tenant_id=self.context.tenant_id)
mac = dict(address="AA:BB:CC:DD:EE:FF")
ip = mock.MagicMock()
ip.get = lambda x: 1 if x == "subnet_id" else None
ip.formatted = lambda: "192.168.10.45"
fixed_ips = [dict()]
port = dict(port=dict(mac_address=mac["address"], network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
fixed_ips=fixed_ips, ip_addresses=[ip]))
with self._stubs(port=port["port"], network=network, addr=ip,
mac=mac):
with self.assertRaises(n_exc.BadRequest):
self.plugin.create_port(self.context, port)
def test_create_port_no_network_found(self):
port = dict(port=dict(network_id=1, tenant_id=self.context.tenant_id,
device_id=2))
with self._stubs(network=None, port=port["port"]):
with self.assertRaises(n_exc.NetworkNotFound):
self.plugin.create_port(self.context, port)
def test_create_port_security_groups_raises(self, groups=[1]):
network = dict(id=1, tenant_id=self.context.tenant_id)
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_name = "foobar"
ip = dict()
group = models.SecurityGroup()
group.update({'id': 1, 'tenant_id': self.context.tenant_id,
'name': 'foo', 'description': 'bar'})
port = dict(port=dict(mac_address=mac["address"], network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
name=port_name, security_groups=[group]))
with self._stubs(port=port["port"], network=network, addr=ip,
mac=mac):
with mock.patch("quark.db.api.security_group_find"):
with self.assertRaises(q_exc.SecurityGroupsNotImplemented):
self.plugin.create_port(self.context, port)
class TestQuarkPortCreateQuota(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, port=None, network=None, addr=None, mac=None):
if network:
network["network_plugin"] = "BASE"
network["ipam_strategy"] = "ANY"
port_model = models.Port()
port_model.update(port)
port_models = port_model
with contextlib.nested(
mock.patch("quark.db.api.port_create"),
mock.patch("quark.db.api.network_find"),
mock.patch("quark.ipam.QuarkIpam.allocate_ip_address"),
mock.patch("quark.ipam.QuarkIpam.allocate_mac_address"),
mock.patch("quark.db.api.port_count_all"),
mock.patch("neutron.quota.QuotaEngine.limit_check")
) as (port_create, net_find, alloc_ip, alloc_mac, port_count,
limit_check):
port_create.return_value = port_models
net_find.return_value = network
alloc_ip.return_value = addr
alloc_mac.return_value = mac
port_count.return_value = len(network["ports"])
limit_check.side_effect = n_exc.OverQuota
yield port_create
def test_create_port_net_at_max(self):
network = dict(id=1, ports=[models.Port()],
tenant_id=self.context.tenant_id)
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_name = "foobar"
ip = dict()
port = dict(port=dict(mac_address=mac["address"], network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
name=port_name))
with self._stubs(port=port["port"], network=network, addr=ip, mac=mac):
with self.assertRaises(n_exc.OverQuota):
self.plugin.create_port(self.context, port)
class TestQuarkPortCreateFixedIpsQuota(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, network):
network["network_plugin"] = "BASE"
network["ipam_strategy"] = "ANY"
with mock.patch("quark.db.api.network_find") as net_find:
net_find.return_value = network
yield
def test_create_port_fixed_ips_over_quota(self):
network = {"id": 1, "tenant_id": self.context.tenant_id}
fixed_ips = [{"subnet_id": 1}, {"subnet_id": 1}, {"subnet_id": 1},
{"subnet_id": 1}, {"subnet_id": 1}, {"subnet_id": 1},
{"subnet_id": 1}]
port = {"port": {"network_id": 1, "tenant_id": self.context.tenant_id,
"device_id": 2, "fixed_ips": fixed_ips}}
with self._stubs(network=network):
with self.assertRaises(n_exc.OverQuota):
self.plugin.create_port(self.context, port)
class TestQuarkUpdatePort(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, port, new_ips=None, parent_net=False):
port_model = None
if port:
net_model = models.Network()
net_model["network_plugin"] = "BASE"
port_model = models.Port()
port_model.network = net_model
port_model.update(port)
with contextlib.nested(
mock.patch("quark.db.api.port_find"),
mock.patch("quark.db.api.port_update"),
mock.patch("quark.ipam.QuarkIpam.allocate_ip_address"),
mock.patch("quark.ipam.QuarkIpam.deallocate_ips_by_port"),
) as (port_find, port_update, alloc_ip, dealloc_ip):
port_find.return_value = port_model
port_update.return_value = port_model
if new_ips:
alloc_ip.return_value = new_ips
yield port_find, port_update, alloc_ip, dealloc_ip
def test_update_port_not_found(self):
with self._stubs(port=None):
with self.assertRaises(n_exc.PortNotFound):
self.plugin.update_port(self.context, 1, {})
def test_update_port(self):
with self._stubs(
port=dict(id=1, name="myport")
) as (port_find, port_update, alloc_ip, dealloc_ip):
new_port = dict(port=dict(name="ourport"))
self.plugin.update_port(self.context, 1, new_port)
self.assertEqual(port_find.call_count, 2)
port_update.assert_called_once_with(
self.context,
port_find(),
name="ourport",
security_groups=[])
def test_update_port_fixed_ip_bad_request(self):
with self._stubs(
port=dict(id=1, name="myport")
) as (port_find, port_update, alloc_ip, dealloc_ip):
new_port = dict(port=dict(
fixed_ips=[dict(subnet_id=None,
ip_address=None)]))
with self.assertRaises(n_exc.BadRequest):
self.plugin.update_port(self.context, 1, new_port)
def test_update_port_fixed_ip_bad_request_malformed_address(self):
with self._stubs(
port=dict(id=1, name="myport", mac_address="0:0:0:0:0:1")
) as (port_find, port_update, alloc_ip, dealloc_ip):
new_port = dict(port=dict(
fixed_ips=[dict(subnet_id=1,
ip_address="malformed-address-here")]))
with self.assertRaises(n_exc.BadRequest):
self.plugin.update_port(self.context, 1, new_port)
def test_update_port_fixed_ip(self):
with self._stubs(
port=dict(id=1, name="myport", mac_address="0:0:0:0:0:1")
) as (port_find, port_update, alloc_ip, dealloc_ip):
new_port = dict(port=dict(
fixed_ips=[dict(subnet_id=1,
ip_address="1.1.1.1")]))
self.plugin.update_port(self.context, 1, new_port)
self.assertEqual(alloc_ip.call_count, 1)
def test_update_port_fixed_ip_no_subnet_raises(self):
with self._stubs(
port=dict(id=1, name="myport", mac_address="0:0:0:0:0:1")
) as (port_find, port_update, alloc_ip, dealloc_ip):
new_port = dict(port=dict(
fixed_ips=[dict(ip_address="1.1.1.1")]))
with self.assertRaises(n_exc.BadRequest):
self.plugin.update_port(self.context, 1, new_port)
def test_update_port_fixed_ip_subnet_only_allocates_ip(self):
with self._stubs(
port=dict(id=1, name="myport", mac_address="0:0:0:0:0:1")
) as (port_find, port_update, alloc_ip, dealloc_ip):
new_port = dict(port=dict(
fixed_ips=[dict(subnet_id=1)]))
self.plugin.update_port(self.context, 1, new_port)
self.assertEqual(alloc_ip.call_count, 1)
def test_update_port_fixed_ip_allocs_new_deallocs_existing(self):
addr_dict = {"address": 0, "address_readable": "0.0.0.0"}
addr = models.IPAddress()
addr.update(addr_dict)
new_addr_dict = {"address": netaddr.IPAddress("1.1.1.1"),
"address_readable": "1.1.1.1"}
new_addr = models.IPAddress()
new_addr.update(new_addr_dict)
with self._stubs(
port=dict(id=1, name="myport", mac_address="0:0:0:0:0:1",
ip_addresses=[addr]),
new_ips=[new_addr]
) as (port_find, port_update, alloc_ip, dealloc_ip):
new_port = dict(port=dict(
fixed_ips=[dict(subnet_id=1,
ip_address=new_addr["address_readable"])]))
self.plugin.update_port(self.context, 1, new_port)
self.assertEqual(alloc_ip.call_count, 1)
def test_update_port_goes_over_quota(self):
fixed_ips = {"fixed_ips": [{"subnet_id": 1},
{"subnet_id": 1},
{"subnet_id": 1},
{"subnet_id": 1},
{"subnet_id": 1},
{"subnet_id": 1},
{"subnet_id": 1}]}
with self._stubs(
port=dict(id=1, name="myport", mac_address="0:0:0:0:0:1")
) as (port_find, port_update, alloc_ip, dealloc_ip):
new_port = {"port": fixed_ips}
with self.assertRaises(n_exc.OverQuota):
self.plugin.update_port(self.context, 1, new_port)
class TestQuarkUpdatePortSecurityGroups(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, port, new_ips=None, parent_net=False):
port_model = None
sg_mod = models.SecurityGroup()
if port:
net_model = models.Network()
net_model["network_plugin"] = "BASE"
port_model = models.Port()
port_model.network = net_model
port_model.update(port)
port_model["security_groups"].append(sg_mod)
with contextlib.nested(
mock.patch("quark.db.api.port_find"),
mock.patch("quark.db.api.port_update"),
mock.patch("quark.ipam.QuarkIpam.allocate_ip_address"),
mock.patch("quark.ipam.QuarkIpam.deallocate_ips_by_port"),
mock.patch("neutron.quota.QuotaEngine.limit_check"),
mock.patch("quark.plugin_modules.ports.STRATEGY"
".is_provider_network"),
mock.patch("quark.db.api.security_group_find"),
mock.patch("quark.drivers.base.BaseDriver.update_port")
) as (port_find, port_update, alloc_ip, dealloc_ip, limit_check,
net_strat, sg_find, driver_port_update):
port_find.return_value = port_model
def _port_update(context, port_db, **kwargs):
return port_db.update(kwargs)
port_update.side_effect = _port_update
if new_ips:
alloc_ip.return_value = new_ips
net_strat.return_value = parent_net
sg_find.return_value = sg_mod
yield (port_find, port_update, alloc_ip, dealloc_ip, sg_find,
driver_port_update)
def test_update_port_security_groups(self):
with self._stubs(
port=dict(id=1, device_id="device"), parent_net=True
) as (port_find, port_update, alloc_ip, dealloc_ip, sg_find,
driver_port_update):
new_port = dict(port=dict(name="ourport",
security_groups=[1]))
port = self.plugin.update_port(self.context, 1, new_port)
port_update.assert_called_once_with(
self.context,
port_find(),
name="ourport",
security_groups=[sg_find()])
self.assertEqual(sg_find()["id"], port["security_groups"][0])
def test_update_port_empty_list_security_groups(self):
port_dict = {"id": 1, "mac_address": "AA:BB:CC:DD:EE:FF",
"device_id": 2, "backend_key": 3}
with self._stubs(
port=port_dict, parent_net=True
) as (port_find, port_update, alloc_ip, dealloc_ip, sg_find,
driver_port_update):
new_port = dict(port=dict(name="ourport",
security_groups=[]))
port = self.plugin.update_port(self.context, 1, new_port)
self.assertEqual(port["security_groups"], [])
port_update.assert_called_once_with(
self.context,
port_find(),
name="ourport",
security_groups=[])
driver_port_update.assert_called_once_with(
self.context, port_id=port_dict["backend_key"],
mac_address=port_dict["mac_address"],
device_id=port_dict["device_id"],
security_groups=[],
base_net_driver=registry.DRIVER_REGISTRY.get_driver('BASE'))
def test_update_port_no_security_groups(self):
port_dict = {"id": 1, "mac_address": "AA:BB:CC:DD:EE:FF",
"device_id": 2, "backend_key": 3}
with self._stubs(
port=port_dict, parent_net=True
) as (port_find, port_update, alloc_ip, dealloc_ip, sg_find,
driver_port_update):
new_port = dict(port=dict(name="ourport"))
self.plugin.update_port(self.context, 1, new_port)
driver_port_update.assert_called_once_with(
self.context, port_id=port_dict["backend_key"],
mac_address=port_dict["mac_address"],
device_id=port_dict["device_id"],
base_net_driver=registry.DRIVER_REGISTRY.get_driver('BASE'))
def test_update_port_security_groups_no_device_id_raises(self):
with self._stubs(
port=dict(id=1), parent_net=True
) as (port_find, port_update, alloc_ip, dealloc_ip, sg_find,
driver_port_update):
new_port = dict(port=dict(name="ourport",
security_groups=[1]))
with self.assertRaises(q_exc.SecurityGroupsRequireDevice):
self.plugin.update_port(self.context, 1, new_port)
class TestQuarkUpdatePortSetsIps(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, port, new_ips=None):
def alloc_mock(kls, context, addresses, *args, **kwargs):
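            # Stand-in for allocate_ip_address: extend the shared address
            # list in place and record that allocation happened.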
addresses.extend(new_ips)
self.called = True
port_model = None
if port:
net_model = models.Network()
net_model["network_plugin"] = "BASE"
port_model = models.Port()
port_model['network'] = net_model
port_model.update(port)
with contextlib.nested(
mock.patch("quark.db.api.port_find"),
mock.patch("quark.db.api.port_update"),
mock.patch("quark.ipam.QuarkIpam.deallocate_ips_by_port"),
mock.patch("neutron.quota.QuotaEngine.limit_check")
) as (port_find, port_update, dealloc_ip, limit_check):
port_find.return_value = port_model
port_update.return_value = port_model
alloc_ip = mock.patch("quark.ipam.QuarkIpam.allocate_ip_address",
new=alloc_mock)
alloc_ip.start()
yield port_find, port_update, alloc_ip, dealloc_ip
alloc_ip.stop()
def test_update_port_fixed_ip_subnet_only_allocates_ip(self):
self.called = False
new_addr_dict = {"address": netaddr.IPAddress('1.1.1.1'),
"address_readable": "1.1.1.1"}
new_addr = models.IPAddress()
new_addr.update(new_addr_dict)
with self._stubs(
port=dict(id=1, name="myport", mac_address="0:0:0:0:0:1"),
new_ips=[new_addr]
) as (port_find, port_update, alloc_ip, dealloc_ip):
new_port = dict(port=dict(
fixed_ips=[dict(subnet_id=1)]))
self.plugin.update_port(self.context, 1, new_port)
self.assertTrue(self.called)
class TestQuarkCreatePortOnSharedNetworks(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, port=None, network=None, addr=None, mac=None):
self.strategy = {"public_network":
{"bridge": "xenbr0",
"subnets": {"4": "public_v4",
"6": "public_v6"}}}
strategy_json = json.dumps(self.strategy)
quark_ports.STRATEGY = network_strategy.JSONStrategy(strategy_json)
if network:
network["network_plugin"] = "BASE"
network["ipam_strategy"] = "ANY"
port_model = models.Port()
port_model.update(port)
port_models = port_model
with contextlib.nested(
mock.patch("quark.db.api.port_create"),
mock.patch("quark.db.api.network_find"),
mock.patch("quark.ipam.QuarkIpam.allocate_ip_address"),
mock.patch("quark.ipam.QuarkIpam.allocate_mac_address"),
mock.patch("neutron.quota.QuotaEngine.limit_check")
) as (port_create, net_find, alloc_ip, alloc_mac, limit_check):
port_create.return_value = port_models
net_find.return_value = network
alloc_ip.return_value = addr
alloc_mac.return_value = mac
yield port_create
def test_create_port_shared_net_no_quota_check(self):
network = dict(id=1, ports=[models.Port()],
tenant_id=self.context.tenant_id)
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_name = "foobar"
ip = dict()
port = dict(port=dict(mac_address=mac["address"],
network_id="public_network",
tenant_id=self.context.tenant_id, device_id=2,
segment_id="cell01",
name=port_name))
with self._stubs(port=port["port"], network=network, addr=ip, mac=mac):
try:
self.plugin.create_port(self.context, port)
except Exception:
self.fail("create_port raised OverQuota")
def test_create_port_shared_net_no_segment_id_fails(self):
network = dict(id=1, ports=[models.Port()],
tenant_id=self.context.tenant_id)
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_name = "foobar"
ip = dict()
port = dict(port=dict(mac_address=mac["address"],
network_id="public_network",
tenant_id=self.context.tenant_id, device_id=2,
name=port_name))
with self._stubs(port=port["port"], network=network, addr=ip, mac=mac):
with self.assertRaises(q_exc.AmbiguousNetworkId):
self.plugin.create_port(self.context, port)
class TestQuarkGetPortCount(test_quark_plugin.TestQuarkPlugin):
def test_get_port_count(self):
"""This isn't really testable."""
with mock.patch("quark.db.api.port_count_all"):
self.plugin.get_ports_count(self.context, {})
class TestQuarkDeletePort(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, port=None, addr=None, mac=None):
port_models = None
if port:
net_model = models.Network()
net_model["network_plugin"] = "BASE"
net_model["ipam_strategy"] = "ANY"
port_model = models.Port()
port_model.update(port)
port_model.network = net_model
port_models = port_model
with contextlib.nested(
mock.patch("quark.db.api.port_find"),
mock.patch("quark.ipam.QuarkIpam.deallocate_ips_by_port"),
mock.patch("quark.ipam.QuarkIpam.deallocate_mac_address"),
mock.patch("quark.db.api.port_delete"),
mock.patch("quark.drivers.base.BaseDriver.delete_port")
) as (port_find, dealloc_ip, dealloc_mac, db_port_del,
driver_port_del):
port_find.return_value = port_models
dealloc_ip.return_value = addr
dealloc_mac.return_value = mac
yield db_port_del, driver_port_del
def test_port_delete(self):
port = dict(port=dict(network_id=1, tenant_id=self.context.tenant_id,
device_id=2, mac_address="AA:BB:CC:DD:EE:FF",
backend_key="foo"))
with self._stubs(port=port["port"]) as (db_port_del, driver_port_del):
self.plugin.delete_port(self.context, 1)
self.assertTrue(db_port_del.called)
driver_port_del.assert_called_with(
self.context, "foo", mac_address=port["port"]["mac_address"],
device_id=port["port"]["device_id"],
base_net_driver=registry.DRIVER_REGISTRY.get_driver("BASE"))
def test_port_delete_port_not_found_fails(self):
with self._stubs(port=None) as (db_port_del, driver_port_del):
with self.assertRaises(n_exc.PortNotFound):
self.plugin.delete_port(self.context, 1)
class TestPortDiagnose(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, port, list_format=False):
port_res = None
if port:
network_mod = models.Network()
port_mod = models.Port()
port_mod.update(port)
network_mod["network_plugin"] = "UNMANAGED"
port_mod.network = network_mod
port_res = port_mod
if list_format:
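                # Wildcard ('*') diagnoses expect a query-like object, so
                # wrap the port in a mock exposing .all().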
ports = mock.MagicMock()
ports.all.return_value = [port_mod]
port_res = ports
with mock.patch("quark.db.api.port_find") as port_find:
port_find.return_value = port_res
yield
def test_port_diagnose(self):
ip = dict(id=1, address=netaddr.IPAddress("192.168.1.100"),
address_readable="192.168.1.100", subnet_id=1, network_id=2,
version=4)
fixed_ips = [{"subnet_id": ip["subnet_id"],
"ip_address": ip["address_readable"]}]
port = dict(port=dict(network_id=1, tenant_id=self.context.tenant_id,
device_id=2, mac_address="AA:BB:CC:DD:EE:FF",
backend_key="foo", fixed_ips=fixed_ips,
network_plugin="UNMANAGED"))
with self._stubs(port=port):
diag = self.plugin.diagnose_port(self.context.elevated(), 1, [])
ports = diag["ports"]
# All none because we're using the unmanaged driver, which
# doesn't do anything with these
self.assertEqual(ports["status"], "ACTIVE")
self.assertEqual(ports["device_owner"], None)
self.assertEqual(ports["fixed_ips"], [])
self.assertEqual(ports["security_groups"], [])
self.assertEqual(ports["device_id"], None)
self.assertEqual(ports["admin_state_up"], None)
self.assertEqual(ports["network_id"], None)
self.assertEqual(ports["tenant_id"], None)
self.assertEqual(ports["mac_address"], None)
def test_port_diagnose_with_wildcard(self):
ip = dict(id=1, address=netaddr.IPAddress("192.168.1.100"),
address_readable="192.168.1.100", subnet_id=1, network_id=2,
version=4)
fixed_ips = [{"subnet_id": ip["subnet_id"],
"ip_address": ip["address_readable"]}]
port = dict(port=dict(network_id=1, tenant_id=self.context.tenant_id,
device_id=2, mac_address="AA:BB:CC:DD:EE:FF",
backend_key="foo", fixed_ips=fixed_ips,
network_plugin="UNMANAGED"))
with self._stubs(port=port, list_format=True):
diag = self.plugin.diagnose_port(self.context.elevated(), '*', [])
ports = diag["ports"]
# All none because we're using the unmanaged driver, which
# doesn't do anything with these
self.assertEqual(ports[0]["status"], "ACTIVE")
self.assertEqual(ports[0]["device_owner"], None)
self.assertEqual(ports[0]["fixed_ips"], [])
self.assertEqual(ports[0]["security_groups"], [])
self.assertEqual(ports[0]["device_id"], None)
self.assertEqual(ports[0]["admin_state_up"], None)
self.assertEqual(ports[0]["network_id"], None)
self.assertEqual(ports[0]["tenant_id"], None)
self.assertEqual(ports[0]["mac_address"], None)
def test_port_diagnose_with_config_field(self):
ip = dict(id=1, address=netaddr.IPAddress("192.168.1.100"),
address_readable="192.168.1.100", subnet_id=1, network_id=2,
version=4)
fixed_ips = [{"subnet_id": ip["subnet_id"],
"ip_address": ip["address_readable"]}]
port = dict(port=dict(network_id=1, tenant_id=self.context.tenant_id,
device_id=2, mac_address="AA:BB:CC:DD:EE:FF",
backend_key="foo", fixed_ips=fixed_ips,
network_plugin="UNMANAGED"))
with self._stubs(port=port, list_format=True):
diag = self.plugin.diagnose_port(self.context.elevated(), '*',
["config"])
ports = diag["ports"]
# All none because we're using the unmanaged driver, which
# doesn't do anything with these
self.assertEqual(ports[0]["status"], "ACTIVE")
self.assertEqual(ports[0]["device_owner"], None)
self.assertEqual(ports[0]["fixed_ips"], [])
self.assertEqual(ports[0]["security_groups"], [])
self.assertEqual(ports[0]["device_id"], None)
self.assertEqual(ports[0]["admin_state_up"], None)
self.assertEqual(ports[0]["network_id"], None)
self.assertEqual(ports[0]["tenant_id"], None)
self.assertEqual(ports[0]["mac_address"], None)
def test_port_diagnose_no_port_raises(self):
with self._stubs(port=None):
with self.assertRaises(n_exc.PortNotFound):
self.plugin.diagnose_port(self.context.elevated(), 1, [])
def test_port_diagnose_not_authorized(self):
with self._stubs(port=None):
with self.assertRaises(n_exc.NotAuthorized):
self.plugin.diagnose_port(self.context, 1, [])
class TestPortDriverSelection(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, network=None, addr=None, mac=None,
compat_map=None, driver_res=None, ipam="FOO"):
network["ipam_strategy"] = "FOO"
# Response from the backend driver
self.expected_bridge = "backend-drivers-bridge"
if driver_res is None:
driver_res = {"uuid": 1, "bridge": self.expected_bridge}
# Mock out the driver registry
foo_driver = mock.Mock()
foo_driver.create_port.return_value = driver_res
foo_driver.select_ipam_strategy.return_value = "FOO"
bar_driver = mock.Mock()
bar_driver.create_port.return_value = driver_res
bar_driver.select_ipam_strategy.return_value = "BAR"
drivers = {"FOO": foo_driver,
"BAR": bar_driver}
compat_map = compat_map or {}
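        # compat_map lists which port-level drivers may run on top of
        # which underlying network drivers.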
# Mock out the IPAM registry
foo_ipam = mock.Mock()
foo_ipam.allocate_ip_address.return_value = addr
foo_ipam.allocate_mac_address.return_value = mac
bar_ipam = mock.Mock()
bar_ipam.allocate_ip_address.return_value = addr
bar_ipam.allocate_mac_address.return_value = mac
ipam = {"FOO": foo_ipam, "BAR": bar_ipam}
with contextlib.nested(
mock.patch("quark.db.api.port_create"),
mock.patch("quark.db.api.network_find"),
mock.patch("oslo_utils.uuidutils.generate_uuid"),
mock.patch("quark.plugin_views._make_port_dict"),
mock.patch("quark.db.api.port_count_all"),
mock.patch("neutron.quota.QuotaEngine.limit_check"),
mock.patch("quark.plugin_modules.ports.registry."
"DRIVER_REGISTRY.drivers",
new_callable=mock.PropertyMock(return_value=drivers)),
mock.patch("quark.plugin_modules.ports.registry."
"DRIVER_REGISTRY.port_driver_compat_map",
new_callable=mock.PropertyMock(
return_value=compat_map)),
mock.patch("quark.plugin_modules.ports.ipam."
"IPAM_REGISTRY.strategies",
new_callable=mock.PropertyMock(return_value=ipam))
) as (port_create, net_find, gen_uuid, make_port,
port_count, limit_check, _, _, _):
net_find.return_value = network
gen_uuid.return_value = 1
port_count.return_value = 0
yield (port_create, ipam, net_find)
def test_create_port_with_bad_network_plugin_fails(self):
network_dict = dict(id=1, tenant_id=self.context.tenant_id)
port_name = "foobar"
mac = dict(address="AA:BB:CC:DD:EE:FF")
ip = dict()
port = dict(port=dict(mac_address=mac["address"], network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
name=port_name))
network = models.Network()
network.update(network_dict)
network["network_plugin"] = "FAIL"
port_model = models.Port()
port_model.update(port)
port_models = port_model
with self._stubs(network=network, addr=ip,
mac=mac) as (port_create, ipam, net_find):
port_create.return_value = port_models
exc = "Driver FAIL is not registered."
with self.assertRaisesRegexp(n_exc.BadRequest, exc):
self.plugin.create_port(self.context, port)
def test_create_port_with_bad_port_network_plugin_fails(self):
network_dict = dict(id=1, tenant_id=self.context.tenant_id)
port_name = "foobar"
mac = dict(address="AA:BB:CC:DD:EE:FF")
ip = dict()
port = dict(port=dict(mac_address=mac["address"], network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
name=port_name, network_plugin="FAIL"))
network = models.Network()
network.update(network_dict)
network["network_plugin"] = "FOO"
port_model = models.Port()
port_model.update(port)
port_models = port_model
with self._stubs(network=network, addr=ip,
mac=mac) as (port_create, ipam, net_find):
port_create.return_value = port_models
exc = "Driver FAIL is not registered."
admin_ctx = self.context.elevated()
with self.assertRaisesRegexp(n_exc.BadRequest, exc):
self.plugin.create_port(admin_ctx, port)
def test_create_port_with_incompatable_port_network_plugin_fails(self):
network_dict = dict(id=1, tenant_id=self.context.tenant_id)
port_name = "foobar"
mac = dict(address="AA:BB:CC:DD:EE:FF")
ip = dict()
port = dict(port=dict(mac_address=mac["address"], network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
name=port_name, network_plugin="BAR"))
network = models.Network()
network.update(network_dict)
network["network_plugin"] = "FOO"
port_model = models.Port()
port_model.update(port)
port_models = port_model
with self._stubs(network=network, addr=ip,
mac=mac) as (port_create, ipam, net_find):
port_create.return_value = port_models
exc = ("Port driver BAR not allowed for underlying network "
"driver FOO.")
admin_ctx = self.context.elevated()
with self.assertRaisesRegexp(n_exc.BadRequest, exc):
self.plugin.create_port(admin_ctx, port)
def test_create_port_with_no_port_network_plugin(self):
network = dict(id=1, tenant_id=self.context.tenant_id,
network_plugin="FOO")
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_name = "foobar"
ip = dict()
port = dict(port=dict(mac_address=mac["address"], network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
name=port_name, device_owner="quark_tests",
bridge="quark_bridge", admin_state_up=False))
expected_mac = "DE:AD:BE:EF:00:00"
expected_bridge = "new_bridge"
expected_device_owner = "new_device_owner"
expected_admin_state = "new_state"
port_create_dict = {}
port_create_dict["port"] = port["port"].copy()
port_create_dict["port"]["mac_address"] = expected_mac
port_create_dict["port"]["device_owner"] = expected_device_owner
port_create_dict["port"]["bridge"] = expected_bridge
port_create_dict["port"]["admin_state_up"] = expected_admin_state
admin_ctx = self.context.elevated()
with self._stubs(network=network, addr=ip,
mac=mac) as (port_create, ipam, net_find):
self.plugin.create_port(admin_ctx, port_create_dict)
ipam["BAR"].allocate_mac_address.assert_not_called()
ipam["BAR"].allocate_ip_address.assert_not_called()
ipam["FOO"].allocate_ip_address.assert_called_once_with(
admin_ctx, [], network["id"], 1,
cfg.CONF.QUARK.ipam_reuse_after,
segment_id=None, mac_address=mac)
ipam["FOO"].allocate_mac_address.assert_called_once_with(
admin_ctx, network["id"], 1,
cfg.CONF.QUARK.ipam_reuse_after,
mac_address=expected_mac, use_forbidden_mac_range=False)
port_create.assert_called_once_with(
admin_ctx, bridge=self.expected_bridge, uuid=1, name="foobar",
admin_state_up=expected_admin_state, network_id=1,
tenant_id="fake", id=1, device_owner=expected_device_owner,
mac_address=mac["address"], device_id=2, backend_key=1,
security_groups=[], addresses=[], instance_node_id='',
network_plugin="FOO")
def test_create_port_with_port_network_plugin(self):
network = dict(id=1, tenant_id=self.context.tenant_id,
network_plugin="FOO")
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_name = "foobar"
ip = dict()
port = dict(port=dict(mac_address=mac["address"], network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
name=port_name, device_owner="quark_tests",
bridge="quark_bridge", admin_state_up=False))
expected_mac = "DE:AD:BE:EF:00:00"
expected_bridge = "new_bridge"
expected_device_owner = "new_device_owner"
expected_admin_state = "new_state"
expected_network_plugin = "FOO"
port_create_dict = {}
port_create_dict["port"] = port["port"].copy()
port_create_dict["port"]["mac_address"] = expected_mac
port_create_dict["port"]["device_owner"] = expected_device_owner
port_create_dict["port"]["bridge"] = expected_bridge
port_create_dict["port"]["admin_state_up"] = expected_admin_state
port_create_dict["port"]["network_plugin"] = expected_network_plugin
admin_ctx = self.context.elevated()
with self._stubs(network=network, addr=ip,
mac=mac) as (port_create, ipam, net_find):
self.plugin.create_port(admin_ctx, port_create_dict)
ipam["BAR"].allocate_mac_address.assert_not_called()
ipam["BAR"].allocate_ip_address.assert_not_called()
ipam["FOO"].allocate_ip_address.assert_called_once_with(
admin_ctx, [], network["id"], 1,
cfg.CONF.QUARK.ipam_reuse_after,
segment_id=None, mac_address=mac)
ipam["FOO"].allocate_mac_address.assert_called_once_with(
admin_ctx, network["id"], 1,
cfg.CONF.QUARK.ipam_reuse_after,
mac_address=expected_mac, use_forbidden_mac_range=False)
port_create.assert_called_once_with(
admin_ctx, bridge=self.expected_bridge, uuid=1, name="foobar",
admin_state_up=expected_admin_state, network_id=1,
tenant_id="fake", id=1, device_owner=expected_device_owner,
mac_address=mac["address"], device_id=2, backend_key=1,
security_groups=[], addresses=[], instance_node_id='',
network_plugin=expected_network_plugin)
def test_create_port_with_compatible_port_network_plugin(self):
network = dict(id=1, tenant_id=self.context.tenant_id,
network_plugin="FOO")
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_name = "foobar"
ip = dict()
port = dict(port=dict(mac_address=mac["address"], network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
name=port_name, device_owner="quark_tests",
bridge="quark_bridge", admin_state_up=False))
expected_mac = "DE:AD:BE:EF:00:00"
expected_bridge = "new_bridge"
expected_device_owner = "new_device_owner"
expected_admin_state = "new_state"
expected_network_plugin = "BAR"
port_create_dict = {}
port_create_dict["port"] = port["port"].copy()
port_create_dict["port"]["mac_address"] = expected_mac
port_create_dict["port"]["device_owner"] = expected_device_owner
port_create_dict["port"]["bridge"] = expected_bridge
port_create_dict["port"]["admin_state_up"] = expected_admin_state
port_create_dict["port"]["network_plugin"] = expected_network_plugin
compat_map = {"BAR": ["FOO"]}
admin_ctx = self.context.elevated()
with self._stubs(network=network, addr=ip, mac=mac,
compat_map=compat_map) as (port_create, ipam,
net_find):
self.plugin.create_port(admin_ctx, port_create_dict)
ipam["FOO"].allocate_mac_address.assert_not_called()
ipam["FOO"].allocate_ip_address.assert_not_called()
ipam["BAR"].allocate_ip_address.assert_called_once_with(
admin_ctx, [], network["id"], 1,
cfg.CONF.QUARK.ipam_reuse_after,
segment_id=None, mac_address=mac)
ipam["BAR"].allocate_mac_address.assert_called_once_with(
admin_ctx, network["id"], 1,
cfg.CONF.QUARK.ipam_reuse_after,
mac_address=expected_mac, use_forbidden_mac_range=False)
port_create.assert_called_once_with(
admin_ctx, bridge=self.expected_bridge, uuid=1, name="foobar",
admin_state_up=expected_admin_state, network_id=1,
tenant_id="fake", id=1, device_owner=expected_device_owner,
mac_address=mac["address"], device_id=2, backend_key=1,
security_groups=[], addresses=[], instance_node_id='',
network_plugin=expected_network_plugin)
def test_create_port_ipam_selection(self):
network = dict(id=1, tenant_id=self.context.tenant_id,
network_plugin="FOO")
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_name = "foobar"
ip = dict()
port = dict(port=dict(mac_address=mac["address"], network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
name=port_name, device_owner="quark_tests",
bridge="quark_bridge", admin_state_up=False))
expected_mac = "DE:AD:BE:EF:00:00"
expected_bridge = "new_bridge"
expected_device_owner = "new_device_owner"
expected_admin_state = "new_state"
port_create_dict = {}
port_create_dict["port"] = port["port"].copy()
port_create_dict["port"]["mac_address"] = expected_mac
port_create_dict["port"]["device_owner"] = expected_device_owner
port_create_dict["port"]["bridge"] = expected_bridge
port_create_dict["port"]["admin_state_up"] = expected_admin_state
admin_ctx = self.context.elevated()
with self._stubs(network=network, addr=ip,
mac=mac) as (port_create, ipam, net_find):
self.plugin.create_port(admin_ctx, port_create_dict)
ipam["BAR"].allocate_mac_address.assert_not_called()
ipam["BAR"].allocate_ip_address.assert_not_called()
ipam["FOO"].allocate_ip_address.assert_called_once_with(
admin_ctx, [], network["id"], 1,
cfg.CONF.QUARK.ipam_reuse_after,
segment_id=None, mac_address=mac)
ipam["FOO"].allocate_mac_address.assert_called_once_with(
admin_ctx, network["id"], 1,
cfg.CONF.QUARK.ipam_reuse_after,
mac_address=expected_mac, use_forbidden_mac_range=False)
port_create.assert_called_once_with(
admin_ctx, bridge=self.expected_bridge, uuid=1, name="foobar",
admin_state_up=expected_admin_state, network_id=1,
tenant_id="fake", id=1, device_owner=expected_device_owner,
mac_address=mac["address"], device_id=2, backend_key=1,
security_groups=[], addresses=[], instance_node_id='',
network_plugin="FOO")
def test_create_port_ipam_selection_override_by_driver(self):
network = dict(id=1, tenant_id=self.context.tenant_id,
network_plugin="BAR")
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_name = "foobar"
ip = dict()
port = dict(port=dict(mac_address=mac["address"], network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
name=port_name, device_owner="quark_tests",
bridge="quark_bridge", admin_state_up=False))
expected_mac = "DE:AD:BE:EF:00:00"
expected_bridge = "new_bridge"
expected_device_owner = "new_device_owner"
expected_admin_state = "new_state"
port_create_dict = {}
port_create_dict["port"] = port["port"].copy()
port_create_dict["port"]["mac_address"] = expected_mac
port_create_dict["port"]["device_owner"] = expected_device_owner
port_create_dict["port"]["bridge"] = expected_bridge
port_create_dict["port"]["admin_state_up"] = expected_admin_state
admin_ctx = self.context.elevated()
with self._stubs(network=network, addr=ip,
mac=mac, ipam="BAR") as (port_create, ipam, net_find):
self.plugin.create_port(admin_ctx, port_create_dict)
ipam["FOO"].allocate_mac_address.assert_not_called()
ipam["FOO"].allocate_ip_address.assert_not_called()
ipam["BAR"].allocate_ip_address.assert_called_once_with(
admin_ctx, [], network["id"], 1,
cfg.CONF.QUARK.ipam_reuse_after,
segment_id=None, mac_address=mac)
ipam["BAR"].allocate_mac_address.assert_called_once_with(
admin_ctx, network["id"], 1,
cfg.CONF.QUARK.ipam_reuse_after,
mac_address=expected_mac, use_forbidden_mac_range=False)
port_create.assert_called_once_with(
admin_ctx, bridge=self.expected_bridge, uuid=1, name="foobar",
admin_state_up=expected_admin_state, network_id=1,
tenant_id="fake", id=1, device_owner=expected_device_owner,
mac_address=mac["address"], device_id=2, backend_key=1,
security_groups=[], addresses=[], instance_node_id='',
network_plugin="BAR")
def test_create_port_network_plugin_response_no_uuid_raises(self):
network_dict = dict(id=1, tenant_id=self.context.tenant_id)
port_name = "foobar"
mac = dict(address="AA:BB:CC:DD:EE:FF")
ip = dict()
port = dict(port=dict(mac_address=mac["address"], network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
name=port_name))
network = models.Network()
network.update(network_dict)
network["network_plugin"] = "FOO"
port_model = models.Port()
port_model.update(port)
port_models = port_model
with self._stubs(network=network, addr=ip,
mac=mac, driver_res={}) as (port_create,
alloc_mac,
net_find):
port_create.return_value = port_models
exc = "uuid"
with self.assertRaisesRegexp(KeyError, exc):
self.plugin.create_port(self.context, port)
def test_create_port_network_plugin_response_is_filtered(self):
network = dict(id=1, tenant_id=self.context.tenant_id,
network_plugin="FOO")
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_name = "foobar"
ip = dict()
port = dict(port=dict(mac_address=mac["address"], network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
name=port_name, device_owner="quark_tests",
bridge="quark_bridge", admin_state_up=False))
expected_mac = "DE:AD:BE:EF:00:00"
expected_bridge = "new_bridge"
expected_device_owner = "new_device_owner"
expected_admin_state = "new_state"
port_create_dict = {}
port_create_dict["port"] = port["port"].copy()
port_create_dict["port"]["mac_address"] = expected_mac
port_create_dict["port"]["device_owner"] = expected_device_owner
port_create_dict["port"]["bridge"] = expected_bridge
port_create_dict["port"]["admin_state_up"] = expected_admin_state
driver_res = {
"uuid": 5,
"vlan_id": 50,
"tags": [123, {"foo": "bar"}],
"id": "fail",
"randomkey": None
}
admin_ctx = self.context.elevated()
with self._stubs(network=network, addr=ip,
mac=mac, driver_res=driver_res) as (port_create,
ipam,
net_find):
self.plugin.create_port(admin_ctx, port_create_dict)
ipam["BAR"].allocate_mac_address.assert_not_called()
ipam["BAR"].allocate_ip_address.assert_not_called()
ipam["FOO"].allocate_ip_address.assert_called_once_with(
admin_ctx, [], network["id"], 1,
cfg.CONF.QUARK.ipam_reuse_after,
segment_id=None, mac_address=mac)
ipam["FOO"].allocate_mac_address.assert_called_once_with(
admin_ctx, network["id"], 1,
cfg.CONF.QUARK.ipam_reuse_after,
mac_address=expected_mac, use_forbidden_mac_range=False)
port_create.assert_called_once_with(
admin_ctx, bridge=expected_bridge, uuid=5, name="foobar",
admin_state_up=expected_admin_state, network_id=1,
tenant_id="fake", id=1, device_owner=expected_device_owner,
mac_address=mac["address"], device_id=2, backend_key=5,
security_groups=[], addresses=[], vlan_id=50,
network_plugin=network["network_plugin"], instance_node_id='')
class TestQuarkPortCreateFiltering(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, network=None, addr=None, mac=None):
network["network_plugin"] = "BASE"
network["ipam_strategy"] = "ANY"
with contextlib.nested(
mock.patch("quark.db.api.port_create"),
mock.patch("quark.db.api.network_find"),
mock.patch("quark.ipam.QuarkIpam.allocate_ip_address"),
mock.patch("quark.ipam.QuarkIpam.allocate_mac_address"),
mock.patch("oslo_utils.uuidutils.generate_uuid"),
mock.patch("quark.plugin_views._make_port_dict"),
mock.patch("quark.db.api.port_count_all"),
mock.patch("neutron.quota.QuotaEngine.limit_check")
) as (port_create, net_find, alloc_ip, alloc_mac, gen_uuid, make_port,
port_count, limit_check):
net_find.return_value = network
alloc_ip.return_value = addr
alloc_mac.return_value = mac
gen_uuid.return_value = 1
port_count.return_value = 0
yield port_create, alloc_mac, net_find
def test_create_port_attribute_filtering(self):
network = dict(id=1, tenant_id=self.context.tenant_id)
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_name = "foobar"
ip = dict()
port = dict(port=dict(mac_address=mac["address"], network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
name=port_name, device_owner="quark_tests",
bridge="quark_bridge", admin_state_up=False))
port_create_dict = {}
port_create_dict["port"] = port["port"].copy()
port_create_dict["port"]["mac_address"] = "DE:AD:BE:EF:00:00"
port_create_dict["port"]["device_owner"] = "ignored"
port_create_dict["port"]["bridge"] = "ignored"
port_create_dict["port"]["admin_state_up"] = "ignored"
port_create_dict["port"]["network_plugin"] = "ignored"
with self._stubs(network=network, addr=ip,
mac=mac) as (port_create, alloc_mac, net_find):
self.plugin.create_port(self.context, port_create_dict)
alloc_mac.assert_called_once_with(
self.context, network["id"], 1,
cfg.CONF.QUARK.ipam_reuse_after,
mac_address=None, use_forbidden_mac_range=False)
port_create.assert_called_once_with(
self.context, addresses=[], network_id=network["id"],
tenant_id="fake", uuid=1, name="foobar",
mac_address=alloc_mac()["address"], backend_key=1, id=1,
security_groups=[], network_plugin='BASE',
device_id=2, instance_node_id='')
def test_create_port_attribute_filtering_admin(self):
network = dict(id=1, tenant_id=self.context.tenant_id)
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_name = "foobar"
ip = dict()
port = dict(port=dict(mac_address=mac["address"], network_id=1,
tenant_id=self.context.tenant_id, device_id=2,
name=port_name, device_owner="quark_tests",
bridge="quark_bridge", admin_state_up=False))
expected_mac = "DE:AD:BE:EF:00:00"
expected_bridge = "new_bridge"
expected_device_owner = "new_device_owner"
expected_admin_state = "new_state"
expected_network_plugin = "BASE"
port_create_dict = {}
port_create_dict["port"] = port["port"].copy()
port_create_dict["port"]["mac_address"] = expected_mac
port_create_dict["port"]["device_owner"] = expected_device_owner
port_create_dict["port"]["bridge"] = expected_bridge
port_create_dict["port"]["admin_state_up"] = expected_admin_state
port_create_dict["port"]["network_plugin"] = expected_network_plugin
admin_ctx = self.context.elevated()
with self._stubs(network=network, addr=ip,
mac=mac) as (port_create, alloc_mac, net_find):
self.plugin.create_port(admin_ctx, port_create_dict)
alloc_mac.assert_called_once_with(
admin_ctx, network["id"], 1,
cfg.CONF.QUARK.ipam_reuse_after,
mac_address=expected_mac, use_forbidden_mac_range=False)
port_create.assert_called_once_with(
admin_ctx, bridge=expected_bridge, uuid=1, name="foobar",
admin_state_up=expected_admin_state, network_id=1,
tenant_id="fake", id=1, device_owner=expected_device_owner,
mac_address=mac["address"], device_id=2, backend_key=1,
security_groups=[], addresses=[],
network_plugin=expected_network_plugin, instance_node_id='')
class TestQuarkPortUpdateFiltering(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self):
with contextlib.nested(
mock.patch("quark.db.api.port_find"),
mock.patch("quark.db.api.port_update"),
mock.patch("quark.drivers.registry.DriverRegistry.get_driver"),
mock.patch("quark.plugin_views._make_port_dict"),
mock.patch("neutron.quota.QuotaEngine.limit_check")
) as (port_find, port_update, get_driver, make_port, limit_check):
yield port_find, port_update
def test_update_port_attribute_filtering(self):
new_port = {}
new_port["port"] = {
"mac_address": "DD:EE:FF:00:00:00", "device_owner": "new_owner",
"bridge": "new_bridge", "admin_state_up": False, "device_id": 3,
"network_id": 10, "backend_key": 1234, "name": "new_name",
"network_plugin": "BASE"}
with self._stubs() as (port_find, port_update):
self.plugin.update_port(self.context, 1, new_port)
port_update.assert_called_once_with(
self.context,
port_find(),
name="new_name",
security_groups=[])
def test_update_port_attribute_filtering_admin(self):
new_port = {}
new_port["port"] = {
"mac_address": "DD:EE:FF:00:00:00", "device_owner": "new_owner",
"bridge": "new_bridge", "admin_state_up": False, "device_id": 3,
"network_id": 10, "backend_key": 1234, "name": "new_name",
"network_plugin": "BASE"}
admin_ctx = self.context.elevated()
with self._stubs() as (port_find, port_update):
self.plugin.update_port(admin_ctx, 1, new_port)
port_update.assert_called_once_with(
admin_ctx,
port_find(),
name="new_name",
bridge=new_port["port"]["bridge"],
admin_state_up=new_port["port"]["admin_state_up"],
device_owner=new_port["port"]["device_owner"],
mac_address=new_port["port"]["mac_address"],
device_id=new_port["port"]["device_id"],
security_groups=[])
class TestQuarkPortCreateAsAdvancedService(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, port=None, network=None, addr=None, mac=None):
if network:
network["network_plugin"] = "BASE"
network["ipam_strategy"] = "ANY"
port_model = models.Port()
port_model.update(port['port'])
port_models = port_model
db_mod = "quark.db.api"
ipam = "quark.ipam.QuarkIpam"
with contextlib.nested(
mock.patch("%s.port_create" % db_mod),
mock.patch("%s.network_find" % db_mod),
mock.patch("%s.port_find" % db_mod),
mock.patch("%s.allocate_ip_address" % ipam),
mock.patch("%s.allocate_mac_address" % ipam),
mock.patch("%s.port_count_all" % db_mod),
) as (port_create, net_find, port_find, alloc_ip, alloc_mac,
port_count):
port_create.return_value = port_models
net_find.return_value = network
port_find.return_value = None
alloc_ip.return_value = addr
alloc_mac.return_value = mac
port_count.return_value = 0
yield port_create
def test_advanced_service_create_port_other_tenant_network(self):
"""NCP-1819 - Advanced service can create port on any network
        Tests that an advanced service creating a port on another tenant's
        network does not fail AND that the tenant_id is that of the context's.
"""
self.context.is_advsvc = True
network_id = "foobar"
network = dict(id=network_id,
tenant_id="other_tenant")
ip = dict()
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_1 = dict(port=dict(mac_address="AA:BB:CC:DD:EE:00",
network_id=network_id,
tenant_id=self.context.tenant_id, device_id=5,
name="Fake"))
with self._stubs(port=port_1, network=network, addr=ip, mac=mac):
port = self.plugin.create_port(self.context, port_1)
self.assertEqual(self.context.tenant_id, port['tenant_id'])
def test_advsvc_can_create_port_with_another_tenant_id(self):
"""NCP-1819 - Advanced Service can create port on another tenant's net
Tests that an advanced service can create a port on another tenant's
network.
"""
another_tenant_id = 'im-another-tenant'
self.context.is_advsvc = True
network_id = "foobar"
network = dict(id=network_id,
tenant_id="other_tenant")
ip = dict()
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_1 = dict(port=dict(mac_address="AA:BB:CC:DD:EE:00",
network_id=network_id,
tenant_id=another_tenant_id, device_id=5,
name="Fake"))
with self._stubs(port=port_1, network=network, addr=ip, mac=mac):
port = self.plugin.create_port(self.context, port_1)
self.assertEqual(another_tenant_id, port['tenant_id'])
def test_non_advsvc_cannot_create_port_another_network(self):
"""NCP-1819 - Normal tenant port create should fail another's network
Tests that a normal tenant creating a port on another tenant's network
should not be allowed and throws an exception.
"""
normal_tenant_id = "other_tenant"
network_id = "foobar"
network = dict(id=network_id,
tenant_id=normal_tenant_id)
ip = dict()
mac = dict(address="AA:BB:CC:DD:EE:FF")
port_1 = dict(port=dict(mac_address="AA:BB:CC:DD:EE:00",
network_id=network_id,
tenant_id=normal_tenant_id, device_id=5,
name="Fake"))
with self._stubs(port=port_1, network=network, addr=ip, mac=mac):
with self.assertRaises(n_exc.NotAuthorized):
self.plugin.create_port(self.context, port_1)
| apache-2.0 | 3,451,333,421,162,092,000 | 44.696044 | 79 | 0.54969 | false |
quantumlib/Cirq | cirq-google/cirq_google/line/placement/chip.py | 1 | 2533 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Tuple, TYPE_CHECKING
import cirq
if TYPE_CHECKING:
import cirq_google
EDGE = Tuple[cirq.GridQubit, cirq.GridQubit]
def above(qubit: cirq.GridQubit) -> cirq.GridQubit:
"""Gives qubit with one unit less on the second coordinate.
Args:
qubit: Reference qubit.
Returns:
New translated qubit.
"""
return cirq.GridQubit(qubit.row, qubit.col - 1)
def left_of(qubit: cirq.GridQubit) -> cirq.GridQubit:
"""Gives qubit with one unit less on the first coordinate.
Args:
qubit: Reference qubit.
Returns:
New translated qubit.
"""
return cirq.GridQubit(qubit.row - 1, qubit.col)
def below(qubit: cirq.GridQubit) -> cirq.GridQubit:
"""Gives qubit with one unit more on the second coordinate.
Args:
qubit: Reference qubit.
Returns:
New translated qubit.
"""
return cirq.GridQubit(qubit.row, qubit.col + 1)
def right_of(qubit: cirq.GridQubit) -> cirq.GridQubit:
"""Gives node with one unit more on the first coordinate.
Args:
qubit: Reference node.
Returns:
New translated node.
"""
return cirq.GridQubit(qubit.row + 1, qubit.col)
def chip_as_adjacency_list(
device: 'cirq_google.XmonDevice',
) -> Dict[cirq.GridQubit, List[cirq.GridQubit]]:
"""Gives adjacency list representation of a chip.
The adjacency list is constructed in order of above, left_of, below and
right_of consecutively.
Args:
device: Chip to be converted.
Returns:
Map from nodes to list of qubits which represent all the neighbours of
given qubit.
"""
c_set = set(device.qubits)
c_adj: Dict[cirq.GridQubit, List[cirq.GridQubit]] = {}
for n in device.qubits:
c_adj[n] = []
for m in [above(n), left_of(n), below(n), right_of(n)]:
if m in c_set:
c_adj[n].append(m)
return c_adj
| apache-2.0 | -7,775,776,244,764,378,000 | 25.385417 | 78 | 0.66364 | false |
c0deh4xor/CapTipper | CTPlugin.py | 7 | 2361 | #
# CapTipper is a malicious HTTP traffic explorer tool
# By Omri Herscovici <omriher AT gmail.com>
# http://omriher.com
# @omriher
#
#
# This file is part of CapTipper, and part of the Whatype library
# Whatype is an independent file type identification python library
# https://github.com/omriher/whatype
#
# CapTipper is a free software under the GPLv3 License
#
from collections import namedtuple
import inspect
import imp
import os
import glob
import CTCore
class ConsolePlugin(object):
description = ""
author = ""
def __init__(self):
self.conversations = CTCore.conversations
self.objects = CTCore.objects
self.hosts = CTCore.hosts
def run(self):
raise NotImplementedError
def get_name_by_id(self,id):
name = CTCore.get_name(id)
return name
def get_body_by_id(self,id):
response, size = CTCore.get_response_and_size(id, "all")
return response
def get_plaintext_body_by_id(self,id):
if id < len(self.conversations) and self.conversations[id].magic_ext == "GZ":
data, name = CTCore.ungzip(id)
else:
data = self.get_body_by_id(id)
return data
def is_valid_id(self,id):
if int(id) >= len(self.objects) or int(id) < 0:
return False
return True
def init_plugins():
p_files = glob.glob(CTCore.plugins_folder + "*.py")
for p in p_files:
p_full = os.path.join(os.path.dirname(os.path.realpath(__file__)),p)
(path, name) = os.path.split(p_full)
(name, ext) = os.path.splitext(name)
(p_file, filename, data) = imp.find_module(name, [path])
mod = imp.load_module(name, p_file, filename, data)
for name, value in inspect.getmembers(mod):
if inspect.isclass(value):
if issubclass(value, ConsolePlugin) and value is not ConsolePlugin:
p_num = len(CTCore.plugins)
CTCore.plugins.append(namedtuple('Plugin', ['id', 'name','module', 'description']))
CTCore.plugins[p_num].id = p_num
CTCore.plugins[p_num].name = name
CTCore.plugins[p_num].module = value
CTCore.plugins[p_num].description = value.description
| gpl-3.0 | -1,616,008,007,180,516,600 | 29.662338 | 103 | 0.593816 | false |
Mause/dcputoolchain-module-site | tests/test_dtmm_utils.py | 1 | 6884 | # The MIT License (MIT)
# Copyright (c) 2013 Dominic May
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import common
import json
import base64
import unittest2
from mock import patch, Mock
from google.appengine.api import memcache, urlfetch
class TestDTMMUtils(common.DMSTestCase):
@patch('google.appengine.api.urlfetch.fetch', autospec=True)
def test_authed_fetch_with_remaining(self, fetch):
fetch.return_value.headers = {'x-ratelimit-remaining': 4000}
fetch.return_value.content = (
'Lorem ipsum dolor sit amet, consectetur adipisicing elit.')
import dtmm_utils
end_data = dtmm_utils.authed_fetch('http://mock.com')
self.assertEqual(fetch.return_value.content, end_data.content)
fetch.assert_called_with(
url='http://mock.com?client_auth_data=%7Bu%27client_secret%27%3A+u%27false_data%27%2C+u%27client_id%27%3A+u%27false_data%27%7D',
headers={'X-Admin-Contact': '[email protected]'}
)
self.assertEqualMemcache('requests_remaining', 4000)
@patch('google.appengine.api.urlfetch.fetch', autospec=True)
def test_authed_fetch_without_remaining(self, fetch):
fetch.return_value.headers = {'x-ratelimit-remaining': None}
fetch.return_value.content = (
'Lorem ipsum dolor sit amet, consectetur adipisicing elit.')
import dtmm_utils
dtmm_utils.authed_fetch('http://mock.com')
fetch.assert_called_with(
url='http://mock.com?client_auth_data=%7Bu%27client_secret%27%3A+u%27false_data%27%2C+u%27client_id%27%3A+u%27false_data%27%7D',
headers={'X-Admin-Contact': '[email protected]'}
)
self.assertEqualMemcache('requests_remaining', None)
@patch('dtmm_utils.authed_fetch')
def test_get_url_content_fetch_from_remote(self, mock_authed_fetch):
url = 'http://mock.com'
content = {
u'tree': [{
u'sha': u'ac178f6489f2d3f601df6a9a5e641b62a0388eae',
u'mode': u'100644',
u'path': u'README.md',
u'type': u'blob',
u'size': 314
}]
}
mock_authed_fetch.return_value.content = json.dumps(content)
import dtmm_utils
end_data = dtmm_utils.get_url_content(None, url)
self.assertEqual(end_data, content)
mock_authed_fetch.assert_called_with(url)
self.assertEqual(
memcache.get(dtmm_utils.md5_hash(url)),
content
)
def test_get_url_content_retrieve_from_memcache(self):
import dtmm_utils
url_digest = dtmm_utils.md5_hash('http://mock.com')
memcache.set(url_digest, {'content': 'word'})
end_data = dtmm_utils.get_url_content(None, 'http://mock.com')
self.assertEqual(end_data, {'content': 'word'})
@patch('dtmm_utils.authed_fetch', autospec=True)
@patch('logging.error', autospec=True)
def test_get_url_content_download_error_handling(self, _, authed_fetch):
authed_fetch.side_effect = urlfetch.DownloadError
import dtmm_utils
mock_handler = Mock()
dtmm_utils.get_url_content(mock_handler, 'http://mock.com')
mock_handler.error.assert_called_with(408)
@patch('dtmm_utils.authed_fetch')
def test_get_tree(self, mock_authed_fetch):
content = {
'tree': [{
u'sha': u'ac178f6489f2d3f601df6a9a5e641b62a0388eae',
u'mode': u'100644',
u'path': u'README.md',
u'type': u'blob',
u'size': 314
}]
}
mock_authed_fetch.return_value.content = json.dumps(content)
import dtmm_utils
end_data = dtmm_utils._get_tree()
self.assertEqual(end_data, content['tree'])
@patch('dtmm_utils._get_tree')
def test_get_modules(self, _get_tree):
_get_tree.return_value = [
{'path': 'bad_file.bleh'},
{'path': 'good_file.lua'}
]
import dtmm_utils
modules = dtmm_utils.get_modules()
self.assertEqual(modules, [_get_tree.return_value[1]])
@patch('dtmm_utils.get_url_content', autospec=True)
def test_get_live_module_data(self, get_url_content):
get_url_content.return_value = {
'content': base64.b64encode('''
MODULE = {
Type = "Hardware",
Name = "HMD2043",
Version = "1.1",
SDescription = "Deprecated HMD2043 hardware device",
URL = "False URL"
};''')
}
import dtmm_utils
end_data = dtmm_utils.get_live_module_data(
None, {"url": "http://mock.url/hardware_file"})
self.assertEqual(
end_data,
{
'URL': 'False URL',
'SDescription': 'Deprecated HMD2043 hardware device',
'Version': '1.1',
'Type': 'Hardware',
'Name': 'HMD2043'
}
)
@patch('dtmm_utils.get_url_content', autospec=True)
def test_get_live_hardware_data(self, get_url_content):
get_url_content.return_value = {
'content': base64.b64encode('''
HARDWARE = {
ID = 0x74fa4cae,
Version = 0x07c2,
Manufacturer = 0x21544948 -- HAROLD_IT
};''')
}
import dtmm_utils
end_data = dtmm_utils.get_live_hardware_data(
None, {"url": "http://mock.url/hardware_file"})
self.assertEqual(
end_data,
{
'Version': 1986,
'ID': 1962560686,
'Manufacturer': 559171912
}
)
def main():
unittest2.main()
if __name__ == '__main__':
main()
| mit | 8,914,181,437,229,234,000 | 34.484536 | 140 | 0.593841 | false |
chuckharmston/ghosttown | app/search/classification/wikipedia.py | 1 | 2027 | from urllib.parse import urlencode, urlparse, urlunparse
import requests
from memorize import memorize
from memcached import memcached
from .base import BaseClassifier
class WikipediaClassifier(BaseClassifier):
"""
Classifier that is applied if the returned result is a Wikipedia article.
Adds:
abstract - an excerpt from the Wikipedia article.
slug - the article's URL slug.
title - the article's title.
"""
type = 'wikipedia'
def is_match(self, result):
"""
It is a Wikipedia article if both the netloc ends with 'wikipedia.org'
and the path starts with '/wiki'.
"""
return (self.url.netloc.endswith('wikipedia.org') and
self.url.path.startswith('/wiki'))
def _api_url(self, page_title):
"""
Constructs a URL to the Wikipedia API endpoint for an article with the
passed page title.
"""
endpoint = list(self.url)
endpoint[2] = '/w/api.php'
endpoint[4] = urlencode({
'action': 'query',
'exintro': '',
'explaintext': '',
'format': 'json',
'meta': 'siteinfo',
'prop': 'extracts',
'redirects': '',
'titles': page_title
})
return urlunparse(endpoint)
@memorize(memcached, prefix='wikipedia')
def _api_response(self, page_title):
"""
Makes an API request to Wikipedia, fetching the extract for the article
with the passed page title.
https://www.mediawiki.org/wiki/API:Main_page
"""
url = self._api_url(page_title)
response = requests.get(url)
return list(response.json()['query']['pages'].items())[0][1]
def enhance(self):
slug = self.url.path.replace('wiki/', '').strip('/')
api_data = self._api_response(slug)
return {
'abstract': api_data['extract'].strip(' \n\t'),
'slug': slug,
'title': api_data['title']
}
| mpl-2.0 | 8,694,191,756,906,397,000 | 28.808824 | 79 | 0.571288 | false |
anlambert/tulip | library/talipot-python/plugins/layout/H3Layout.py | 1 | 3415 | # Copyright (C) 2019 The Talipot developers
#
# Talipot is a fork of Tulip, created by David Auber
# and the Tulip development Team from LaBRI, University of Bordeaux
#
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU General Public License version 3, or any later version
# See top-level LICENSE file for more information
from talipot import tlp
import talipotplugins
numpyOk = True
# h3 module requires numpy so we need to check it is available
# in the Python environment
try:
# https://github.com/buzzfeed/pyh3
from h3.tree import Tree
except Exception:
numpyOk = False
class H3Layout(tlp.LayoutAlgorithm):
def __init__(self, context):
tlp.LayoutAlgorithm.__init__(self, context)
self.addFloatParameter(
'layout scaling',
'the scale factor to apply to the computed layout', '1000')
def check(self):
if not numpyOk:
return (False,
('Python numpy module is required to execute that '
'layout algorithm.\nYou can easily install it with '
'the pip tool or your package manager if you are a '
'linux user.'))
if not tlp.ConnectedTest.isConnected(self.graph):
return (False, 'The graph must be connected')
return (True, '')
def run(self):
# compute a spanning tree of the input graph
spanningTree = tlp.TreeTest.computeTree(self.graph)
# get edges to use an input for the H3 layout implementation
# from buzzfeed and reverse their orientation as it does not
# use the same direction as in Talipot graphs
edges = [self.graph.ends(e)[::-1] for e in spanningTree.getEdges()]
# compute the layout
tree = Tree(edges)
# copy result to Talipot layout property
self.result.setAllEdgeValue([])
for n in self.graph.getNodes():
self.result[n] = (tree.nodes[n].coord.x,
tree.nodes[n].coord.y,
tree.nodes[n].coord.z)
# apply some scaling factor to the layout to get a correct rendering
# in Talipot
scaling = 1000
if self.dataSet:
scaling = self.dataSet['layout scaling']
self.result.scale((scaling, scaling, scaling))
# cleanup computed spanning tree
tlp.TreeTest.cleanComputedTree(self.graph, spanningTree)
return True
pluginDoc = """
Implements the H3 layout technique for drawing large directed graphs as
node-link diagrams in 3D hyperbolic space. That algorithm can lay out much
larger structures than can be handled using traditional techniques
for drawing general graphs because it assumes a hierarchical nature
of the data.
It was first published as: <b> H3: Laying out Large Directed Graphs
in 3D Hyperbolic Space </b>. Tamara Munzner. Proceedings of the 1997 IEEE
Symposium on Information Visualization, Phoenix, AZ, pp 2-10, 1997.
The implementation in Python (MIT License) has been written by
BuzzFeed engineers (https://github.com/buzzfeed/pyh3).
"""
# The line below does the magic to register the plugin into the plugin database
# and updates the GUI to make it accessible through the menus.
talipotplugins.registerPluginOfGroup(
'H3Layout', 'H3', 'Antoine Lambert', '30/08/2017',
pluginDoc, '1.0', 'Hierarchical')
| lgpl-3.0 | -1,614,473,418,251,960,800 | 33.846939 | 79 | 0.667643 | false |
ahmed-aalhaj/django-cpanel | django-cpanel.py | 1 | 1361 | #!/usr/bin/python3
"""
Welcome to Django helper - a small script to help you test your Django project easily
By: Ahmed Alhaj.
github.com/ahmedXalhaj
"""
import os, sys
#Important: Change proj_directory to where your project is located
proj_directory = os.getcwd()
sys.path.extend([proj_directory])
working=True
while working:
ask=input("What do you need? ")
if ask=="help":
print("""
Available commands:
run : launchs the development server
mkgrt : Equals to "makemigrations" + "migrate" commands
help : Prints this help
done : Quits the script
Note: Whenever you wanna answer a question with yes, just type "y", and for no, just hit Enter.
"""
)
elif ask=="run":
os.system("./manage.py runserver")
print(" ")
done=input("Are we done yet? ")
if done=="y":
working=False
elif ask=="mkgrt":
ask2 = input("Which app? ")
if ask2:
os.system("./manage.py makemigrations %s" % (ask2,))
os.system("./manage.py migrate")
else:
print("You need to choose the app!")
print(" ")
done=input("Are we done yet? ")
if done=="y":
working=False
elif ask=="done":
working=False
print("""
Hope you had a nice session. See you soon!""")
| gpl-3.0 | 2,271,277,100,131,071,200 | 24.679245 | 101 | 0.579721 | false |
udoprog/ontv | ontv/action/list.py | 1 | 1266 | import datetime
from ..utils import has_aired_filter
from ..format import format_episodes_count
from ..format import format_episodes_count_legend
def series_name_key(s):
return s['series_name']
def action(ns):
ns.out(ns.t.bold_magenta(u"Series you are currently watching"))
ns.out(u"")
now = datetime.datetime.now()
has_aired = has_aired_filter(now)
series_dao = ns.series
series_list = ns.series.list_series()
episodes_legend = format_episodes_count_legend(ns.t)
if series_list:
for series in sorted(series_list, key=series_name_key):
episodes = ns.series.get_episodes(series)
episodes_count = None
if episodes is not None:
episodes_count, _ = format_episodes_count(
ns.t, series_dao, has_aired, episodes)
ns.out(ns.t.bold_cyan(
u"{0[series_name]} (id: {0[id]})".format(series)))
if episodes_count:
ns.out(ns.t.cyan(
u" Episodes ({0}): {1}".format(
episodes_legend, episodes_count)))
else:
ns.out(ns.t.bold_red("You are not watching any series"))
return 0
def setup(parser):
parser.set_defaults(action=action)
| gpl-3.0 | -5,983,885,762,306,351,000 | 24.32 | 67 | 0.590837 | false |
cutoffthetop/zeit.content.image | src/zeit/content/image/browser/tests/test_image.py | 1 | 3254 | import ZODB.utils
import os.path
import unittest
import zeit.cms.testing
import zeit.content.image.testing
class TestDelete(zeit.cms.testing.BrowserTestCase):
"""Test correct registration of delete views for images.
    This test must be done on an image (rather than testcontent), since the adapter
    lookup was non-deterministic and "for example" broke for images. So we used
    images explicitly to test the absence of weird behaviour.
"""
layer = zeit.content.image.testing.ZCML_LAYER
def test_delete_message_in_repository(self):
self.browser.open('http://localhost/++skin++vivi/repository/2006/'
'DSC00109_2.JPG/@@delete.html')
self.assertEllipsis(
'...Do you really want to delete the object from the folder...',
self.browser.contents)
def test_delete_message_in_workingcopy(self):
self.browser.open('http://localhost/++skin++vivi/repository/2006/'
'DSC00109_2.JPG/@@checkout')
self.browser.open('@@delete.html')
self.assertEllipsis(
'...Do you really want to delete your workingcopy?...',
self.browser.contents)
class BlobCleanupTest(zeit.cms.testing.BrowserTestCase):
layer = zeit.content.image.testing.ZCML_LAYER
def setUp(self):
super(BlobCleanupTest, self).setUp()
self.orig_mktemp = ZODB.utils.mktemp
ZODB.utils.mktemp = self.mktemp
self.tempfile = None
def tearDown(self):
ZODB.utils.mktemp = self.orig_mktemp
super(BlobCleanupTest, self).tearDown()
def mktemp(self, dir=None, prefix='tmp'):
self.assertEqual(None, self.tempfile)
self.tempfile = self.orig_mktemp(dir, prefix)
return self.tempfile
def test_temporary_file_for_thumbnail_is_cleaned_up_after_request(self):
b = self.browser
b.open('http://localhost/++skin++cms/repository/'
'2006/DSC00109_2.JPG/@@preview')
self.assertFalse(os.path.exists(self.tempfile))
class ImageEdit(zeit.cms.testing.BrowserTestCase):
layer = zeit.content.image.testing.ZCML_LAYER
def setUp(self):
super(ImageEdit, self).setUp()
b = self.browser
b.open(
'http://localhost/++skin++vivi/repository'
'/2006/DSC00109_2.JPG/@@checkout')
b.getControl(
name='form.copyrights.0..combination_00').value = 'required'
@unittest.skip(
'Disabled because the frontend does not interpret rewritten links '
'correctly yet.')
def test_rewrites_links_from_www_zeit_de_to_xml_zeit_de(self):
b = self.browser
b.getControl('Links to').value = 'http://www.zeit.de/foo/bar'
b.getControl('Apply').click()
self.assertEllipsis('...Updated on...', b.contents)
self.assertEqual(
'http://xml.zeit.de/foo/bar', b.getControl('Links to').value)
def test_leaves_other_links_alone(self):
b = self.browser
b.getControl('Links to').value = 'http://example.de/foo/bar'
b.getControl('Apply').click()
self.assertEllipsis('...Updated on...', b.contents)
self.assertEqual(
'http://example.de/foo/bar', b.getControl('Links to').value)
| bsd-3-clause | -5,436,951,450,482,950,000 | 34.758242 | 79 | 0.636447 | false |
XingHeStudio/xDisplayAtHome | Other.libs/Rtmplite/siprtmp.py | 1 | 90472 | # Copyright (c) 2007-2009, Mamta Singh. All rights reserved. See LICENSING for details.
# Copyright (c) 2010-2011, Kundan Singh.
'''
Introduction
------------
The goal of this project is to allow multimedia calls from Flash Player to SIP network and vice-versa. This allows either a
web browser or a standalone AIR-based application to call to and receive call from a SIP phone. The SIP-RTMP gateway implements
translation of signaling as well as media to support audio, video and text with the SIP user agent. The client side ActionScript
library allows any third-party to build user interface for the web-based soft-phone. The Gateway can run either as a server hosted
by the provider, or as a local application on the client's host.
For other Flash-SIP projects see:
1. http://www.gtalk2voip.com/sipper/
2. http://www.flaphone.com/ (formerly Flashphone.ru)
3. http://code.google.com/p/red5phone/
Design choices
--------------
Two design alternatives: dedicated server vs. server app. The advantages of a dedicated server that implements SIP-RTMP gateway
is that management is easier, and the application does just one thing. On the other hand implementing the gateway as a RTMP server
application is more extensible, and the same server can be used to implement other applications. I outline the implementations
using both alternatives, and finally pick the second alternative in this implementation.
In the dedicated server case, the FlashServer class of rtmp.py module is extended into a Gateway class. This subclass then
overrides the various methods such as publishhandler and playhandler to map to similar operations using the SIP library such as
register, invite or accept. One advantage of this approach is that the Gateway class can be used as a component in other
applications without having to run a separate Server.
In the server application case, the Gateway class extends the App class of rtmp.py to implement the SIP-RTMP gateway application,
and installs itself as application named 'sip'. The Gateway class overrides the methods such as onConnect, onPublish, etc., to
map to the SIP library methods such as register, invite or accept. One advantage of this approach is that the same Server can
be used to perform other RTMP server functions besides hosting a SIP gateway.
There are several API alternatives from the Flash client point of view as well:
1. The RTMP NetConnection is just used as RPC layer to control the SIP library.
2. Have 1-to-1 mapping between a RTMP NetConnection and a SIP user agent. (choosen one)
3. Have 1-to-1 mapping between a RTMP connection's scope and a SIP multi-party conference.
In the first approach, the application connects to the gateway using NetConnection URL of the form 'rtmp://server/sip'. Once
connected, the application uses various RPC commands and indications to register, invite, accept or bye a SIP session. Each
command has a full set of arguments needed to execute that command. For example, NetConnection.call('invite',..., 'alice','bob')
will make a call from local user 'alice' to remote user 'bob'. One major problem with this approach is that there is no
information hiding or abstraction in the API. Hence, any connected application can alter the state of any user or call in the
library. One could use cookies to store state information, but nevertheless the API is pretty rudimentary.
In the second approach, a single SIP user agent is associated with a NetConnection. The application connects to the URL of the
form 'rtmp://server/sip/[email protected]' and supplies additional connection arguments such as display name and password.
The gateway associates this connection with the user address-of-record (AOR) 'sip:[email protected]'. In particular, it sends
SIP REGISTER on behalf of this user, and keeps refreshing the registration as long as the NetConnection is connected. Thus, this
NetConnection represents an abstraction of the SIP user agent for this user. The application uses RPC commands and indications
to invite, accept or bye a SIP session in this user agent. In the simple implementation, a single user agent is capable of a
single SIP session at any instance. The API for multi-line SIP user agent will be more complex. When the application calls
NetConnection.call('invite', ..., '[email protected]') the gateway sends a SIP INVITE request to the AOR sip:[email protected]. When a
call is successful, the application can use the NetStream named 'local' and 'remote' to send and receive audio/video with the
remote user. In this approach a multi-party call is implemented entirely in the application by having two different NetConnection
objects in two different calls, or by making a call to a separate multi-point conference server. Additional commands and
indications are used to represent text messages and presence information. Alternatively, a SharedObject named 'contacts' could
represent the local user's contact list with presence information accessible from the Flash application. Since the SharedObject
is scoped to the NetConnection's URL, it represents that particular user's contact list.
In the third approach, the Flash application connects a NetConnection to a conference URL of the form 'rtmp://server/sip/abc1'.
In this case the conference is identified by name 'abc1'. Each connection to this URL creates a new conference leg from an
RTMP user. Then the application uses NetConnection RPC commands such as 'invite', 'accept' and indications such as 'invited',
'accepted', to inform the gateway to change the membership of the conference, either by inviting a new user or by accepting an
incoming invitation. The gateway can be distributed such that the conference context is maintained in a gateway-farm. The
membership information can be stored using a SharedObject accessible from the Flash application. One major advantage of this
approach is that it maps the URL to a conference context and supports built-in multi-party conferencing. Whenever a new participant
joins the conference, the gateway informs the application about the stream name for that participant. The application opens
a new NetStream for that stream name to play, and receives media from that participant on that stream. There is at most one
published stream in a NetConnection, which represents the local participant's media.
The third approach seems most logical and complete, however requires implementation of a distributed conference state in the
gateway farm, and multi-party conference server logic. We do not want to mix media going to the Flash application, because
not all media (e.g., video) can be mixed and audio mixing incurs additional CPU load on the server. For example, a typical
mixer employs a decode-add-encode cycle. However, existing SIP clients do not usually handle multiple media streams well.
Hence the conference server logic becomes more complex where it mixes some audio going to SIP user agents, and does not mix
audio going to the RTMP clients. Secondly, maintaining consistent conference membership information among the distributed
gateway farm is a challenge which requires implementing various XCON extensions to the server. Thirdly, a centralized conference model
doesn't mesh well with a P2P-SIP network. More details about centralized, distributed and P2P-SIP conferencing can be found
in the work of http://kundansingh.com. Because of all these issues I have decided to implement the second approach instead.
The second approach is described in much detail next.
Design description
------------------
This module defines two classes: Gateway and Context. The Gateway class extends the rtmp.App class to implement the SIP-RTMP
gateway application in the RTMP server. The Context class implements the translator context for each user or connection from
the RTMP side. The main routine is similar to that in rtmp.py, in that it launches the server additionally with the "sip" gateway
application service.
Since there is a one-to-one mapping between a RTMP connection and a SIP user, a single Context behaves as a single line SIP
user agent, which can be in at most one SIP registration and at most one SIP call state at any time. I think implementing
multiple lines can be easily done in the Flash application by creating additional connections to the server.
The Gateway class overrides these methods of the App class: onConnect causes a SIP registration, onDisconnect causes a SIP
unregistration, onCommand invokes various commands such as 'invite', 'bye', 'accept', 'reject' from the RTMP side to the
SIP side, onPublish and onClose update the published stream information, onPlay and onStop update the played stream information
and onPublishData handle the media data from RTMP to SIP side. A new context is created in onConnect and destroyed in
onDisconnect. The Client (RTMP) as well as User (SIP) objects store a reference to the context. I use the SIP stack from the
p2p-sip (39 Peers) project at http://39peers.net.
The Context class maintains a mapping between RTMP client and SIP user (single line phone). It also maintains state regarding
the media sesion, incoming and outgoing pending call, and published and played streams. One unique feature of the translator
is that it tries to re-use the same port for the given SIP URL when registering with the SIP server. This way we avoid
registering multiple contacts in the SIP server for the same SIP URL.
As you will see in the later section, a connection from the RTMP client supplies a SIP URL of the registering user. The context
maps this request to a SIP REGISTER request using the local contact address for that SIP URL. This allows the gateway to
receive incoming SIP calls for this SIP URL. When the RTMP client invokes commands such as "invite", they get mapped to the
SIP side using the methods defined on the User class. Similarly, when the User class invokes callback, they get mapped to the
RTMP callbacks such as "invited".
The RTMP client MUST create at most one published NetStream and at most one played NetStream for the given connection.
The published stream supplies the client's audio and video to the context. The context maps this audio and video data to the
appropriate SIP side using the RTP module available in the SIP stack. Similarly the audio and video data from the SIP side
coming in RTP are mapped to the audio and video data given to the played stream to the RTMP client.
Interoperability with SIP/SDP/RTP
---------------------------------
The Flash application must be version 10 or higher so that it can support Speex audio codec. We can only interoperate with
SIP user agents that support Speex/16000 or Speex/8000. The reason is that Flash Player supports only a limited set of codecs for
audio captured from the Microphone. Flash Player 9 and earlier supported only the proprietary NellyMoser codec, which is not
understood or supported beyond the Flash platform. Flash Player 10 incorporated the Speex audio codec, which is an open source and
open specification, and is available in several SIP applications such as X-Lite. The support of the Speex audio codec is not as
widely available in PSTN gateways though. Note that we support the wideband (16000 Hz) and narrowband (8000 Hz) variants of the
Speex audio codec. The selection can be done from the Flash application during NetConnection.connect.
This section describes other interoperability issues with a SIP or Flash client. When the client issues an outbound
"invite" request, the mapped SIP INVITE advertises the session using SDP module of the SIP stack. This session contains
media stream offer for both audio and video. The audio stream has only Speex/16000 format whereas the video stream has RTMP
specific proprietary x-flv format (more about this later). An example SDP offer is shown below:
v=0
o=- 1247290948 1247290948 IN IP4 Macintosh-2.local
s=-
c=IN IP4 192.168.1.3
t=0 0
m=audio 22700 RTP/AVP 96
a=rtpmap:96 speex/16000
m=video 26498 RTP/AVP 97
a=rtpmap:97 x-flv/90000
If the response contains both valid audio and video answer streams, then we assume that the remote side is also our own Flash
application, as it can support the proprietary x-flv video format. If the answer contains port=0 for the video stream, that means
the remote party does not support our proprietary video format, and we assume that the remote side is a standard SIP user agent.
Similar SDP negotiation happens for an incoming call. In particular, if the incoming SDP offer does not have the speex audio codec,
then we disable the audio stream. Similarly, if the incoming SDP offer does not have an x-flv video codec, then we disable the video
stream.
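The gateway itself performs this matching using the SDP module of the SIP stack. The standalone sketch below is illustrative
only (it is not the gateway's code) and simply scans raw SDP text for the two codecs of interest:
    def remote_capabilities(sdp_text):
        # returns (has_speex_audio, has_xflv_video) for a raw SDP offer or answer
        has_speex = has_xflv = False
        media = None
        for line in sdp_text.splitlines():
            if line.startswith('m='):
                fields = line[2:].split()
                media = None if int(fields[1]) == 0 else fields[0]   # port 0 means the stream is disabled
            elif line.startswith('a=rtpmap:') and media is not None:
                codec = line.split(None, 1)[1].lower()
                has_speex = has_speex or (media == 'audio' and codec.startswith('speex/'))
                has_xflv = has_xflv or (media == 'video' and codec.startswith('x-flv/'))
        return has_speex, has_xflv
If has_xflv is true we can treat the peer as another Flash-backed gateway; if only has_speex is true we fall back to audio-only.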
One caveat in the implementation is that the media matching is done when the Flash application accepts the incoming call. Thus,
it is possible that for an incoming call, the Flash application gets alerted even when there is no matching media session.
And when the Flash application tries to accept the incoming call, the gateway performs media matching and rejects the incoming
SIP call, and informs the Flash application that the call got disconnected. I need to fix this by doing media matching as soon as
the incoming SIP invitation is received.
If the remote party does not support x-flv video but supports speex/16000 audio, then we only send audio data from RTMP to
SIP side. Similarly, only audio data will be mapped from SIP to RTMP side, hence the Flash application will not see remote
party's video. Standard RTP and RTCP formatting is used for sending/receiving data to/from the SIP side. The timestamp
of RTP is derived from the RTMP message's time stamp property. In particular, the RTMP message uses 'millisecond' units whereas the
RTP header uses 'clock rate' units. Since we support only the 16000 Hz clock rate, each millisecond is equivalent to 16 clock rate
units, and each speex frame of typically 20 ms is equivalent to 320 clock rate units.
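A small illustrative helper (not part of the gateway code) for this conversion:
    SPEEX_CLOCK_RATE = 16000                       # Hz, as negotiated in the SDP
    def rtmp_time_to_rtp_timestamp(time_ms):
        # 1 ms of RTMP time equals 16 clock units; a 20 ms speex frame advances the timestamp by 320 units
        return time_ms * SPEEX_CLOCK_RATE // 1000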
If the remote party supports x-flv, then we disable the speex/16000 audio. Even though the remote side is SIP, we assume that
it is backed by a Flash application with a similar gateway as this. Since x-flv format includes both audio and video, we
do not need another audio only stream in the session. Next I describe the x-flv format.
The x-flv video format is basically a modification of the RTMP media messages, so that it works with RTP. It includes interleaved
audio and video packets. One problem with RTMP media message is that there is no sequence number which makes it hard to detect
and correct packet losses over RTP/UDP transport. Another problem is that the video packet size can be huge, which causes problems
with UDP transport -- certain NATs may drop large packets. For these reasons, the RTMP media message is broken down into smaller
chunks such that each chunk can be sent in a single RTP message.
The timestamp of RTP is derived from the RTMP message's time stamp property. The payload type reflects the 'x-flv/90000' media type
as negotiated in SDP. In particular, for an outgoing call it will use a payload type of 97 and for an incoming call it will use the
payload type that was advertised by the remote party's SDP. If the remote party is also using our gateway, then it will be 97.
The sequence number, SSRC and other fields in the RTP message are taken care of by the RTP module of the SIP stack and are
independent of the RTMP side, as long as the sequence number keeps incrementing for each RTP packet sent, and SSRC is
randomly generated for the session and remains constant in the session.
The RTP payload is constructed as follows. First the RTMP message is constructed in its entirety. The Message object in rtmp
module has type, size and time properties. These are added in that order, using a big-endian 32-bit number each, as the header,
followed by the data part of the message. Note that the data part of the media message actually has one byte type information
containing codec type (e.g., 0xb2 for speex/16000), but we treat the whole data part including the type together to simplify
the translation. Thus the assembled media message looks as follows:
        0                   1                   2                   3
        0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 type  |                       RTMP message type                       |
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 size  |                    RTMP message body size                     |
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 time  |                    RTMP message time stamp                    |
       +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
 body  |                     RTMP message body ...                     |
       |                   The size of this body is                    |
       |                   in the second field above                   |
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
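For illustration, the assembled message can be packed and unpacked with Python's struct module; this is only a sketch of the
format above, not the gateway's actual code:
    import struct
    def assemble_message(msg_type, msg_time, body):
        # three big-endian 32-bit fields (type, body size, time stamp) followed by the body
        return struct.pack('>III', msg_type, len(body), msg_time) + body
    def parse_message(data):
        msg_type, size, msg_time = struct.unpack('>III', data[:12])
        return msg_type, msg_time, data[12:12 + size]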
Now the assembled media message is broken down into smaller chunks such that each chunk has at most 1000 bytes. Typically an
audio media message is already smaller than that, hence it generates only one chunk. On the other hand a large video
media message may generate several chunks. Each chunk is treated as opaque data for the rest of the formatting. Thus, the
receiving side must re-assemble the full message as described above from the received chunks before acting on the message.
Note that if a message is split into chunks, all the chunks must be received before the whole message can be constructed.
Even if a single chunk is missing due to packet loss, the whole message needs to be discarded. The chunks idea is part of
the RTMP specification itself, however it is not useful as it is because of the lack of proper sequence numbering to detect packet
losses. Hence this chunk algorithm is different from what the RTMP specification uses.
Each chunk is prepended with a chunk header to form the complete RTP payload. Each chunk header starts with four bytes of
magic word 'RTMP' which is actually a big-endian 32-bit number 0x52544d50. This magic word allows detecting corrupted or
incorrect x-flv payload type. There are two sequence numbers: the message sequence number (seq) and chunk number (cseq).
Each assembled message as described before gets a unique auto-incremented message sequence number. If a message is broken
into 5 chunks, say, then the chunks will get chunk numbers 0, 1, 2, 3, 4 in that order. Thus the first chunk of a message
always has a chunk number of 0. In the chunk header, the next 32 bits contain the big-endian message sequence number. Note that
this sequence number is different than the RTP sequence number, because the RTP sequence number is based on the lower layer's
actual message sent count, whereas this message sequence number is based on RTMP's message count. This is followed by a
big-endian 16-bit chunk number. Next 16-bit field is an optional size of the assembled message and is present if-and-only-if
the chunk number is 0, i.e., this is the first chunk of the message. This field is not present for subsequent chunks of the
message. This field is useful to know the full size of the assembled message, so that a receiver can know when to finish
the chunks and re-assemble the full message. I could have used the body size present in the full message, but that looked
more complicated to me in parsing on the receiver, hence I added this optional field. The complete chunk is shown below.
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
magic | magic word 'RTMP' 0x52544d50 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
seq | message sequence number (seq) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
rest | chunk number (cseq) | (optional) message size |
+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
body | chunk data ... |
time | lower layer (UDP) provides size information |
time | of the full packet |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
The sender is expected to send all the messages in the correct sequence number order, and all the chunks of the message
again in the correct chunk number order.
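As an illustrative Python sketch, the sender-side chunking described above can be written as follows, where seq is the per-message
sequence number maintained by the sender (this mirrors, in simplified form, the _rtmp2rtpFLV method found later in this file):
    from struct import pack
    def chunkify(data, seq, max_chunk=1000):
        chunks, cseq = [], 0
        hdr = pack('>Ihh', seq, cseq, len(data))   # first chunk also carries the 16-bit message size
        while len(data) > 0:
            chunks.append('RTMP' + hdr + data[:max_chunk])
            data = data[max_chunk:]
            cseq += 1
            hdr = pack('>Ih', seq, cseq)           # subsequent chunks omit the size field
        return chunks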
The receiver processing is described below. First the payload type is matched to identify the packet as x-flv as per the
SDP negotiation. The other fields such as timestamp can be ignored because they appear in the actual assembled message anyway.
The payload of the RTP packet is parsed using the chunk format described above. The receiver verifies the magic word 'RTMP'
and, if that fails, ignores the packet. The message sequence number is extracted as seq. If the chunk number (cseq) is 0, then the
message size is also extracted. The remaining data is assumed to be chunk data. The receiver maintains the last seq received so far,
and also all the chunk data received so far for that seq. The receiver may maintain data for more than one seq if it wants to handle
out-of-order packets. For each received packet, the receiver checks whether all the chunks have been received: if the total size of
all the chunk data received so far equals the message size found in the first chunk, then all the chunks have been received.
When all the chunks are received, all the chunk data are concatenated in the order of chunk number to form the
complete assembled message. This message is then used to construct the rtmp module's Message object by extracting the type,
body size, time stamp and body data as mentioned before. Note that the receiver may detect lost chunks if there is a missing
chunk number and may discard all the chunks in this message seq. The receiver may also detect a missing first chunk if
the new seq number is greater than the last seq but the chunk number is not 0. In this case it may discard all future chunks
in this message seq.
Once a message is assembled, it is given to the RTMP side using the played NetStream.
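A simplified receiver-side sketch in Python is shown below; it handles only in-order chunks and ignores the loss-recovery details
discussed above (the gateway's actual implementation is the _rtp2rtmpFLV method found later in this file):
    from struct import unpack
    def on_rtp_payload(payload, state):            # state: dict holding 'seq', 'size' and 'chunks'
        if payload[:4] != 'RTMP': return None      # bad magic word, ignore the packet
        seq, cseq = unpack('>Ih', payload[4:10])
        if cseq == 0:                              # first chunk: extract the full message size
            state['seq'], state['chunks'] = seq, [payload[12:]]
            state['size'], = unpack('>h', payload[10:12])
        elif seq == state.get('seq') and cseq == len(state.get('chunks', [])):
            state['chunks'].append(payload[10:])
        else:
            return None                            # missing or out-of-order chunk, discard
        if sum(len(c) for c in state['chunks']) < state['size']: return None
        data = ''.join(state['chunks'])            # all chunks received, re-assemble the message
        msg_type, size, ts = unpack('>III', data[:12])
        return (msg_type, ts, data[12:])           # message type, time stamp and body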
Client API in ActionScript
--------------------------
This section describes the client side API needed to use this SIP-RTMP gateway service. The first step for the client is to
create a connection to the gateway. It is assumed that the client is written in ActionScript with an appropriate Flex SDK that
supports Flash Player 10 or later features such as the Speex audio codec. Note also that since the rtmp.py module currently supports
only AMF0, the client must specify this as the object encoding format. First the client creates a new NetConnection as follows:
var nc:NetConnection = new NetConnection();
nc.objectEncoding = ObjectEncoding.AMF0;
Then to receive various callbacks such as "invited", and to receive various events such as "NetConnection.Connect.Success" it
installs the listeners as follows. This assumes that the callbacks will be invoked on the current (this) object.
nc.client = this;
nc.addEventListener(NetStatusEvent.NET_STATUS, netStatusHandler);
nc.addEventListener(SecurityErrorEvent.SECURITY_ERROR, errorHandler);
nc.addEventListener(IOErrorEvent.IO_ERROR, errorHandler);
Finally to establish the connection, it invokes the 'connect' method on the NetConnection using an RTMP URL pointing to this
gateway service. In particular if the gateway is running on your local host, then use "rtmp://localhost/sip/...". If the
gateway is running on the "server" host, then use "rtmp://server/sip/...". The connection must also be scoped to the given
SIP user's address. For example if the client's SIP user name is "[email protected]" then the URL will be
"rtmp://server/sip/[email protected]".
nc.connect('rtmp://server/sip/[email protected]', 'alice', 'mypass', 'Alice Smith');
For testing purposes, if you are running a SIP server such as sipd.py locally, and your local IP address is
'192.168.1.3' then the URL to connect becomes "rtmp://localhost/sip/[email protected]". The connect method takes additional
arguments for authentication and registration: authentication name, authentication password, and display name. Note that
you must supply authentication name, authentication password and display name to perform SIP registration even if there is no
authentication requested by your SIP server. However, you must set authentication password to empty string '' if you do not
want to do SIP registration, and just want to make outbound SIP calls (assuming that your SIP server allows outbound calls without
SIP registration).
nc.connect('rtmp://localhost/sip/[email protected]', 'alice', 'mypass', 'Alice Smith');
Internally, a call such as one mentioned before causes the gateway to send SIP registration for local SIP URL of the form
"Alice Smith" <sip:[email protected]> and authenticate if needed using username 'alice' and password 'mypass'. The netStatus
event with code 'NetConnection.Connect.Success' is dispatched when connection and registration are successful, and with code
'NetConnection.Connect.Rejected' or 'NetConnection.Connect.Failed' if the connection or registration failed for some reason.
Typically a registration or authentication failure results in the rejected message, whereas an RTMP connection failure due to an incorrect
server name results in the failed message. The client will typically treat both messages the same. Once the NetConnection is connected
the client is in connected state and can make or receive calls via the gateway.
For a call, the Flash application needs to set up its devices correctly. I recommend the following set up. In particular, you
should set the Microphone codec to use Speex audio codec, and Camera to operate in reasonable dimension and frame rate. Note that
this requires Flash Player 10 if you want to run the code, and the associated Flex SDK if you want to compile your code.
var mic:Microphone = Microphone.getMicrophone(-1); // little known fact that -1 gives default microphone.
mic.setUseEchoSuppression(true);
mic.setLoopback(false);
mic.setSilenceLevel(0);
mic.codec = 'Speex';
mic.gain = 80;
var cam:Camera = Camera.getCamera(); // on Mac OS, use Flash Player settings to set default camera
cam.setLoopback(false); // so that local video is not compressed in the view
cam.setMode(320, 240, 12); // tune this based on your needs
cam.setQuality(0, 70); // tune this based on your needs
localVideo.attachCamera(cam);
To place an outbound call, the client invokes the RPC method "invite" on the NetConnection and supplies the remote party's SIP
address. This SIP address must be a fully qualified SIP URL or SIP address, which includes optional display name. Examples are
"Bob Jones" <sip:[email protected]> and sip:[email protected].
nc.call('invite', null, '"Bob Jones" <sip:[email protected]>');
If you registered using "Alice Smith" <sip:[email protected]> from another browser instance, then you can use that URL in
the "invite" method to call that user. Note however that calling a user on a NetConnection who was registered using the same
instance of the NetConnection may result in unexpected behavior, as this means you are using your phone to call your own
number on a single-line SIP user agent. The expected behavior is that you will receive a 'busy' response in this case.
nc.call('invite', null, 'sip:[email protected]');
An incoming call is indicated using a callback method "invited" on the NetConnection.client property. The remote party's
SIP address and your SIP address are both supplied as arguments. The gateway tracks the pending incoming invitation
internally, so no separate invitation identifier is passed to the client; only one incoming invitation is pending at a
time.
public function invited(yourName:String, myName:String):void { ... }
The client should display some kind of alert to the user on an incoming call. If the user accepts the call, the client invokes
the "accept" RPC method to accept the pending incoming invitation.
nc.call('accept', null);
If the user wants to reject an incoming call, the client invokes the "reject" RPC method with an optional reason for rejecting
the call. The reason is of the format "code text" where code is a three-digit reject code such as 486 for busy, and 603 for
decline. The text is a human readable phrase indicating the reason for rejection. The numeric code is optional; if not supplied,
the gateway uses a pre-configured reject code of 603.
nc.call('reject', null, '486 Busy Here');
nc.call('reject', null); // uses "603 Decline" as default
Once a call is established, either an outbound or inbound, the client will need to create two streams to exchange audio and
video with the remote party. The "local" stream is used to publish the local audio and video, and the "remote" stream is used to
play the remote's audio and video. As mentioned earlier the current implementation allows only two streams in the NetConnection,
one in each direction. If the client opens more than one published stream or more than one played stream, then the gateway will
only use the latest stream and ignore the previous one. Once the Camera and Microphone are attached to the local stream and
the stream is published, the gateway starts getting audio video data from local user and sends them to the remote party. Once
the remote stream is attached to a Video display object and is played, the gateway streams remote party's audio and video
data to this client, and the video gets displayed in the Video object and the audio gets played out by the Flash Player.
var local:NetStream = new NetStream(nc), remote:NetStream = new NetStream(nc);
local.attachAudio(mic);
local.attachCamera(cam);
local.publish('local');
remote.play('remote');
remoteVideo.attachStream(remote);
The client may terminate an active call or a pending outbound call using the "bye" RPC method as follows.
nc.call('bye');
Note that the client must also close the two streams when the call is terminated either by local or remote user.
local.close();
remote.close();
The gateway invokes several callbacks on the client besides the "invited" callback which was discussed earlier.
In particular the "byed" callback indicates that the remote party terminated an active call, the "accepted"
callback indicates that the remote party accepted our call invitation, the "rejected" callback indicates that the remote party
rejected our call invitation, and the "cancelled" callback indicates that the remote party cancelled its call invitation to
us. The "rejected" and "cancelled" callbacks take some arguments. These functions must be defined in the client to handle
the appropriate events.
public function accepted():void { ... }
public function rejected(reason:String):void { ... }
public function cancelled(frm:String, to:String):void { ... }
public function byed():void { ... }
If the user wants to make a SIP call to a phone number, he can use the standard SIP URL typically supported by phone
providers. For example, if the user has an account with, say, the 'phoneprovider.com' VoIP provider with a user name of
'12125551234' and password of '5678', and wants to make a call to another number 18001234567, the client can do the following.
nc.connect("rtmp://server/sip/[email protected]", "12125551234", "5678")
nc.call("invite", null, "sip:[email protected]");
If your VoIP provider does not require a SIP registration to make outbound calls, you will need to supply the authentication
credentials in the "invite" call. TODO: this is for future work.
nc.connect("rtmp://server/sip/[email protected]", "alice", "", "Alice Smith");
nc.call("invite", null, "sip:[email protected]", "alice", "mypass");
If you want to use the default preconfigured VoIP provider of the gateway service, you can use the "tel:" URL to make a call.
TODO: this is for future work.
nc.call('invite', null, 'tel:12125551234');
If you want to remain anonymous in your outbound call, the recommended way is to use the SIP address of <sip:anonymous@invalid>.
If you supply your password as "" then no SIP registration will be done.
nc.connect("rtmp://server/sip/anonymous@invalid", "anonymous", "", "Anonymous User");
To use a secure connection replace sip with sips and rtmp with rtmps. TODO: this is for future work.
In particular, an rtmps URL uses a secure TLS connection from Flash Player to the gateway server and a sips URL uses a secure TLS
hop-by-hop connection from gateway server to your SIP destination. A NetConnection that uses sips will only be able to receive
secure connections from remote party. Thus, the application may need two netconnections to support both secure and regular
SIP signaling. Note that the URL in connect method is "rtmps://.../sips/...".
nc.connect('rtmps://server/sips/...',...);
nc.call("invite", null, "sips:[email protected]");
Note, however, that security using this method is not end-to-end, even for media. In particular, the gateway server has
access to your media stream. You should use both rtmps and sips together. If your gateway server is running on local host, you
may not need rtmps though. Note also that signaling security does not guarantee media encryption and privacy. My implementation
will make sure that SRTP is required when using sips.
In an active call, you can send DTMF digits using RFC 2833. The following example sends digit "5" in the RTP session of the
active call using RFC 2833 (touch-tones).
nc.call("sendDTMF", null, "5");
The digits are sent only if the remote end acknowledged support for telephone-event in the SDP of session initiation. Only a single
digit can be sent per sendDTMF call using the rfc2833.py module, and it does not use the redundancy (rfc2198.py) payload.
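On the gateway side this maps to a single RFC 2833 telephone-event payload built with the rfc2833 module of p2p-sip and sent in the
audio RTP session; a simplified sketch of the construction (the actual code is in MediaContext.dtmf2rtp later in this file):
    from std.rfc2833 import DTMF
    payload = repr(DTMF(key='5', end=True))        # one digit per packet, with the end bit set
    # the payload is then sent using the negotiated telephone-event/8000 format on the audio session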
Limitations
-----------
1. The URI schemes 'sips' and 'rtmps' are not yet implemented.
2. Audio interoperability requires that the SIP user agent support the Speex codec, and that the Flash Player is version 10 or later.
Older versions of Flash Player included only the proprietary Nellymoser codec, which is not interoperable with other SIP phones.
3. Video communication is transported using a proprietary packetization format, and will work only between two Flash clients
connected via a gateway following the packetization protocol defined in this file.
4. Multi-party conferencing is not implemented. If at all, the logic should be implemented in the application or external
third-party conference server in this design approach.
5. NAT/firewall traversal is not implemented. Thus, the gateway should run in the public Internet, a third-party solution such as an
RTP proxy should be used to connect to the SIP side, the PSTN gateway should be in the public Internet, and the Flash client network
should allow outbound RTMP traffic to the gateway. In future I will add support for STUN and TURN in the gateway so that it can be
run behind a NAT or on the user's local computer, and can be used to connect to SIP clients behind NAT.
An example SIP user agent component is available in the videoPhone directory. To build use Flex builder or mxmlc compiler. A
pre-compiled SWF is included in that project directory's bin-release sub-directory for you to try out the user agent.
Major Updates
-------------
Support for transcoding between Flash Player's speex and SIP side's PCMU and PCMA using external audiospeex module.
If the audiospeex module is found in PYTHONPATH then it is automatically used, and session negotiation includes new
codecs of pcmu/8000 and pcma/8000 along with speex/8000 and speex/16000. Please see the project web site for details on
how to build/compile this audiospeex module.
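For reference, the transcoding path from Flash Player's speex/16000 to G.711 mu-law can be sketched with the audiospeex and audioop
modules as follows (this mirrors, in simplified form, the _rtmp2rtpAU method later in this file; the decoder and resampler states
must be kept across calls):
    import audioop, audiospeex
    def speex16k_to_ulaw(payload, state):
        # state is a dict holding the persistent speex decoder and resampler states
        linear, state['speex2lin'] = audiospeex.speex2lin(payload, sample_rate=16000, state=state.get('speex2lin'))
        linear, state['resample'] = audiospeex.resample(linear, input_rate=16000, output_rate=8000, state=state.get('resample'))
        return audioop.lin2ulaw(linear, 2)         # 16-bit linear PCM to G.711 mu-law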
Support for G.711 PCMU and PCMA, and H.264 from Flash Player 11. The NetConnection.connect API is modified to ignore the
rate parameter if codecs are supplied in "invite" or "accept" calls. The "invite" and "accept" calls can now have a list of
supported codecs with one or more of the following values.
codec        Flash Player sends and receives                      SDP contains
wideband     speex wideband                                       speex/16000
narrowband   send speex wideband but receive speex narrowband     speex/8000
pcmu         pcmu at 8kHz and 20ms                                pcmu/8000
pcma         pcma at 8kHz and 20ms                                pcma/8000
ulaw         speex                                                pcmu/8000 via transcoding
alaw         speex                                                pcma/8000 via transcoding
dtmf         sendDTMF, ignore on receive                          telephone-event/8000
h264         H264Avc (Baseline or Main)                           h264/90000
flv          RTMP message                                         x-flv/90000
The list supplied in "invite" or "accept" is used in decreasing order of preference.
For backward compatibility, if no list of codecs is supplied, it uses the default speex/16000 (or speex/8000) and
x-flv/90000. The media part is moved to a separate MediaContext class which is reused by both the multitask and gevent versions.
For example, the following corresponds to the default for backward compatibility:
nc.call('invite', null, '"Bob Jones" <sip:[email protected]>', 'wideband', 'dtmf', 'flv');
To support all codecs but prefer speex wideband, use
nc.call(...., 'wideband', 'narrowband', 'pcmu', 'pcma', 'ulaw', 'alaw', 'dtmf', 'h264', 'flv')
If the application sends 'pcmu', 'pcma' or 'h264' it must be using Flash Player 11 or later that supports these codecs.
If the application sends 'ulaw' or 'alaw' but siprtmp cannot find the audiospeex module, it will continue the call with
speex in SDP. As mentioned above, if the application doesn't supply any codec, the default setting is used. If the application
supplies some codecs, others are not used. For example, if only 'wideband' is supplied and no video, it will not use
x-flv/90000 in SDP.
Additionally, the "accepted" callback can have two additional arguments for preferred audio and video codecs after the
media negotiation is complete. Note that "accepted" is invoked for both incoming and outgoing calls.
public function accepted(audioCodec:String=null, videoCodec:String=null) { ... }
The audio and video codecs have the meaning as described below, and the Flash application should listen for the callback
to change the encoding for microphone and camera as needed before starting to publish. For backward compatibility with
existing Flash applications, if a list of codecs was not supplied in "invite" or "accept" then the additional arguments
are not supplied in "accepted" callback.
The "audioCodec" argument can be "speex", "pcmu", "pcma" or "default". If it is default, the application can use either
speex or nellymoser but not pcmu or pcma. The "videoCodec" argument can be "h264" or "default". If it is default, the
application can use any codec. The capability negotiation tries to honor the preference order of the codecs by comparing the
codecs supported by the Flash application and the remote SIP endpoint. If the remote side is capable of "flv" and the
Flash side specified "flv" then both audioCodec and videoCodec will be set to "default". If the remote side is not capable
of any of advertised video codecs, then the videoCodec will be set to null, to tell Flash application to stop video publish.
If the remote side is not capable of any of the advertised audio codecs but the call was accepted, then the audioCodec will
be set to null, to tell Flash application to stop audio publish. If the Flash application advertised "wideband", "narrowband",
"ulaw", "alaw", "pcmu", "pcma" but the remote side is capable of only "pcmu" then audioCodec will be set to "speex" requiring
transcoding to "pcmu". Hence, the application should always put "ulaw" or "alaw" after "pcmu" or "pcma" if available.
For audio codecs the first advertised codec which is supported by the target is used. Thus the Flash application has more
control of which audio codec will be used in case of multiple choices.
The recommended codec list for Flash Player 11+ is "wideband", "narrowband", "pcmu", "pcma", "ulaw", "alaw", "dtmf", "h264", "flv"
The recommended codec list for Flash Player 10 is "wideband", "narrowband", "ulaw", "alaw", "dtmf", "flv".
'''
from __future__ import with_statement
import os, sys, socket, time, traceback, random, multitask
from struct import pack, unpack
from rtmp import App, Header, Message, FlashServer
from amf import AMF0
try:
from app.voip import User, Session, MediaSession
from std.rfc3550 import RTP, Network as RTPNetwork
from std.rfc2396 import Address
from std.rfc4566 import SDP, attrs as format
from std.rfc2833 import DTMF
from std.rfc3261 import Header as SIPHeader
from std.kutil import setlocaladdr, getlocaladdr
except:
print 'Please include p2p-sip src directory in your PYTHONPATH'
sys.exit(1)
try: import audiospeex, audioop
except: audiospeex = None
_debug = _debugAll = False # debug for regular trace and debugAll for media related traces
class Context(object):
'''Context stores state needed for gateway. The client.context property holds an instance of this class. The methods invoked
by RTMP side are prefixed with rtmp_ and those invoked by SIP side are prefixed sip_. All such methods are actually generators.
'''
def __init__(self, app, client):
self.app, self.client = app, client
self.user = self.session = self.outgoing = self.incoming = None # SIP User and session for this connection
self.publish_stream = self.play_stream = self.media = self._preferred = None # streams on RTMP side, media context and preferred rate.
self._gin = self._gss = None # generators that needs to be closed on unregister
if not hasattr(self.app, '_ports'): self.app._ports = {} # used to persist SIP port wrt registering URI. map: uri=>port
def rtmp_register(self, login=None, passwd='', display=None, rate="wideband"):
global agent
scheme, ignore, aor = self.client.path.partition('/')
self._preferred = rate
if _debug: print 'rtmp-register scheme=', scheme, 'aor=', aor, 'login=', login, 'passwd=', '*'*(len(passwd) if passwd else 0), 'display=', display
addr = '"%s" <sip:%s>'%(display, aor) if display else 'sip:%s'%(aor)
sock = socket.socket(type=socket.SOCK_DGRAM) # signaling socket for SIP
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = self.app._ports.get(aor, 0)
try: sock.bind((agent.int_ip, port)); port = sock.getsockname()[1]
except:
if _debug: print ' exception in register', (sys and sys.exc_info() or None)
yield self.client.rejectConnection(reason='Cannot bind socket port')
raise StopIteration(None)
#self.ports[name] = sock.getsockname()[1] # store the port number
# TODO: storing and keeping the persistent port per user doesn't work well if the app is re-loaded in brief interval.
try:
user = self.user = User(sock, nat=False).start() # create SIP user. Ownership of sock is moved to User.
user.context, user.username, user.password = self, login, passwd
if user.password:
if _debug: print ' registering addr=', addr, 'port=', port
result, reason = yield user.bind(addr, refresh=True)
if _debug: print ' registration returned', result, reason
if result == 'failed':
yield self.client.rejectConnection(reason=reason)
raise StopIteration(None)
self._gin = self._incominghandler(); multitask.add(self._gin) # incoming SIP messages handler
else: user.address = Address(addr)
if _debug: print ' register successful', self.user.address
yield self.client.accept()
except:
if _debug: print ' exception in register', (sys and sys.exc_info() or None)
yield self.client.rejectConnection(reason=sys and str(sys.exc_info()[1]) or 'Server Error')
raise StopIteration(None)
def rtmp_unregister(self):
try:
if self.user is not None:
if _debug: print 'rtmp-unregister', (self.client and self.client.path or None)
yield self._cleanup() # close the call first, if any
yield self.user.close()
yield self.user.stop()
if self.user.sock:
try: self.user.sock.close()
except: pass
self.user.sock = None
self.user.context = None; self.user = None
if self._gin is not None: self._gin.close(); self._gin = None
if self._gss is not None: self._gss.close(); self._gss = None
if self.media:
self.media.close(); self.media = None
except:
if _debug: print ' exception in unregister', (sys and sys.exc_info() or None)
def rtmp_invite(self, dest, *args):
global agent
try:
if _debug: print 'rtmp-invite %r %r'%(dest, args)
if self.user: # already a registered user exists
if not self.session: # not already in a session, so create one
try: dest = Address(dest) # first try the default scheme supplied by application
except: dest = Address(self.user.address.uri.scheme + ':' + dest) # otherwise scheme is picked from registered URI
if _debug: print ' create media context'
media = MediaContext(self, None, agent.int_ip, self._preferred, RTPNetwork, *args) # create a media context for the call
self.outgoing = self.user.connect(dest, sdp=media.session.mysdp, provisional=True)
try:
session, reason = yield self.outgoing
if _debug: print ' session=', session, 'reason=', reason
while reason is not None and reason.partition(" ")[0] in ('180', '183'):
yield self.client.call('ringing', reason)
self.outgoing = self.user.continueConnect(session, provisional=True)
session, reason = yield self.outgoing
except:
media.close()
if self.outgoing is not None: raise
else: raise StopIteration(None) # else call was cancelled in another task
self.outgoing = None # because the generator returned, and no more pending outgoing call
if session: # call connected
self.media, self.session, session.media = media, session, media.session
self.media.session.setRemote(session.yoursdp)
self._gss = self._sessionhandler(); multitask.add(self._gss) # receive more requests from SIP
codecs = self.media.accepting();
if _debug: print 'sip-accepted %r'%(codecs,)
yield self.client.call('accepted', *codecs)
else: # connection failed, close media socket
media.close()
yield self.client.call('rejected', reason)
else: yield self.client.call('rejected', 'Already in an active or pending call')
else: yield self.client.call('rejected', 'Registration required before making a call')
except:
if _debug: print ' exception in invite', (sys and sys.exc_info() or None)
if _debug: traceback.print_exc()
yield self.client.call('rejected', 'Internal server error')
def rtmp_accept(self, *args):
global agent
if _debug: print 'rtmp-accept %r'%(args,)
incoming = self.incoming; self.incoming = reason = media = None # clear self.incoming, and store value in incoming
try:
if self.user is not None and incoming is not None:
self.media = MediaContext(self, incoming[1].request, agent.int_ip, self._preferred, RTPNetwork, *args) # create a media context for the call
if self.media.session.mysdp is None:
reason = '488 Incompatible SDP'
else:
session, reason = yield self.user.accept(incoming, sdp=self.media.session.mysdp)
if session: # call connected
self.session, session.media = session, self.media.session
self._gss = self._sessionhandler(); multitask.add(self._gss) # receive more requests from SIP
codecs = self.media.accepting();
if _debug: print 'sip-accepted %r'%(codecs,)
yield self.client.call('accepted', *codecs)
else:
if not reason: reason = '500 Internal Server Error in Accepting'
else:
if _debug: print ' no incoming call. ignored.'
except:
if _debug: print ' exception in rtmp_accept', (sys and sys.exc_info())
reason = '500 Internal Server Exception'
if reason:
if self.media:
self.media.close(); self.media = None
if self.user: yield self.user.reject(incoming, reason) # TODO: a better way would be to reject in _incominghandler
if self.client: yield self.client.call('byed')
def rtmp_reject(self, reason='Decline'):
try:
if _debug: print 'rtmp-reject'
if self.user is not None and self.incoming is not None:
yield self.user.reject(self.incoming, reason)
self.incoming = None # no more pending incoming call
elif _debug: print ' no incoming call. ignored'
except:
if _debug: print ' exception in reject', (sys and sys.exc_info() or None)
def rtmp_bye(self):
try:
if _debug: print 'rtmp-bye'
if self.session is None and self.outgoing is not None: # pending outgoing invite
if _debug: print ' cancel outbound invite'
self.outgoing.close()
self.outgoing = None
elif self.session:
yield self._cleanup()
except:
if _debug: print ' exception in bye', (sys and sys.exc_info() or None)
def sip_invite(self, dest):
try:
if _debug: print 'sip-invite'
yield self.client.call('invited', str(dest), str(self.user.address))
except:
if _debug: print ' exception in sip_invite', (sys and sys.exc_info() or None)
yield
def sip_cancel(self, dest):
try:
if _debug: print 'sip-cancel'
yield self.client.call('cancelled', str(dest), str(self.user.address))
except:
if _debug: print ' exception in sip_cancel', (sys and sys.exc_info() or None)
yield
def sip_bye(self):
try:
if _debug: print 'sip-bye'
yield self.client.call('byed')
except:
if _debug: print ' exception in sip_bye', (sys and sys.exc_info() or None)
yield
def sip_hold(self, value):
try:
if _debug: print 'sip-hold', value
yield self.client.call('holded', value)
except:
if _debug: print ' exception in sip_hold', (sys and sys.exc_info() or None)
yield
def _incominghandler(self): # Handle incoming SIP messages
try:
user = self.user
while True:
cmd, arg = (yield user.recv())
if _debug: print 'incominghandler', cmd
if cmd == 'connect': # incoming invitation, inform RTMP side
self.incoming = arg
multitask.add(self.sip_invite(str(Address(arg[0]))))
elif cmd == 'close': # incoming call cancelled
self.incoming = None
multitask.add(self.sip_cancel(str(Address(arg[0]))))
except StopIteration: raise
except:
if _debug: print 'incominghandler exiting', (sys and sys.exc_info() or None)
self._gin = None
def _sessionhandler(self): # Handle SIP session messages
try:
session = self.session
while True:
cmd, arg = (yield session.recv())
if cmd == 'close': multitask.add(self.sip_bye()); break # exit from session handler
if cmd == 'change': # new SDP received from SIP side
is_hold = bool(arg and arg['c'] and arg['c'].address == '0.0.0.0')
multitask.add(self.sip_hold(is_hold))
yield self._cleanup()
except GeneratorExit: pass
except:
if _debug: print 'exception in sessionhandler', (sys and sys.exc_info() or None)
self._gss = None
if _debug: print 'sessionhandler exiting'
def _cleanup(self): # cleanup a session
if self.session:
yield self.session.close() # close the session
self.session = None
if self.media:
self.media.close()
self.media = None
if self._gss is not None: self._gss.close(); self._gss = None
def received(self, media, fmt, packet): # an RTP packet is received. Hand over to sip_data.
if fmt is not None:
multitask.add(self.sip_data(fmt, packet))
def sip_data(self, fmt, data): # handle media stream received from SIP
try:
p = RTP(data) if not isinstance(data, RTP) else data
if _debugAll: print ' <-s pt=%r seq=%r ts=%r ssrc=%r marker=%r len=%d'%(p.pt, p.seq, p.ts, p.ssrc, p.marker, len(p.payload))
if self.media:
messages = self.media.rtp2rtmp(fmt, p)
if self.play_stream and messages:
for message in messages:
if _debugAll: print 'f<- type=%r len=%r time=%r codec=0x%02x'%(message.type, message.size, message.time, message.data and ord(message.data[0]) or -1)
yield self.play_stream.send(message)
except (ValueError, AttributeError), E:
if _debug: print ' exception in sip_data', E; traceback.print_exc()
yield
def rtmp_data(self, stream, message): # handle media data message received from RTMP
try:
if _debugAll: print 'f-> type=%x len=%d codec=0x%02x'%(message.header.type, message.size, message.data and ord(message.data[0]) or -1)
if self.media:
messages = self.media.rtmp2rtp(stream, message)
if self.session and self.media.session and messages:
for payload, ts, marker, fmt in messages:
if _debugAll: print ' ->s fmt=%r %r/%r ts=%r marker=%r len=%d'%(fmt.pt, fmt.name, fmt.rate, ts, marker, len(payload))
self.media.session.send(payload=payload, ts=ts, marker=marker, fmt=fmt)
except:
if _debug: print ' exception in rtmp_data'; traceback.print_exc()
yield
def rtmp_sendDTMF(self, digit):
try:
if _debug: print 'rtmp-sendDTMF', digit
if self.media:
messages = self.media.dtmf2rtp(digit)
if self.session and self.media.session and messages is not None:
for payload, ts, marker, fmt in messages:
self.media.session.send(payload=payload, ts=ts, marker=marker, fmt=fmt)
except:
if _debug: print ' exception in rtmp_sendDTMF'; traceback.print_exc()
yield
def rtmp_hold(self, value):
try:
if _debug: print 'rtmp-hold', value
self.session.hold(value)
except:
if _debug: print ' exception in rtmp_hold'; traceback.print_exc()
traceback.print_exc()
yield
def requestFIR(self):
# TODO: this should be sent if we received INFO for FIR from remote.
if self.session and self.session.ua:
ua = self.session.ua
m = ua.createRequest('INFO')
m['Content-Type'] = SIPHeader('application/media_control+xml', 'Content-Type')
m.body = '''<?xml version="1.0" encoding="utf-8" ?>
<media_control>
<vc_primitive>
<to_encoder>
<picture_fast_update></picture_fast_update>
</to_encoder>
</vc_primitive>
</media_control>
'''
ua.sendRequest(m)
class MediaContext(object):
'''MediaContext stores the media related session and context for any transcoding for the gateway.
It is independent of multitask or gevent and is reused by the gevent version.
'''
def __init__(self, context, request=None, listen_ip=None, rate='wideband', NetworkClass=None, *args):
if not NetworkClass: raise ValueError('must supply the RTP NetworkClass')
self._context, self._rate, self._codecs = context, rate, args
self._flv, self._h264, self._touchtone, self._narrowband, self._wideband, self._pcmu, self._pcma = format(pt=-1, name='x-flv', rate=90000), format(pt=-1, name='h264', rate=90000), format(pt=-1, name='telephone-event', rate=8000), format(pt=-1, name='speex', rate=8000), format(pt=-1, name='speex', rate=16000), format(pt=0, name='pcmu', rate=8000), format(pt=8, name='pcma', rate=8000)
self._audio, self._video = self._getMediaStreams()
self._reset()
streams = [x for x in [self._audio, self._video] if x]
self.session = MediaSession(app=context, streams=streams, request=request, listen_ip=listen_ip, NetworkClass=NetworkClass) # create the actual MediaSession
def close(self):
if self.session:
self.session.close()
self.session = None
self._reset()
self._context = self.session = None
def _reset(self):
# x-flv states
self._flv1_txseq = self._flv2_rxseq = self._flv2_rxlen = 0
self._flv2_rxchunks = []
# H264 transcoder state
self._h1_cfgVer = self._h1_profileIdc = self._h1_profileCompat = self._h1_levelIdc = self._h1_lenSize = self._h1_SPS = self._h1_PPS = self._h1_data = None
self._h2_SPS, self._h2_PPS, self._h2_sentSeq, self._h2_sentMetaData, self._h2_startTs, self._h2_startTm, self._h2_queue, self._h2_firstTime, self._h2_lastFIR = None, None, False, False, 0, 0, [], True, 0
# Audio transcoder state: 1 for rtmp->rtp and 2 for rtp->rtmp
self._au1_resample = self._au1_speex2lin = self._au1_lin2speex = self._au1_fmt = self._au2_lin2speex = None # transcoder states for audiospeex module.
self._au1_ts = self._au2_ts0 = self._au2_tm = self._au2_ssrc = 0
def rtmp2rtp(self, stream, message): # public method called by Context to transcode RTMP to RTP for media.
if self.session: # order of following processing is important
if self.session.hasType('video') and self.session.hasYourFormat(self._flv): # the remote SIP user supports our video format. send FLV video to remote in RTP.
return self._rtmp2rtpFLV(message) # both audio and video sent via this
elif message.header.type == Message.VIDEO and message.size > 1: # possibly H.264 video packet
if self.session.hasYourFormat(self._h264): # if h264 is available
return self._rtmp2rtpH264(message)
# else just ignore the message for audio-only call to SIP VoIP phone
elif message.header.type == Message.AUDIO and message.size > 1: # audio packet of speex codec.
return self._rtmp2rtpAU(message)
elif _debug: print ' ignoring in rtmp2rtp type=', message.header.type, 'size=', message.size
def rtp2rtmp(self, fmt, p): # public method called by Context to transcode RTP to RTMP for media
if str(fmt.name).lower() == str(self._flv.name).lower(): # this is a video (FLV) packet, just assemble and return to rtmp
return self._rtp2rtmpFLV(p)
elif str(fmt.name).lower() == str(self._touchtone.name).lower(): # this is DTMF
if _debug: print 'ignoring incoming DTMF touchtone'
elif str(fmt.name).lower() == str(self._h264.name).lower(): # this is H264
return self._rtp2rtmpH264(fmt, p)
#if self.session and self.session.hasYourFormat(self._h264): # uncomment for loopback
# self.session.send(payload=p.payload, ts=p.ts, marker=p.marker, fmt=self._h264)
else: # this is a audio (Speex) packet. Build RTMP header and return to rtmp
if self._context.play_stream: # avoid transcoding if play-stream is not created yet.
return self._rtp2rtmpAU(fmt, p)
def dtmf2rtp(self, digit): # public method called by Context to send DTMF to RTP.
if len(digit) != 1:
if _debug: print ' only single digit DTMF is supported in sendDTMF'
elif not self.session or not self.session.hasType('audio'):
if _debug: print ' ignoring sendDTMF: not an active audio call'
else:
payload = repr(DTMF(key=digit, end=True))
if _debug: print ' sending payload %r'%(payload,)
return [(payload, self._au1_ts, False, self._touchtone)]
def _getMediaStreams(self):
global audiospeex
audio, video = SDP.media(media='audio'), SDP.media(media='video')
if not self._codecs: # use the default codecs for backward compatibility
audio.fmt, video.fmt = [format(pt=96, name='speex', rate=8000 if self._rate == 'narrowband' else 16000)], [format(pt=97, name='x-flv', rate=90000)]
if audiospeex:
audio.fmt.extend([format(pt=98, name='speex', rate=16000 if self._rate == 'narrowband' else 8000), format(pt=0, name='pcmu', rate=8000), format(pt=8, name='pcma', rate=8000)])
# add touchtone format to allow sending this format as well.
audio.fmt.extend([format(pt=101, name='telephone-event', rate=8000)])
else:
pcmu = pcma = narrowband = hasvideo = hasaudio = False
for codec in self._codecs:
if codec == 'wideband': audio.fmt.append(format(pt=96, name='speex', rate=16000)); hasaudio = True
elif codec == 'narrowband' and not narrowband: audio.fmt.append(format(pt=98, name='speex', rate=8000)); hasaudio = narrowband = True
elif codec == 'pcmu' and not pcmu: audio.fmt.append(format(pt=0, name='pcmu', rate=8000)); hasaudio = pcmu = True
elif codec == 'pcma' and not pcma: audio.fmt.append(format(pt=8, name='pcma', rate=8000)); hasaudio = pcma = True
elif codec == 'ulaw' and audiospeex and not pcmu: audio.fmt.append(format(pt=0, name='pcmu', rate=8000)); hasaudio = pcmu = True
elif codec == 'alaw' and audiospeex and not pcma: audio.fmt.append(format(pt=8, name='pcma', rate=8000)); hasaudio = pcma = True
elif codec == 'dtmf': audio.fmt.append(format(pt=101, name='telephone-event', rate=8000)); hasaudio = True
elif codec == 'flv': video.fmt.append(format(pt=97, name='x-flv', rate=90000)); hasvideo = True
elif codec and codec.startswith('h264'): video.fmt.append(format(pt=99, name='h264', rate=90000)); hasvideo = True
elif _debug: print 'ignoring %r, may already be added'%(codec,)
if codec and codec.startswith('h264') and 'a' not in video: video['a'] = ['fmtp:99 profile-level-id=420014;packetization-mode=1'] # TODO: handle h264/baseline vs h264/main
if not hasaudio: audio = None
if not hasvideo: video = None
return (audio, video)
def _rtmp2rtpFLV(self, message): # convert given RTMP message to RTP packets and send to SIP side
data = pack('>III', message.type, message.size, message.time) + message.data # assembled message
origlen, packets, cseq = len(data), [], 0
hdr = pack('>Ihh', self._flv1_txseq, cseq, len(data)) # header for first chunk
while len(data) > 0:
packets.append('RTMP'+hdr+data[:1000])
data = data[1000:]
cseq += 1
hdr = pack('>Ih', self._flv1_txseq, cseq)
# if _debug: print ' FLV sending type=%d,len=%d split seq=%d, chunks=%d'%(message.type, origlen, self._flv1_txseq, len(packets))
self._flv1_txseq += 1
return [(packet, message.time*(self._flv.rate/1000), False, self._flv) for packet in packets]
def _rtp2rtmpFLV(self, p): # convert given RTP packet to RTMP message and play to the rtmp side.
magic, payload = p.payload[:4], p.payload[4:]
if magic != 'RTMP':
if _debug: print 'ignoring non-RTMP packet in received video'
return
seq, cseq = unpack('>Ih', payload[:6])
# if _debug: print ' FLV received seq=%d cseq=%d len=%d'%(seq, cseq, len(payload))
if cseq == 0: # first packet in the chunks. Initialize the rx state.
self._flv2_rxseq, self._flv2_rxchunks[:] = seq, []
self._flv2_rxlen, = unpack('>h', payload[6:8])
self._flv2_rxchunks.append(payload[8:])
else:
if seq != self._flv2_rxseq or len(self._flv2_rxchunks) == 0:
if _debug: print 'probably missed a begin packet'
return
if cseq != len(self._flv2_rxchunks):
if _debug: print 'probably out of order packet'
return
self._flv2_rxchunks.append(payload[6:])
got = sum(map(lambda x: len(x), self._flv2_rxchunks), 0)
if got < self._flv2_rxlen: return # not all chunks have been received yet
if got > self._flv2_rxlen:
if _debug: print 'unexpected error, got more than expected %d > %d'%(got, self._flv2_rxlen)
return
if self._flv2_rxlen < 12:
if _debug: print 'received data is too small %d'%(self._flv2_rxlen)
return
data, message = ''.join(self._flv2_rxchunks), Message()
self._flv2_rxlen, self._flv2_rxchunks[:] = 0, [] # clear the state now that we have full packet
message.type, msglen, message.time = unpack('>III', data[0:12]); message.data = data[12:]
if msglen != len(message.data):
if _debug: print 'invalid message len %d != %d'%(msglen, len(message.data))
return
return [message]
def _rtmp2rtpH264(self, message):
# if _debug: print 'f-> ', len(message.data), repr(message.data[:20])
messages = []
if message.data[:2] == '\x17\x00': # AVC seq
data = message.data[2:]
cfgVer, profileIdc, profileCompat, levelIdc = unpack('>BBBB', data[3:7])
if cfgVer == 1:
lenSize = (ord(data[7]) & 0x03) + 1
numSPS, data, SPS = (ord(data[8]) & 0x1f), data[9:], []
for i in range(numSPS):
lenSPS, data = unpack('>H', data[:2])[0], data[2:]
SPS.append(data[:lenSPS])
data = data[lenSPS:]
numPPS, data, PPS = ord(data[0]), data[1:], []
for j in range(numPPS):
lenPPS, data = unpack('>H', data[:2])[0], data[2:]
PPS.append(data[:lenPPS])
data = data[lenPPS:]
# if _debug: print 'avcCfg: cfgVer=%r profileIdc=%r profileCompat=%r levelIdc=%r lenSize=%r numSPS=%r numPPS=%r SPS=%r PPS=%r'%(cfgVer, profileIdc, profileCompat, levelIdc, lenSize, numSPS, numPPS, SPS, PPS)
# store the parameter sets
self._h1_cfgVer, self._h1_profileIdc, self._h1_profileCompat, self._h1_levelIdc, self._h1_lenSize, self._h1_SPS, self._h1_PPS, self._h1_data = cfgVer, profileIdc, profileCompat, levelIdc, lenSize, SPS, PPS, message.data
if SPS: # send this to other end.
ts, marker = message.time * self._h264.rate / 1000, True
# if _debug: print ' ->s', len(SPS[0]), repr(SPS[0])
messages.append((SPS[0], ts, marker, self._h264))
if PPS:
# if _debug: print ' ->s', len(PPS[0]), repr(PPS[0])
ts, marker = message.time * self._h264.rate / 1000, True
messages.append((PPS[0], ts, marker, self._h264))
elif message.data[:2] == '\x17\x01' or message.data[:2] == '\x27\x01': # AVC intra or inter
if self._h1_PPS and self._h1_SPS: # indicates that SPS/PPS are sent
try:
nals = []
lenSize, data = self._h1_lenSize, message.data[5:]
while data:
nalSize = data[:lenSize]
if lenSize == 1: nalSize = unpack('>B', nalSize)[0]
elif lenSize == 2: nalSize = unpack('>H', nalSize)[0]
elif lenSize == 4: nalSize = unpack('>I', nalSize)[0]
else: raise ValueError('invalid lenSize %d'%(lenSize,))
nalData, data = data[lenSize:lenSize+nalSize], data[lenSize+nalSize:]
nals.append(nalData)
# if _debug: print ' nals count=', len(nals), 'types=', repr([(ord(x[0]) & 0x1f) for x in nals])
if nals:
remaining = nals[-1]
# message.data = message.data[:5] + pack('>I', len(remaining)) + remaining
maxSize = 1446
nalType, nri = (ord(remaining[0]) & 0x1f), (ord(remaining[0]) & 0x60)
if nalType == 5 or nalType == 1: # others are ignored for now
ts, marker = message.time * self._h264.rate / 1000, True # treat each Message as an access unit
if len(remaining) <= (maxSize-1):
# if _debug: print ' ->s', len(remaining), repr(remaining[:15])
messages.append((remaining, ts, marker, self._h264))
else: # TODO: only if packetization-mode != 0
start = 0x80
remaining = remaining[1:]
while remaining:
data, remaining = remaining[:maxSize-2], remaining[maxSize-2:]
end = 0x00 if remaining else 0x40
payload = pack('>BB', nri | 28, start | end | nalType) + data
start = 0x00
# if _debug: print ' ->s', len(payload), repr(payload[:15])
messages.append((payload, ts, bool(end), self._h264))
except:
print 'exception', sys.exc_info()
traceback.print_exc()
return messages
def _rtp2rtmpH264(self, fmt, p):
messages = []
nalType = ord(p.payload[0]) & 0x1f
if nalType == 7: # SPS
self._h2_SPS = p.payload
elif nalType == 8: # PPS
self._h2_PPS = p.payload
elif len(p.payload) > 1:
if nalType == 24: # cisco phone sends SPS/PPS in aggregated packet
payload = p.payload[1:]
while payload:
size, payload = unpack('>H', payload[:2])[0], payload[2:]
naldata, payload = payload[:size], payload[size:]
nt = ord(naldata[0]) & 0x1f
if nt == 7:
self._h2_SPS = naldata
if _debug: print 'extract SPS from aggregated %r'%(naldata,)
elif nt == 8:
self._h2_PPS = naldata
if _debug: print 'extract PPS from aggregated %r'%(naldata,)
if nalType in (1, 5, 28, 24):
p.nalType = nalType
self._h2_queue.append(p) # assumes sorted order by seq.
if len(self._h2_queue) >= 2 and (self._h2_queue[-1].seq != (self._h2_queue[-2].seq + 1)):
if (self._h2_queue[-2].seq != 65535) or (self._h2_queue[-1].seq != 0):
if _debug: print 'new packet does not directly follow previous: %r != %r + 1'%(self._h2_queue[-1].seq, self._h2_queue[-2].seq)
if len(self._h2_queue) >= 2 and (self._h2_queue[-1].ts != self._h2_queue[-2].ts):
if _debug: print 'clearing old queue since new packet has different ts: %r != %r'%(self._h2_queue[-1].ts, self._h2_queue[-2].ts)
self._h2_queue[:] = self._h2_queue[-1:] # clear the queue
# we received the marker, so we know this is the end of the frame
if p.marker and len(self._h2_queue) > 0:
queued, self._h2_queue = self._h2_queue, []
# handle fragmentation and aggregation
nalType, realNri, payloads, newdata, pendingdata = 0, 0, [], '', []
for q in queued:
if q.nalType == 5 or q.nalType == 1:
if not newdata:
nalType = q.nalType
# 0x17 (for intra-frame) or 0x27 (for non-intra frame)
# 0x00 (configuration data) or 0x01 (picture data)
newdata = ('\x17' if nalType == 5 else '\x27') + '\x01\x00\x00\x00'
newdata += pack('>I', len(q.payload)) + q.payload
elif q.nalType == 24: # expand aggregated packet
payload = q.payload[1:]
while payload:
size, payload = unpack('>H', payload[:2])[0], payload[2:]
naldata, payload = payload[:size], payload[size:]
nt = ord(naldata[0]) & 0x1f
if nt == 5 or nt == 1: # don't handle 7 and 8 as they are already handled before
if not newdata:
nalType = nt
newdata = ('\x17' if nalType == 5 else '\x27') + '\x01\x00\x00\x00'
if _debug: print 'extract from aggregate type=%r len=%r'%(nalType, len(naldata))
newdata += pack('>I', len(naldata)) + naldata
elif q.nalType == 28: # aggregate all fragments
if not newdata:
nalType, realNri = (ord(q.payload[1]) & 0x1f), (ord(q.payload[0]) & 0x60)
# 0x17 (for intra-frame) or 0x27 (for non-intra frame)
# 0x00 (configuration data) or 0x01 (picture data)
newdata = ('\x17' if nalType == 5 else '\x27') + '\x01\x00\x00\x00'
pendingdata.append(q.payload[2:])
if ord(q.payload[1]) & 0x40: # end bit
remaining = pack('>B', nalType | realNri) + ''.join(pendingdata)
if _debug: print 'aggregated %r packets, len=%r, type=%r'%(len(pendingdata), len(remaining), nalType)
pendingdata[:] = []
newdata += pack('>I', len(remaining)) + remaining
else:
continue
# we store the data of the frame
if newdata:
payloads.append(newdata)
SPS, PPS, sentSeq = self._h2_SPS, self._h2_PPS, self._h2_sentSeq
if self._context.play_stream is None or not PPS or not SPS or PPS and SPS and not sentSeq and nalType != 5:
if _debug: print 'H264 drop until next intra'
self._h2_queue[:] = [] # drop until next intra
if (time.time() - self._h2_lastFIR) > 5.0:
self._h2_lastFIR = time.time()
self._context.requestFIR()
else:
if PPS and SPS and not sentSeq and nalType == 5:
self._h2_sentSeq = sentSeq = True
# compute the timestamp
if not self._h2_startTs:
self._h2_startTs = p.ts
if not self._h2_startTm:
self._h2_startTm = self._context.play_stream.client.relativeTime
tm = (p.ts - self._h2_startTs) / (self._h264.rate / 1000) + self._h2_startTm
if payloads and nalType == 5: # send SPS/PPS
if _debug: print " SPS", repr(SPS), "PPS", repr(PPS)
# 0x17 (1 if intra, 7 for H.264/AVC) 0x00 (configuration data)
data = '\x17\x00\x00\x00\x00\x01' + SPS[1:4] + '\xff\xe1' + pack('>H', len(SPS)) + SPS + '\x01' + pack('>H', len(PPS)) + PPS
payloads.insert(0, data)
if self._context.play_stream:
messages.extend([Message(Header(time=tm, size=len(payload), type=Message.VIDEO, streamId=self._context.play_stream.id), payload) for payload in payloads])
return messages
def accepting(self): # called by Context to return the selected codec after negotiation
global audiospeex
session = self.session
if not self._codecs: # do not set codecs for backward compatibility with older applications
preferred = self._audio and self._audio.fmt[0]
if audiospeex and session.hasType('audio') and not session.hasYourFormat(preferred): # if we have audiospeex transcoding module and remote doesn't have our preferred format, enable transcoding
fmt = ([fy for fy in self._audio.fmt if session.hasYourFormat(fy)] + [None])[0]
if _debug: print ' enable transcoding between %r/%r and %r/%r'%(preferred.name if preferred else None, preferred.rate if preferred else 0, fmt.name if fmt else None, fmt.rate if fmt else 0)
if fmt: self._au1_fmt = fmt # this will enable transcoding in rtmp2rtpAU
return tuple()
else:
if 'flv' in self._codecs and self.session.hasYourFormat(self._flv):
return ('default', 'default') # let the Flash Player choose between speex/nellymoser and h264/sorenson.
audiop = videop = None
for codec in self._codecs: # need to deal with only audio codecs
if not audiop and codec in ('wideband', 'narrowband', 'pcmu', 'pcma', 'ulaw', 'alaw'):
if codec == 'wideband' and session.hasYourFormat(self._wideband) or codec == 'narrowband' and session.hasYourFormat(self._narrowband) or codec == 'pcmu' and session.hasYourFormat(self._pcmu) or codec == 'pcma' and session.hasYourFormat(self._pcma):
audiop = 'speex' if codec in ('wideband', 'narrowband') else codec # no transcoding needed
elif codec == 'ulaw' and session.hasYourFormat(self._pcmu) or codec == 'alaw' and session.hasYourFormat(self._pcma):
if audiospeex: # enable transcoding if needed
preferred = self._narrowband
fmt = ([fy for fy in self._audio.fmt if session.hasYourFormat(fy)] + [None])[0]
if _debug: print ' enable transcoding between %r/%r and %r/%r'%(preferred.name if preferred else None, preferred.rate if preferred else 0, fmt.name if fmt else None, fmt.rate if fmt else 0)
if fmt: self._au1_fmt = fmt
audiop = 'speex'
if not videop and codec == 'h264' and session.hasYourFormat(self._h264):
videop = 'h264'
return (audiop, videop)
def _rtmp2rtpAU(self, message):
global audiospeex
# if _debug: print ' AU received %r'%(message.data[0],)
first, payload, fmt = ord(message.data[0]), message.data[1:], None
codec = {0xb0: 'speex', 0x70: 'pcma', 0x80: 'pcmu'}.get(first & 0xf0, '')
if not codec: return # probably nellymoser or something else but target doesn't support x-flv.
session = self.session
if not self._au1_fmt: # no transcoding needed
if codec == 'speex' and session.hasYourFormat(self._wideband):
fmt = self._wideband
elif codec == 'speex' and session.hasYourFormat(self._narrowband):
fmt, payload = self._narrowband, self._removeWideband(payload) # remove wideband if target supports only narrowband but not wideband
elif codec == 'pcmu' and session.hasYourFormat(self._pcmu):
fmt = self._pcmu
elif codec == 'pcma' and session.hasYourFormat(self._pcma):
fmt = self._pcma
elif _debug: print 'ignoring codec audio type %r'%(first,)
elif audiospeex: # perform transcoding from speex/16000 to self._au1_fmt
fmt = self._au1_fmt
if str(fmt.name).lower() != 'speex' or fmt.rate != 16000: # only if transcoding is needed.
linear, self._au1_speex2lin = audiospeex.speex2lin(payload, sample_rate=16000, state=self._au1_speex2lin)
linear, self._au1_resample = audiospeex.resample(linear, input_rate=16000, output_rate=fmt.rate, state=self._au1_resample)
if str(fmt.name).lower() == 'speex' and fmt.rate != 16000: # transcode speex/16000 to speex/rate
payload, self._au1_lin2speex = audiospeex.lin2speex(linear, sample_rate=fmt.rate, state=self._au1_lin2speex)
elif str(fmt.name).lower() == 'pcmu' and fmt.rate == 8000 or fmt.pt == 0: # transcode speex/16000 to pcmu/8000
payload = audioop.lin2ulaw(linear, 2)
elif str(fmt.name).lower() == 'pcma' and fmt.rate == 8000 or fmt.pt == 8:
payload = audioop.lin2alaw(linear, 2)
else: raise ValueError, 'ignoring unsupported payload type %r %r/%r'%(fmt.pt, fmt.name, fmt.rate)
# TODO: map from RTMP timestamp to RTP
if fmt: self._au1_ts += (fmt.rate * 20 / 1000) # assume 20 ms at 8000 or 16000 Hz
return [(payload, self._au1_ts, False, fmt)] if payload and fmt else None
def _rtp2rtmpAU(self, fmt, p):
global audiospeex
if not self._au1_fmt: # no transcoding needed
speex_data, input_rate = p.payload, fmt.rate or 8000 # TODO: assume pcmu or pcma at 8kHz
if str(fmt.name).lower() == 'speex':
type = '\xb2'
elif str(fmt.name).lower() == 'pcmu' and fmt.rate == 8000 or fmt.pt == 0:
type = '\x82'
elif str(fmt.name).lower() == 'pcma' and fmt.rate == 8000 or fmt.pt == 8:
type = '\x72'
else:
raise ValueError, 'ignoring unsupported payload type %r %r/%r'%(fmt.pt, fmt.name, fmt.rate)
elif str(fmt.name).lower() == 'speex': # no transcoding since Flash supports speex 8000/16000 anyway
type, speex_data, input_rate = '\xb2', p.payload, fmt.rate
else: # perform transcoding from self._au1_fmt to speex/8000
type, input_rate = '\xb2', fmt.rate or 8000
if str(fmt.name).lower() == 'pcmu' and fmt.rate == 8000 or fmt.pt == 0:
linear = audioop.ulaw2lin(p.payload, 2)
elif str(fmt.name).lower() == 'pcma' and fmt.rate == 8000 or fmt.pt == 8:
linear = audioop.alaw2lin(p.payload, 2)
else:
raise ValueError, 'ignoring unsupported payload type %r %r/%r'%(fmt.pt, fmt.name, fmt.rate)
# TODO: never send speex/16000 to Flash after transcoding
speex_data, self._au2_lin2speex = audiospeex.lin2speex(linear, sample_rate=8000, state=self._au2_lin2speex)
if self._au2_ssrc and p.ssrc != self._au2_ssrc: # ssrc has probably changed, so reset timers.
self._au2_ts0 = self._au2_tm = self._au2_ssrc = 0
if not self._au2_ts0: self._au2_ts0 = p.ts
if not self._au2_ssrc: self._au2_ssrc = p.ssrc
if not self._au2_tm: self._au2_tm = self._context.play_stream.client.relativeTime
payload, tm = type + speex_data, (p.ts - self._au2_ts0) / (input_rate / 1000) + self._au2_tm
header = Header(time=tm, size=len(payload), type=Message.AUDIO, streamId=self._context.play_stream.id)
m = Message(header, payload)
# if _debug: print ' RTMP pt=%x len=%d hdr=%r'%(m.header.type, m.size, m.header)
return [m]
def _removeWideband(self, payload):
if ord(payload[0]) & 0x80 == 0: # narrowband
mode = (ord(payload[0]) & 0x78) >> 3
bits = (5, 43, 119, 160, 220, 300, 364, 492, 79)[mode] if mode < 9 else 0
size, bits = bits / 8, bits % 8
if bits and (size + 1) <= len(payload):
payload = payload[:size] + chr(((ord(payload[size]) & ((0xff << (8-bits)) & 0xff)) | (0xff >> (bits + 1))) & 0xff)
elif not bits and size <= len(payload):
payload = payload[:size]
return payload
class Gateway(App):
'''The SIP-RTMP gateway implemented as RTMP server application.'''
def __init__(self):
App.__init__(self)
def onConnect(self, client, *args):
App.onConnect(self, client, args)
for c in self.clients: multitask.add(c.connectionClosed())
client.context = Context(self, client)
multitask.add(client.context.rtmp_register(*args))
return None
def onDisconnect(self, client):
App.onDisconnect(self, client)
multitask.add(client.context.rtmp_unregister())
def onCommand(self, client, cmd, *args):
App.onCommand(self, client, cmd, args)
if hasattr(client.context, 'rtmp_%s'%(cmd,)) and callable(eval('client.context.rtmp_%s'%(cmd,))):
multitask.add(eval('client.context.rtmp_%s'%(cmd,))(*args))
elif _debug: print 'invalid command', cmd
def onPublish(self, client, stream):
if _debug: print self.name, 'onPublish', client.path, stream.name
client.context.publish_stream = stream
def onClose(self, client, stream):
if _debug: print self.name, 'onClose', client.path, stream.name
client.context.publish_stream = None
def onPlay(self, client, stream):
if _debug: print self.name, 'onPlay', client.path, stream.name
client.context.play_stream = stream
client.context.media._au2_ts0 = client.context.media._au2_tm = 0
def onStop(self, client, stream):
if _debug: print self.name, 'onStop', client.path, stream.name
client.context.play_stream = None
def onStatus(self, client, info):
if _debug: print self.name, 'onStatus', info
def onResult(self, client, result):
if _debug: print self.name, 'onResult', result
def onPublishData(self, client, stream, message):
multitask.add(client.context.rtmp_data(stream, message))
return False
#---------------------------------- Testing -------------------------------
# The main routine to start, run and stop the service. This part is similar to rtmp.py
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser(version='SVN $Revision: 162 $, $Date: 2012-09-22 19:29:18 -0700 (Sat, 22 Sep 2012) $'.replace('$', ''))
parser.add_option('-i', '--host', dest='host', default='0.0.0.0', help="listening IP address for RTMP. Default '0.0.0.0'")
parser.add_option('-p', '--port', dest='port', default=1935, type="int", help='listening port number for RTMP. Default 1935')
parser.add_option('-r', '--root', dest='root', default='./', help="document path prefix. Directory must end with /. Default './'")
parser.add_option('-l', '--int-ip', dest='int_ip', default='0.0.0.0', help="listening IP address for SIP and RTP. Default '0.0.0.0'")
parser.add_option('-e', '--ext-ip', dest='ext_ip', default=None, help='IP address to advertise in SIP/SDP. Default is to use "--int-ip" or any local interface')
parser.add_option('-d', '--verbose', dest='verbose', default=False, action='store_true', help='enable debug trace')
parser.add_option('-D', '--verbose-all', dest='verbose_all', default=False, action='store_true', help='enable full debug trace of all modules')
(options, args) = parser.parse_args()
import rtmp, app.voip, std.rfc3550, std.rfc3261
rtmp._debug = options.verbose_all
app.voip._debug = options.verbose or options.verbose_all
#std.rfc3550._debug = options.verbose
std.rfc3261._debug = options.verbose_all
_debug = options.verbose or options.verbose_all
_debugAll = options.verbose_all
if _debug and not audiospeex:
print 'warning: audiospeex module not found; disabling transcoding to/from speex'
if options.ext_ip: setlocaladdr(options.ext_ip)
elif options.int_ip != '0.0.0.0': setlocaladdr(options.int_ip)
try:
agent = FlashServer()
agent.apps['sip'] = Gateway
agent.root, agent.int_ip, agent.ext_ip = options.root, options.int_ip, options.ext_ip
agent.start(options.host, options.port)
if _debug: print time.asctime(), 'Flash Server Starts - %s:%d' % (options.host, options.port)
while True:
try: multitask.run()
except multitask.Timeout: pass
except KeyboardInterrupt:
pass
    if _debug: print time.asctime(), 'Flash Server Stops'
| gpl-2.0 | -4,745,211,580,218,919,000 | 66.215453 | 393 | 0.647637 | false |
lenoch/tagsetbench | 3rdparty/python/shlex.py | 1 | 11671 | """A lexical analyzer class for simple shell-like syntaxes."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
# Posix compliance, split(), string arguments, and
# iterator interface by Gustavo Niemeyer, April 2003.
import os
import re
import sys
from collections import deque
from io import StringIO
__all__ = ["shlex", "split", "quote"]
class shlex:
"A lexical analyzer class for simple shell-like syntaxes."
def __init__(self, instream=None, infile=None, posix=False):
if isinstance(instream, str):
instream = StringIO(instream)
if instream is not None:
self.instream = instream
self.infile = infile
else:
self.instream = sys.stdin
self.infile = None
self.posix = posix
if posix:
self.eof = None
else:
self.eof = ''
self.commenters = '#'
self.punctuation = '()=>/'
self.whitespace = ' \t\r\n'
self.whitespace_split = False
self.quotes = '\'"'
self.escape = '\\'
self.escapedquotes = '"'
self.state = ' '
self.pushback = deque()
self.lineno = 1
self.debug = 0
self.token = ''
self.filestack = deque()
self.source = None
if self.debug:
print('shlex: reading from %s, line %d'
% (self.instream, self.lineno))
def push_token(self, tok):
"Push a token onto the stack popped by the get_token method"
if self.debug >= 1:
print("shlex: pushing token " + repr(tok))
self.pushback.appendleft(tok)
def push_source(self, newstream, newfile=None):
"Push an input source onto the lexer's input source stack."
if isinstance(newstream, str):
newstream = StringIO(newstream)
self.filestack.appendleft((self.infile, self.instream, self.lineno))
self.infile = newfile
self.instream = newstream
self.lineno = 1
if self.debug:
if newfile is not None:
print('shlex: pushing to file %s' % (self.infile,))
else:
print('shlex: pushing to stream %s' % (self.instream,))
def pop_source(self):
"Pop the input source stack."
self.instream.close()
(self.infile, self.instream, self.lineno) = self.filestack.popleft()
if self.debug:
print('shlex: popping to %s, line %d'
% (self.instream, self.lineno))
self.state = ' '
def get_token(self):
"Get a token from the input stream (or from stack if it's nonempty)"
if self.pushback:
tok = self.pushback.popleft()
if self.debug >= 1:
print("shlex: popping token " + repr(tok))
return tok
# No pushback. Get a token.
raw = self.read_token()
# Handle inclusions
if self.source is not None:
while raw == self.source:
spec = self.sourcehook(self.read_token())
if spec:
(newfile, newstream) = spec
self.push_source(newstream, newfile)
raw = self.get_token()
# Maybe we got EOF instead?
while raw == self.eof:
if not self.filestack:
return self.eof
else:
self.pop_source()
raw = self.get_token()
# Neither inclusion nor EOF
if self.debug >= 1:
if raw != self.eof:
print("shlex: token=" + repr(raw))
else:
print("shlex: token=EOF")
return raw
def read_token(self):
quoted = False
escapedstate = ' '
while True:
nextchar = self.instream.read(1)
if nextchar == '\n':
self.lineno = self.lineno + 1
if self.debug >= 3:
print("shlex: in state", repr(self.state),
"I see character:", repr(nextchar))
if self.state is None:
self.token = '' # past end of file
break
elif self.state == ' ':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print("shlex: I see whitespace in whitespace state")
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.punctuation:
if self.token:
self.pushback.appendleft(nextchar)
else:
self.token = nextchar
break
elif nextchar in self.quotes:
if not self.posix:
self.token = nextchar
self.state = nextchar
else:
self.token = nextchar
self.state = 'a'
elif self.state in self.quotes:
quoted = True
if not nextchar: # end of file
if self.debug >= 2:
print("shlex: I see EOF in quotes state")
# XXX what error should be raised here?
raise ValueError("No closing quotation")
if nextchar == self.state:
if not self.posix:
self.token = self.token + nextchar
self.state = ' '
break
else:
self.state = 'a'
                        break # I added this because of empty strings
elif (self.posix and nextchar in self.escape and
self.state in self.escapedquotes):
escapedstate = self.state
self.state = nextchar
else:
self.token = self.token + nextchar
elif self.state in self.escape:
if not nextchar: # end of file
if self.debug >= 2:
print("shlex: I see EOF in escape state")
# XXX what error should be raised here?
raise ValueError("No escaped character")
# In posix shells, only the quote itself or the escape
# character may be escaped within quotes.
if escapedstate in self.quotes and \
nextchar != self.state and nextchar != escapedstate:
self.token = self.token + self.state
self.token = self.token + nextchar
self.state = escapedstate
elif self.state == 'a':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print("shlex: I see whitespace in word state")
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
if self.posix:
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.posix and nextchar in self.quotes:
self.state = nextchar
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.punctuation:
self.state = ' '
if self.token:
self.pushback.appendleft(nextchar)
else:
self.token = nextchar
break
else:
self.token = self.token + nextchar
result = self.token
self.token = ''
if self.posix and not quoted and result == '':
result = None
if self.debug > 1:
if result:
print("shlex: raw token=" + repr(result))
else:
print("shlex: raw token=EOF")
return result
def sourcehook(self, newfile):
"Hook called on a filename to be sourced."
if newfile[0] == '"':
newfile = newfile[1:-1]
# This implements cpp-like semantics for relative-path inclusion.
if isinstance(self.infile, str) and not os.path.isabs(newfile):
newfile = os.path.join(os.path.dirname(self.infile), newfile)
return (newfile, open(newfile, "r"))
def error_leader(self, infile=None, lineno=None):
"Emit a C-compiler-like, Emacs-friendly error-message leader."
if infile is None:
infile = self.infile
if lineno is None:
lineno = self.lineno
return "\"%s\", line %d: " % (infile, lineno)
def __iter__(self):
return self
def __next__(self):
token = self.get_token()
if token == self.eof:
raise StopIteration
return token
def split(s, comments=False, posix=True):
lex = shlex(s, posix=posix)
lex.whitespace_split = True
if not comments:
lex.commenters = ''
return list(lex)
_find_unsafe = re.compile(r'[^\w@%+=:,./-]', re.ASCII).search
def quote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
return "''"
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
def _print_tokens(lexer):
while 1:
tt = lexer.get_token()
if not tt:
break
print("Token: " + repr(tt))
# based on _find_unsafe
# NOTE: I added both quote characters, because (word=" lemma=" k=I x=" 2–3)
# could not be converted back otherwise, I think
# NOTE: and I removed ? (so far it has no special meaning in my language)
_need_escaping = re.compile(r'[()= \t\n"\']').search
def escape(s): # starts out like shlex.quote
if not isinstance(s, str):
s = str(s)
if not s:
return "''"
elif _need_escaping(s) is None:
return s
elif "'" not in s:
return "'" + s + "'"
elif '"' not in s:
return '"' + s + '"'
else:
# good enough for repr(), which is the target anyway, not the shell
return "'''" + s + "'''"
if __name__ == '__main__':
if len(sys.argv) == 1:
_print_tokens(shlex())
else:
fn = sys.argv[1]
with open(fn) as f:
_print_tokens(shlex(f, fn))
| mit | 29,509,348,548,850,196 | 34.953704 | 77 | 0.493862 | false |
andyxhadji/incubator-airflow | airflow/operators/s3_file_transform_operator.py | 4 | 6750 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tempfile import NamedTemporaryFile
import subprocess
from airflow.exceptions import AirflowException
from airflow.hooks.S3_hook import S3Hook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class S3FileTransformOperator(BaseOperator):
"""
Copies data from a source S3 location to a temporary location on the
local filesystem. Runs a transformation on this file as specified by
the transformation script and uploads the output to a destination S3
location.
    The locations of the source and the destination files in the local
    filesystem are provided as the first and second arguments to the
    transformation script. The transformation script is expected to read the
data from source, transform it and write the output to the local
destination file. The operator then takes over control and uploads the
local destination file to S3.
S3 Select is also available to filter the source contents. Users can
omit the transformation script if S3 Select expression is specified.
:param source_s3_key: The key to be retrieved from S3. (templated)
:type source_s3_key: str
:param source_aws_conn_id: source s3 connection
:type source_aws_conn_id: str
    :param source_verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
- ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to uses.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
This is also applicable to ``dest_verify``.
:type source_verify: bool or str
    :param dest_s3_key: The key to be written to S3. (templated)
:type dest_s3_key: str
:param dest_aws_conn_id: destination s3 connection
:type dest_aws_conn_id: str
:param replace: Replace dest S3 key if it already exists
:type replace: bool
:param transform_script: location of the executable transformation script
:type transform_script: str
:param select_expression: S3 Select expression
:type select_expression: str
"""
template_fields = ('source_s3_key', 'dest_s3_key')
template_ext = ()
ui_color = '#f9c915'
@apply_defaults
def __init__(
self,
source_s3_key,
dest_s3_key,
transform_script=None,
select_expression=None,
source_aws_conn_id='aws_default',
source_verify=None,
dest_aws_conn_id='aws_default',
dest_verify=None,
replace=False,
*args, **kwargs):
super(S3FileTransformOperator, self).__init__(*args, **kwargs)
self.source_s3_key = source_s3_key
self.source_aws_conn_id = source_aws_conn_id
self.source_verify = source_verify
self.dest_s3_key = dest_s3_key
self.dest_aws_conn_id = dest_aws_conn_id
self.dest_verify = dest_verify
self.replace = replace
self.transform_script = transform_script
self.select_expression = select_expression
def execute(self, context):
if self.transform_script is None and self.select_expression is None:
raise AirflowException(
"Either transform_script or select_expression must be specified")
source_s3 = S3Hook(aws_conn_id=self.source_aws_conn_id,
verify=self.source_verify)
dest_s3 = S3Hook(aws_conn_id=self.dest_aws_conn_id,
verify=self.dest_verify)
self.log.info("Downloading source S3 file %s", self.source_s3_key)
if not source_s3.check_for_key(self.source_s3_key):
raise AirflowException(
"The source key {0} does not exist".format(self.source_s3_key))
source_s3_key_object = source_s3.get_key(self.source_s3_key)
with NamedTemporaryFile("wb") as f_source, NamedTemporaryFile("wb") as f_dest:
self.log.info(
"Dumping S3 file %s contents to local file %s",
self.source_s3_key, f_source.name
)
if self.select_expression is not None:
content = source_s3.select_key(
key=self.source_s3_key,
expression=self.select_expression
)
f_source.write(content.encode("utf-8"))
else:
source_s3_key_object.download_fileobj(Fileobj=f_source)
f_source.flush()
if self.transform_script is not None:
transform_script_process = subprocess.Popen(
[self.transform_script, f_source.name, f_dest.name],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
(transform_script_stdoutdata, transform_script_stderrdata) = \
transform_script_process.communicate()
self.log.info("Transform script stdout %s", transform_script_stdoutdata)
if transform_script_process.returncode > 0:
raise AirflowException(
"Transform script failed %s", transform_script_stderrdata)
else:
self.log.info(
"Transform script successful. Output temporarily located at %s",
f_dest.name
)
self.log.info("Uploading transformed file to S3")
f_dest.flush()
dest_s3.load_file(
filename=f_dest.name,
key=self.dest_s3_key,
replace=self.replace
)
self.log.info("Upload successful")
| apache-2.0 | 4,949,567,757,155,156,000 | 41.993631 | 88 | 0.638519 | false |
anthropo-lab/XP | sens_et_travail_project/redirect/views.py | 1 | 8538 | from otree.api import Currency as c, currency_range
from . import models
from ._builtin import Page, WaitPage
from .models import Constants, RedirectionManager
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic import TemplateView
from django.shortcuts import render
from django.http import JsonResponse
##################################
##################################
class ExpertsView(TemplateView):
template_name = "redirect/RedirectionManagement.html"
def get(self, request, *args, **kwargs):
# Get the data
managers = RedirectionManager.objects.all()
if len(managers) == 1:
my_manager = managers[0]
# Set the context
context = {
'manager_status': "ready",
'use_cookie': my_manager.use_cookie,
'current_redir1': my_manager.current_redir1,
'current_redir2': my_manager.current_redir2,
'current_redir3': my_manager.current_redir3,
'current_redir_conf_exp_main': my_manager.current_redir_conf_exp_main,
'current_redir_inner_xp': my_manager.current_redir_inner_xp,
}
return render(request=request, template_name = self.template_name, context=context)
else:
context = {
'manager_status': "not ready",
}
return render(request=request, template_name = self.template_name, context=context)
##################################
##################################
def ajax_set_redir1(request):
# Link to the proper RedirectionManager
address_from_client = request.GET.get('address', None)
managers = RedirectionManager.objects.all()
if len(managers) == 1:
my_manager = managers[0]
my_manager.current_redir1 = address_from_client
my_manager.save()
# Send back response
data = {
'address_setting_result': address_from_client,
}
return JsonResponse(data)
else:
# Send back response
data = {
'address_setting_result': "Manager is not ready, nothing set",
}
return JsonResponse(data)
def ajax_set_redir2(request):
# Link to the proper RedirectionManager
address_from_client = request.GET.get('address', None)
managers = RedirectionManager.objects.all()
if len(managers) == 1:
my_manager = managers[0]
my_manager.current_redir2 = address_from_client
my_manager.save()
# Send back response
data = {
'address_setting_result': address_from_client,
}
return JsonResponse(data)
else:
# Send back response
data = {
'address_setting_result': "Manager is not ready, nothing set",
}
return JsonResponse(data)
def ajax_set_redir3(request):
# Link to the proper RedirectionManager
address_from_client = request.GET.get('address', None)
managers = RedirectionManager.objects.all()
if len(managers) == 1:
my_manager = managers[0]
my_manager.current_redir3 = address_from_client
my_manager.save()
# Send back response
data = {
'address_setting_result': address_from_client,
}
return JsonResponse(data)
else:
# Send back response
data = {
'address_setting_result': "Manager is not ready, nothing set",
}
return JsonResponse(data)
def ajax_set_redir_inner_xp(request):
# Link to the proper RedirectionManager
address_from_client = request.GET.get('address', None)
managers = RedirectionManager.objects.all()
if len(managers) == 1:
my_manager = managers[0]
my_manager.current_redir_inner_xp = address_from_client
my_manager.save()
# Send back response
data = {
'address_setting_result': address_from_client,
}
return JsonResponse(data)
else:
# Send back response
data = {
'address_setting_result': "Manager is not ready, nothing set",
}
return JsonResponse(data)
def ajax_set_redir_conf_exp_main(request):
# Link to the proper RedirectionManager
address_from_client = request.GET.get('address', None)
managers = RedirectionManager.objects.all()
if len(managers) == 1:
my_manager = managers[0]
my_manager.current_redir_conf_exp_main = address_from_client
my_manager.save()
# Send back response
data = {
'address_setting_result': address_from_client,
}
return JsonResponse(data)
else:
# Send back response
data = {
'address_setting_result': "Manager is not ready, nothing set",
}
return JsonResponse(data)
##################################
#################################
def get_redir1(request):
if request.session.get("otree"):
cookie_parti_url = request.session["otree"]
return HttpResponseRedirect(cookie_parti_url)
else:
managers = RedirectionManager.objects.all()
if len(managers) == 1:
my_manager = managers[0]
if my_manager.current_redir1 == "closed":
return HttpResponse("Désolé, ce lien ne sera activé que lorsque l'expérience commencera")
else:
return HttpResponseRedirect(my_manager.current_redir1)
else:
return HttpResponse("Désolé, ce lien ne sera activé que lorsque l'expérience commencera")
def get_redir2(request):
if request.session.get("otree"):
cookie_parti_url = request.session["otree"]
return HttpResponseRedirect(cookie_parti_url)
else:
managers = RedirectionManager.objects.all()
if len(managers) == 1:
my_manager = managers[0]
if my_manager.current_redir2 == "closed":
return HttpResponse("Désolé, ce lien ne sera activé que lorsque l'expérience commencera")
else:
return HttpResponseRedirect(my_manager.current_redir2)
else:
HttpResponse("Désolé, ce lien ne sera activé que lorsque l'expérience commencera")
def get_redir3(request):
if request.session.get("otree"):
cookie_parti_url = request.session["otree"]
return HttpResponseRedirect(cookie_parti_url)
else:
managers = RedirectionManager.objects.all()
if len(managers) == 1:
my_manager = managers[0]
if my_manager.current_redir3 == "closed":
return HttpResponse("Désolé, ce lien ne sera activé que lorsque l'expérience commencera")
else:
return HttpResponseRedirect(my_manager.current_redir3)
else:
HttpResponse("Désolé, ce lien ne sera activé que lorsque l'expérience commencera")
#################################################
def get_redir_conf_exp_main(request):
if request.session.get("otree"):
cookie_parti_url = request.session["otree"]
return HttpResponseRedirect(cookie_parti_url)
else:
managers = RedirectionManager.objects.all()
if len(managers) == 1:
my_manager = managers[0]
if my_manager.current_redir_conf_exp_main == "closed":
return HttpResponse("Désolé, ce lien ne sera activé que lorsque la conférence commencera")
else:
return HttpResponseRedirect(my_manager.current_redir_conf_exp_main)
else:
HttpResponse("Désolé, ce lien ne sera activé que lorsque la conférence commencera")
def get_redir_inner_xp(request, sessioncode, participantcode):
managers = RedirectionManager.objects.all()
if len(managers) == 1:
my_manager = managers[0]
if my_manager.current_redir_inner_xp == "closed":
return HttpResponse("Désolé, ce lien ne sera activé que lorsque l'expérience commencera")
else:
url_with_participant_label = my_manager.current_redir_inner_xp\
+ "?participant_label="\
+ str(sessioncode) + "_" + str(participantcode)
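            # Resulting URL form:
            # "<current_redir_inner_xp>?participant_label=<sessioncode>_<participantcode>"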
print("url_with_participant_label: ", url_with_participant_label) #???
return HttpResponseRedirect(url_with_participant_label)
else:
HttpResponse("Désolé, ce lien ne sera activé que lorsque l'expérience commencera")
page_sequence = [
]
| gpl-3.0 | 3,616,413,761,281,950,700 | 36.27193 | 106 | 0.598729 | false |
robingall2910/RobTheBoat | commands/information.py | 1 | 19328 | import os
import socket
import datetime
import time
import traceback
from discord.ext import commands
from utils.tools import *
from utils.logger import log
from utils.unicode import *
from utils.config import Config
from usps import USPSApi
from fedex.services.track_service import FedexTrackRequest
from fedex.config import FedexConfig
config = Config()
halloween = datetime(2020, 10, 31)
christmas = datetime(2020, 12, 25)
newyear = datetime(2021, 1, 1)
usps = USPSApi(config._trackingKey)
FedexConfigObj = FedexConfig(key=config._fedexKey,
password=config._fedexPassword,
account_number='510087240',
meter_number='114084089',
freight_account_number=None,
use_test_server=False)
class Information(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def id(self, ctx, user:discord.User=None):
"""Gets your ID or if you @mention a user it gets their id"""
if user is None:
await ctx.send("Your ID is `{}`".format(ctx.message.author.id))
else:
await ctx.send("{}'s ID is `{}`".format(user, user.id))
@commands.guild_only()
@commands.command()
async def serverinfo(self, ctx):
"""Gets information on the current server"""
guild = ctx.guild
human_count = len([member for member in guild.members if not member.bot])
bot_count = len(([member for member in guild.members if member.bot]))
timeout_times = {60:"1 minute", 300:"5 minutes", 900:"15 minutes", 1800:"30 minutes", 3600:"1 hour"}
fields = {"ID":guild.id, "Created on":format_time(guild.created_at), "Region":guild.region, "Member Count ({} total)".format(len(guild.members)):"{} humans, {} bots".format(human_count, bot_count), "Channel Count ({} total)".format(len(guild.channels)):"{} text, {} voice".format(len(guild.text_channels), len(guild.voice_channels)), "Role Count":len(guild.roles), "Owner":guild.owner, "Owner ID":guild.owner_id, "AFK Channel":guild.afk_channel, "AFK Timeout":timeout_times[guild.afk_timeout], "Verification Level":str(ctx.guild.verification_level).capitalize().replace("High", tableflip).replace("Extreme", doubleflip), "2FA Enabled":convert_to_bool(guild.mfa_level)}
embed = make_list_embed(fields)
embed.title = guild.name
if ctx.me.color is not None:
embed.color = ctx.me.color
else:
embed.color = 0xff0000
if guild.icon_url:
embed.set_thumbnail(url=guild.icon_url)
await ctx.send(embed=embed)
@commands.guild_only()
@commands.command()
async def userinfo(self, ctx, *, user:discord.Member=None):
"""Gets your information or the information of the specified user"""
try:
if user is None:
user = ctx.author
voice_channel = None
self_mute = False
self_deaf = False
server_mute = False
server_deaf = False
if user.voice:
voice_channel = user.voice.channel
self_mute = user.voice.self_mute
self_deaf = user.voice.self_deaf
server_mute = user.voice.mute
server_deaf = user.voice.deaf
fields = {"ID":user.id, "Bot Account":user.bot, "Created on":format_time(user.created_at), "Status":user.status, "Role Count":len(user.roles), "Joined on":format_time(user.joined_at), "Nickname":user.nick, "Voice Channel":voice_channel, "Self Muted":self_mute, "Self Deafened":self_deaf, "Server Muted":server_mute, "Server Deafened":server_deaf}
embed = make_list_embed(fields)
embed.set_footer(text="Requested by {}".format(ctx.author), icon_url=ctx.author.avatar_url)
embed.title = str(user)
embed.color = user.color
embed.set_thumbnail(url=get_avatar(user))
await ctx.send(embed=embed)
except Exception:
await ctx.send(traceback.format_exc())
@commands.command(aliases=['tp', 'package'])
async def trackpackage(self, ctx, service: str, *, trackingnum: str):
"""Tracks your package for you."""
if service == "usps" or "USPS":
try:
track = usps.track(trackingnum)
print(track.result)
embed = discord.Embed(description=f"Currently {track.result['TrackResponse']['TrackInfo']['TrackSummary']['Event']} as of {track.result['TrackResponse']['TrackInfo']['TrackSummary']['EventDate']}")
embed.title = f"USPS Tracking - {track.result['TrackResponse']['TrackInfo']['@ID']}"
for u in range(0, len(track.result['TrackResponse']['TrackInfo']['TrackDetail'])):
tr = track.result['TrackResponse']['TrackInfo']['TrackDetail'][u]
if tr['EventState'] is None:
embed.add_field(name=f'Event #{u}', value=f"{tr['EventDate']} at {tr['EventTime']}: {tr['Event']} ({tr['EventCity']})")
else:
embed.add_field(name=f'Event #{u}', value=f"{tr['EventDate']} at {tr['EventTime']}: {tr['Event']} ({tr['EventCity']}, {tr['EventState']} {tr['EventZIPCode']})")
embed.set_footer(text='Timezone Warning: All the listed times are based on the area the package is in.')
await ctx.send(embed=embed)
except Exception:
await ctx.send(traceback.format_exc())
if service == "FedEx" or "fedex":
fedex = FedexTrackRequest()
fedex.SelectionDetails.PackageIdentifier.Type = 'TRACKING_NUMBER_OR_DOORTAG'
fedex.SelectionDetails.PackageIdentifier.Value = trackingnum
fedex.send_request()
print(fedex.response)
await ctx.send("should've recieved info")
else:
await ctx.send("No other service is available yet.")
@commands.command()
async def roleinfo(self, ctx, *, name:str):
"""Gets information on a role, warning, it might take up the entire screen"""
try:
role = discord.utils.get(ctx.guild.roles, name=name)
if role is None:
await ctx.send("`{}` isn't real. Or is it?".format(name))
return
color = role.color
count = len([member for member in ctx.guild.members if discord.utils.get(member.roles, name=role.name)])
perms = role.permissions
fields = {
"Position":role.position,
"User count":count,
"Mentionable":role.mentionable,
"Display seperately":role.hoist,"Add reactions":perms.add_reactions,
"Administrator":perms.administrator,
"Attach files":perms.attach_files,
"Ban members":perms.ban_members,
"Change nickname":perms.change_nickname,
"Connect":perms.connect,
"Create instant invites":perms.create_instant_invite,
"Deafen members":perms.deafen_members,
"Embed links":perms.embed_links,
"External emojis":perms.external_emojis,
"Kick members":perms.kick_members,
"Manage channels":perms.manage_channels,
"Manage emojis":perms.manage_emojis,
"Manage guild":perms.manage_guild,
"Manage messages":perms.manage_messages,
"Manage nicknames":perms.manage_nicknames,
"Manage roles":perms.manage_roles,
"Manage webhooks":perms.manage_webhooks,
"Mention everyone":perms.mention_everyone,
"Move members":perms.move_members,
"Mute members":perms.mute_members,
"Read message history":perms.read_message_history,
"Read messages":perms.read_messages,
"Send messages":perms.send_messages,
"Send TTS messages":perms.send_tts_messages,
"Speak":perms.speak,
"Use voice activation":perms.use_voice_activation,
"View audit logs":perms.view_audit_log
}
embed = make_list_embed(fields)
embed.set_footer(text="Requested by {}".format(ctx.author), icon_url=ctx.author.avatar_url)
embed.title = "{} - {}".format(role.name, role.id)
if color is None:
embed.color = None
else:
embed.color = color
await ctx.send(embed=embed)
except Exception:
await ctx.send(traceback.format_exc())
@commands.command()
async def avatar(self, ctx, *, user:discord.User=None):
"""Gets your avatar url or the avatar url of the specified user"""
if user is None:
user = ctx.message.author
if not user.avatar_url:
avatar_url = user.default_avatar_url
else:
avatar_url = user.avatar_url
await ctx.send("{}'s avatar url is: {}".format(user.mention, avatar_url))
@commands.command()
async def defaultavatar(self, ctx, *, user:discord.User=None):
"""Gets your default avatar url or the default avatar url of the specified user"""
if user is None:
user = ctx.message.author
await ctx.send("{}'s default avatar url is: {}".format(user.mention, user.default_avatar_url))
#/s
@commands.command()
async def emoteurl(self, ctx, *, emote:str):
"""Gets the url for a CUSTOM emote (meaning no unicode emotes)"""
emote_id = None
try:
if extract_emote_id(emote) is not None:
emote_id = extract_emote_id(emote)
except:
pass
if emote_id is None:
await ctx.send("That is not a custom emote")
return
await ctx.send("https://discordapp.com/api/emojis/{}.png".format(emote_id))
@commands.command()
async def discr(self, ctx, *, discriminator:str):
"""Gets a username#discriminator list of all users that the bot can see with the specified discriminator"""
members = []
for member in list(self.bot.get_all_members()):
if member.discriminator == discriminator and str(member) not in members:
members.append(str(member))
if len(members) == 0:
members = "Well, I don't see anyone with `{}` anywhere really...".format(discriminator)
else:
members = "```{}```".format(", ".join(members))
await ctx.send(members)
@commands.command()
async def daystillhalloween(self, ctx):
"""Displays how many days until it's halloween"""
await ctx.send("Days until halloween: `{} days`".format((halloween - datetime.today()).days))
@commands.command()
async def daystillchristmas(self, ctx):
"""Displays how many days until it's christmas"""
await ctx.send("Days until christmas: `{} days`".format((christmas - datetime.today()).days))
@commands.command()
async def daystillnewyears(self, ctx):
"""Displays how many days until it's the new year"""
await ctx.send("Days until new years: `{} days`".format((newyear - datetime.today()).days))
@commands.command()
async def getserverinfo(self, ctx, *, name:str):
"""Gets very basic server info on the server with the specified name"""
guild = discord.utils.get(self.bot.guilds, name=name)
if guild is None:
await ctx.send("I could not find a server by the name of `{}`".format(name))
else:
await ctx.send("```Name: {}\nID: {}\nOwner: {}\nOwner ID: {}\nMember count: {}\nDate created: {}```".format(guild.name, guild.id, guild.owner, guild.owner.id, len(guild.members), format_time(guild.created_at)))
@commands.command()
async def isitdown(self, ctx, *, url:str):
"""Checks to see if a website is online or not"""
await ctx.channel.trigger_typing()
url = url.strip("<>")
if not url.startswith("http://") and not url.startswith("https://"):
url = "http://{}".format(url)
try:
starttime = time.time()
requests.get(url, timeout=3)
ping = "%.01f seconds" % (time.time() - starttime)
await ctx.send("`{}` is online. Ping time is `{}`".format(url, ping))
except Exception as e:
await ctx.send("`{}` is offline.".format(url))
await ctx.send("Error {}".format(e))
@commands.command()
async def getemotes(self, ctx):
"""Gets a list of the server's emotes"""
emotes = ctx.guild.emojis
if len(emotes) == 0:
await ctx.send("This server does not have any emotes!")
return
emotes = ["`:{}:` = {}".format(emote.name, emote) for emote in emotes]
await ctx.send("Current emotes for this server\n" + "\n".join(emotes))
@commands.command()
async def osu(self, ctx, *, username:str):
"""Gets an osu! profile stats with the specified name"""
if not config.enableOsu:
await ctx.send("The osu! command has been disabled.")
return
try:
import osuapi
except ImportError:
log.critical("The osu api is enabled, but the osuapi module was not found! Please run \"pip install osuapi\"")
await ctx.send("Couldn't import the osu! api module, contact the bot developer!")
return
await ctx.channel.trigger_typing()
api = osuapi.OsuApi(config.osuKey, connector=osuapi.AHConnector())
try:
user = await api.get_user(username)
except osuapi.HTTPError as e:
if e.code == 401:
log.critical("An invalid osu! api key was set, please check the config for instructions on how to get a proper api key!")
await ctx.send("An invalid osu! api key was set, contact the bot developer!")
else:
log.critical("An unknown error occured while trying to get an osu! profile.")
await ctx.send("An unknown error occured while trying to get that user's osu! profile, contact the bot developer!")
try:
user = user[0]
except IndexError:
await ctx.send("Could find any osu! profile named `{}`".format(username))
return
fields = {"ID":user.user_id, "Country":user.country, "Level":int(user.level), "Hits":user.total_hits, "Score":user.total_score, "Accuracy":"{0:.2f}%".format(user.accuracy), "Play Count":user.playcount, "Ranked Score":user.ranked_score, "A rank":user.count_rank_a, "S rank":user.count_rank_s, "SS rank":user.count_rank_ss}
embed = make_list_embed(fields)
embed.title = "{}'s osu! Stats".format(user.username)
embed.color = 0xFF00FF
embed.set_thumbnail(url="http://s.ppy.sh/a/{}".format(user.user_id))
await ctx.send(embed=embed)
@commands.command()
async def donate(self, ctx):
"""give me money"""
await ctx.send("Have money? Want to give it to me? https://donate.dragonfire.me/")
@commands.command()
async def st(self, ctx):
"""Speedtest.net results"""
rb = "```rb\n{0}\n```"
await ctx.channel.trigger_typing()
msg = "speedtest-cli --share --simple"
input = os.popen(msg)
output = input.read()
await ctx.send(rb.format(output))
# msg.replace("serverip", "Server IP").replace("\n", "\n").replace("\"", "").replace("b'", "").replace("'",
# "")))
@commands.command()
async def emoteinfo(self, ctx, *, emote:discord.Emoji):
"""Gets information on a custom emote (Only works for servers the bot is on)"""
fields = {"Name":emote.name, "ID":emote.id, "Server Origin":emote.guild.name, "Created On":format_time(emote.created_at), "Colons Required":emote.require_colons, "Managed by Twitch":emote.managed}
embed = make_list_embed(fields)
embed.title = ":{}:".format(emote.name)
embed.color = 0xFF0000
embed.set_thumbnail(url=emote.url)
await ctx.send(embed=embed)
@commands.command()
async def ipping(self, ctx, *, ip: str):
"""Pings to an ip address or domain"""
rb = "```rb\n{0}\n```"
await ctx.channel.trigger_typing()
msg = "ping -c 4 {0}".format(ip)
input = os.popen(msg)
output = input.read()
await ctx.send(rb.format(output))
@commands.command()
async def traceroute(self, ctx, *, ip: str):
"""Traces the route to the connection of a website or IP"""
rb = "```rb\n{0}\n```"
await ctx.channel.trigger_typing()
msg = "traceroute {0}".format(ip)
input = os.popen(msg)
output = input.read()
await ctx.send(rb.format(output))
@commands.command()
async def getnumericip(self, ctx, address:str):
"""Resolves the numeric ip of a domain"""
try:
await ctx.send(socket.gethostbyname(address))
except socket.gaierror:
await ctx.send("`{}` is not a valid address".format(address))
@commands.command()
async def getuserbyid(self, ctx, id:int):
"""Gets a user by id"""
user = discord.utils.get(list(self.bot.get_all_members()), id=id)
if not user:
await ctx.send("Could not find any user in my mutual servers with an ID of `{}`".format(id))
return
if user.activity:
game = user.activity.name
else:
game = None
fields = {"Name":user.name, "Discriminator":user.discriminator, "ID":user.id, "Status":str(user.status).replace("dnd", "do not disturb"), "Game":game, "Bot":user.bot}
embed = make_list_embed(fields)
embed.title = str(user)
embed.color = 0xFF0000
embed.set_thumbnail(url=get_avatar(user))
await ctx.send(embed=embed)
@commands.guild_only()
@commands.command()
async def roleid(self, ctx, role:discord.Role):
"""Gets the id for the specified role"""
await ctx.send("The role ID for `{}` is `{}`".format(role.name, role.id))
@commands.command(aliases=['rl'])
async def reverselookup(self, ctx, username: str):
"""reverse looks up a user"""
try:
for guild in self.bot.guilds:
result = discord.utils.find(lambda m: m.name == username, guild.members)
if result is not None:
await ctx.send(f"Result found in {result.guild} - ID {result.id}")
except Exception:
await ctx.send(traceback.format_exc())
def setup(bot):
bot.add_cog(Information(bot)) | mit | 9,213,162,191,118,445,000 | 47.060914 | 676 | 0.575486 | false |
cprov/snapcraft | tests/unit/test_options.py | 3 | 10604 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os.path
from unittest import mock
import testtools
from testtools.matchers import Equals
import snapcraft
from snapcraft.project._project_options import (
_get_platform_architecture,
_32BIT_USERSPACE_ARCHITECTURE,
)
from snapcraft.internal import common
from snapcraft.internal.errors import SnapcraftEnvironmentError
from tests import unit
class NativeOptionsTestCase(unit.TestCase):
scenarios = [
(
"amd64",
dict(
machine="x86_64",
architecture=("64bit", "ELF"),
expected_arch_triplet="x86_64-linux-gnu",
expected_deb_arch="amd64",
expected_kernel_arch="x86",
expected_core_dynamic_linker="lib64/ld-linux-x86-64.so.2",
),
),
(
"amd64-kernel-i686-userspace",
dict(
machine="x86_64",
architecture=("32bit", "ELF"),
expected_arch_triplet="i386-linux-gnu",
expected_deb_arch="i386",
expected_kernel_arch="x86",
expected_core_dynamic_linker="lib/ld-linux.so.2",
),
),
(
"i686",
dict(
machine="i686",
architecture=("32bit", "ELF"),
expected_arch_triplet="i386-linux-gnu",
expected_deb_arch="i386",
expected_kernel_arch="x86",
expected_core_dynamic_linker="lib/ld-linux.so.2",
),
),
(
"armv7l",
dict(
machine="armv7l",
architecture=("32bit", "ELF"),
expected_arch_triplet="arm-linux-gnueabihf",
expected_deb_arch="armhf",
expected_kernel_arch="arm",
expected_core_dynamic_linker="lib/ld-linux-armhf.so.3",
),
),
(
"aarch64",
dict(
machine="aarch64",
architecture=("64bit", "ELF"),
expected_arch_triplet="aarch64-linux-gnu",
expected_deb_arch="arm64",
expected_kernel_arch="arm64",
expected_core_dynamic_linker="lib/ld-linux-aarch64.so.1",
),
),
(
"aarch64-kernel-armv7l-userspace",
dict(
machine="aarch64",
architecture=("32bit", "ELF"),
expected_arch_triplet="arm-linux-gnueabihf",
expected_deb_arch="armhf",
expected_kernel_arch="arm",
expected_core_dynamic_linker="lib/ld-linux-armhf.so.3",
),
),
(
"armv8l-kernel-armv7l-userspace",
dict(
machine="armv8l",
architecture=("32bit", "ELF"),
expected_arch_triplet="arm-linux-gnueabihf",
expected_deb_arch="armhf",
expected_kernel_arch="arm",
expected_core_dynamic_linker="lib/ld-linux-armhf.so.3",
),
),
(
"ppc",
dict(
machine="ppc",
architecture=("32bit", "ELF"),
expected_arch_triplet="powerpc-linux-gnu",
expected_deb_arch="powerpc",
expected_kernel_arch="powerpc",
expected_core_dynamic_linker="lib/ld-linux.so.2",
),
),
(
"ppc64le",
dict(
machine="ppc64le",
architecture=("64bit", "ELF"),
expected_arch_triplet="powerpc64le-linux-gnu",
expected_deb_arch="ppc64el",
expected_kernel_arch="powerpc",
expected_core_dynamic_linker="lib64/ld64.so.2",
),
),
(
"ppc64le-kernel-ppc-userspace",
dict(
machine="ppc64le",
architecture=("32bit", "ELF"),
expected_arch_triplet="powerpc-linux-gnu",
expected_deb_arch="powerpc",
expected_kernel_arch="powerpc",
expected_core_dynamic_linker="lib/ld-linux.so.2",
),
),
(
"s390x",
dict(
machine="s390x",
architecture=("64bit", "ELF"),
expected_arch_triplet="s390x-linux-gnu",
expected_deb_arch="s390x",
expected_kernel_arch="s390",
expected_core_dynamic_linker="lib/ld64.so.1",
),
),
]
@mock.patch("platform.architecture")
@mock.patch("platform.machine")
def test_architecture_options(
self, mock_platform_machine, mock_platform_architecture
):
mock_platform_machine.return_value = self.machine
mock_platform_architecture.return_value = self.architecture
options = snapcraft.ProjectOptions()
self.assertThat(options.arch_triplet, Equals(self.expected_arch_triplet))
self.assertThat(options.deb_arch, Equals(self.expected_deb_arch))
self.assertThat(options.kernel_arch, Equals(self.expected_kernel_arch))
# The core dynamic linker is correct. Guard against stray absolute
# paths, as they cause os.path.join to discard the previous
# argument.
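        # (For example, os.path.join("a", "/b") evaluates to "/b", dropping "a".)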
self.assertFalse(os.path.isabs(self.expected_core_dynamic_linker))
with mock.patch("os.path.lexists") as mock_lexists:
mock_lexists.return_value = True
with mock.patch("os.path.islink") as mock_islink:
mock_islink.return_value = False
self.assertThat(
options.get_core_dynamic_linker("core"),
Equals(
os.path.join(
common.get_core_path("core"),
self.expected_core_dynamic_linker,
)
),
)
@mock.patch("platform.architecture")
@mock.patch("platform.machine")
def test_get_platform_architecture(
self, mock_platform_machine, mock_platform_architecture
):
mock_platform_machine.return_value = self.machine
mock_platform_architecture.return_value = self.architecture
platform_arch = _get_platform_architecture()
userspace_conversions = _32BIT_USERSPACE_ARCHITECTURE
if self.architecture[0] == "32bit" and self.machine in userspace_conversions:
self.assertThat(platform_arch, Equals(userspace_conversions[self.machine]))
else:
self.assertThat(platform_arch, Equals(self.machine))
class OptionsTestCase(unit.TestCase):
def test_cross_compiler_prefix_missing(self):
options = snapcraft.ProjectOptions(target_deb_arch="x86_64")
with testtools.ExpectedException(
SnapcraftEnvironmentError,
"Cross compilation not supported for target arch 'x86_64'",
):
options.cross_compiler_prefix
@mock.patch("platform.architecture")
@mock.patch("platform.machine")
def test_cross_compiler_prefix_empty(
self, mock_platform_machine, mock_platform_architecture
):
mock_platform_machine.return_value = "x86_64"
mock_platform_architecture.return_value = ("64bit", "ELF")
options = snapcraft.ProjectOptions(target_deb_arch="i386")
self.assertThat(options.cross_compiler_prefix, Equals(""))
class TestHostIsCompatibleWithTargetBase(unit.TestCase):
scenarios = (
("trusty core", dict(codename="trusty", base="core", is_compatible=True)),
("xenial core", dict(codename="xenial", base="core", is_compatible=True)),
("bionic core", dict(codename="bionic", base="core", is_compatible=False)),
("trusty core18", dict(codename="trusty", base="core18", is_compatible=True)),
("xenial core18", dict(codename="xenial", base="core18", is_compatible=True)),
("bionic core18", dict(codename="bionic", base="core18", is_compatible=True)),
(
"Random codename core18",
dict(codename="random", base="core18", is_compatible=False),
),
(
"trusty unknown-base",
dict(codename="trusty", base="unknown", is_compatible=False),
),
)
def setUp(self):
super().setUp()
patcher = mock.patch("snapcraft.internal.os_release.OsRelease.version_codename")
self.codename_mock = patcher.start()
self.addCleanup(patcher.stop)
def test_compatibility(self):
self.codename_mock.return_value = self.codename
self.assertThat(
snapcraft.ProjectOptions().is_host_compatible_with_base(self.base),
Equals(self.is_compatible),
)
class TestLinkerVersionForBase(unit.TestCase):
def setUp(self):
super().setUp()
patcher = mock.patch("snapcraft.file_utils.get_linker_version_from_file")
self.get_linker_version_mock = patcher.start()
self.addCleanup(patcher.stop)
def test_get_linker_version_for_core(self):
self.assertThat(
snapcraft.ProjectOptions()._get_linker_version_for_base("core"),
Equals("2.23"),
)
self.get_linker_version_mock.assert_not_called()
def test_get_linker_version_for_core18(self):
self.assertThat(
snapcraft.ProjectOptions()._get_linker_version_for_base("core18"),
Equals("2.27"),
)
self.get_linker_version_mock.assert_not_called()
def test_get_linker_version_for_random_core(self):
self.get_linker_version_mock.return_value = "4.10"
self.assertThat(
snapcraft.ProjectOptions()._get_linker_version_for_base("random"),
Equals("4.10"),
)
self.get_linker_version_mock.assert_called_once_with("ld-2.23.so")
| gpl-3.0 | 6,656,563,746,960,375,000 | 35.947735 | 88 | 0.566862 | false |
GenericStudent/home-assistant | homeassistant/components/harman_kardon_avr/media_player.py | 14 | 3346 | """Support for interface with an Harman/Kardon or JBL AVR."""
import hkavr
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT, STATE_OFF, STATE_ON
import homeassistant.helpers.config_validation as cv
DEFAULT_NAME = "Harman Kardon AVR"
DEFAULT_PORT = 10025
SUPPORT_HARMAN_KARDON_AVR = (
SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_MUTE
| SUPPORT_TURN_OFF
| SUPPORT_TURN_ON
| SUPPORT_SELECT_SOURCE
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
def setup_platform(hass, config, add_entities, discover_info=None):
"""Set up the AVR platform."""
name = config[CONF_NAME]
host = config[CONF_HOST]
port = config[CONF_PORT]
avr = hkavr.HkAVR(host, port, name)
avr_device = HkAvrDevice(avr)
add_entities([avr_device], True)
class HkAvrDevice(MediaPlayerEntity):
"""Representation of a Harman Kardon AVR / JBL AVR TV."""
def __init__(self, avr):
"""Initialize a new HarmanKardonAVR."""
self._avr = avr
self._name = avr.name
self._host = avr.host
self._port = avr.port
self._source_list = avr.sources
self._state = None
self._muted = avr.muted
self._current_source = avr.current_source
def update(self):
"""Update the state of this media_player."""
if self._avr.is_on():
self._state = STATE_ON
elif self._avr.is_off():
self._state = STATE_OFF
else:
self._state = None
self._muted = self._avr.muted
self._current_source = self._avr.current_source
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def is_volume_muted(self):
"""Muted status not available."""
return self._muted
@property
def source(self):
"""Return the current input source."""
return self._current_source
@property
def source_list(self):
"""Available sources."""
return self._source_list
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_HARMAN_KARDON_AVR
def turn_on(self):
"""Turn the AVR on."""
self._avr.power_on()
def turn_off(self):
"""Turn off the AVR."""
self._avr.power_off()
def select_source(self, source):
"""Select input source."""
return self._avr.select_source(source)
def volume_up(self):
"""Volume up the AVR."""
return self._avr.volume_up()
def volume_down(self):
"""Volume down AVR."""
return self._avr.volume_down()
def mute_volume(self, mute):
"""Send mute command."""
return self._avr.mute(mute)
| apache-2.0 | 3,908,575,964,110,652,000 | 24.937984 | 84 | 0.61327 | false |
simonwydooghe/ansible | test/units/modules/network/fortios/test_fortios_user_device_category.py | 21 | 8037 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_user_device_category
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_user_device_category.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_user_device_category_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_device_category': {
'comment': 'Comment.',
'desc': 'test_value_4',
'name': 'default_name_5'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device_category.fortios_user(input_data, fos_instance)
expected_data = {
'comment': 'Comment.',
'desc': 'test_value_4',
'name': 'default_name_5'
}
set_method_mock.assert_called_with('user', 'device-category', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_user_device_category_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_device_category': {
'comment': 'Comment.',
'desc': 'test_value_4',
'name': 'default_name_5'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device_category.fortios_user(input_data, fos_instance)
expected_data = {
'comment': 'Comment.',
'desc': 'test_value_4',
'name': 'default_name_5'
}
set_method_mock.assert_called_with('user', 'device-category', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_user_device_category_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'user_device_category': {
'comment': 'Comment.',
'desc': 'test_value_4',
'name': 'default_name_5'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device_category.fortios_user(input_data, fos_instance)
delete_method_mock.assert_called_with('user', 'device-category', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_user_device_category_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'user_device_category': {
'comment': 'Comment.',
'desc': 'test_value_4',
'name': 'default_name_5'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device_category.fortios_user(input_data, fos_instance)
delete_method_mock.assert_called_with('user', 'device-category', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_user_device_category_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_device_category': {
'comment': 'Comment.',
'desc': 'test_value_4',
'name': 'default_name_5'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device_category.fortios_user(input_data, fos_instance)
expected_data = {
'comment': 'Comment.',
'desc': 'test_value_4',
'name': 'default_name_5'
}
set_method_mock.assert_called_with('user', 'device-category', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_user_device_category_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'user_device_category': {
'random_attribute_not_valid': 'tag',
'comment': 'Comment.',
'desc': 'test_value_4',
'name': 'default_name_5'
},
'vdom': 'root'}
is_error, changed, response = fortios_user_device_category.fortios_user(input_data, fos_instance)
expected_data = {
'comment': 'Comment.',
'desc': 'test_value_4',
'name': 'default_name_5'
}
set_method_mock.assert_called_with('user', 'device-category', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 | 6,451,249,012,014,730,000 | 35.69863 | 142 | 0.653975 | false |
marios-zindilis/musicbrainz-django-models | musicbrainz_django_models/models/l_release_group_release_group.py | 1 | 2249 | """
.. module:: l_release_group_release_group
The **L Release Group Release Group** Model.
PostgreSQL Definition
---------------------
The :code:`l_release_group_release_group` table is defined in the MusicBrainz Server as:
.. code-block:: sql
CREATE TABLE l_release_group_release_group ( -- replicate
id SERIAL,
link INTEGER NOT NULL, -- references link.id
entity0 INTEGER NOT NULL, -- references release_group.id
entity1 INTEGER NOT NULL, -- references release_group.id
edits_pending INTEGER NOT NULL DEFAULT 0 CHECK (edits_pending >= 0),
last_updated TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
link_order INTEGER NOT NULL DEFAULT 0 CHECK (link_order >= 0),
entity0_credit TEXT NOT NULL DEFAULT '',
entity1_credit TEXT NOT NULL DEFAULT ''
);
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class l_release_group_release_group(models.Model):
"""
Not all parameters are listed here, only those that present some interest
in their Django implementation.
:param int edits_pending: the MusicBrainz Server uses a PostgreSQL `check`
to validate that the value is a positive integer. In Django, this is
done with `models.PositiveIntegerField()`.
:param int link_order: the MusicBrainz Server uses a PostgreSQL `check`
to validate that the value is a positive integer. In Django, this is
done with `models.PositiveIntegerField()`.
"""
id = models.AutoField(primary_key=True)
link = models.ForeignKey('link')
entity0 = models.ForeignKey('release_group', related_name='links_to_release_group')
entity1 = models.ForeignKey('release_group')
edits_pending = models.PositiveIntegerField(default=0)
last_updated = models.DateTimeField(auto_now=True)
link_order = models.PositiveIntegerField(default=0)
    entity0_credit = models.TextField(default='')
    entity1_credit = models.TextField(default='')
def __str__(self):
return 'L Release Group Release Group'
class Meta:
db_table = 'l_release_group_release_group'
| gpl-2.0 | 3,216,969,509,217,047,600 | 37.118644 | 88 | 0.663851 | false |
alonho/logbook | logbook/more.py | 3 | 14393 | # -*- coding: utf-8 -*-
"""
logbook.more
~~~~~~~~~~~~
Fancy stuff for logbook.
:copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import re
import os
from collections import defaultdict
from cgi import parse_qsl
from logbook.base import RecordDispatcher, dispatch_record, NOTSET, ERROR, NOTICE
from logbook.handlers import Handler, StringFormatter, \
StringFormatterHandlerMixin, StderrHandler
from logbook._termcolors import colorize
from logbook.helpers import PY2, string_types, iteritems
from logbook.ticketing import TicketingHandler as DatabaseHandler
from logbook.ticketing import BackendBase
if PY2:
from urllib import urlencode
else:
from urllib.parse import urlencode
_ws_re = re.compile(r'(\s+)(?u)')
TWITTER_FORMAT_STRING = \
u'[{record.channel}] {record.level_name}: {record.message}'
TWITTER_ACCESS_TOKEN_URL = 'https://twitter.com/oauth/access_token'
NEW_TWEET_URL = 'https://api.twitter.com/1/statuses/update.json'
class CouchDBBackend(BackendBase):
"""Implements a backend that writes into a CouchDB database.
"""
def setup_backend(self):
from couchdb import Server
uri = self.options.pop('uri', u'')
couch = Server(uri)
db_name = self.options.pop('db')
self.database = couch[db_name]
def record_ticket(self, record, data, hash, app_id):
"""Records a log record as ticket.
"""
db = self.database
ticket = record.to_dict()
ticket["time"] = ticket["time"].isoformat() + "Z"
        db.save(ticket)
class TwitterFormatter(StringFormatter):
"""Works like the standard string formatter and is used by the
:class:`TwitterHandler` unless changed.
"""
max_length = 140
def format_exception(self, record):
return u'%s: %s' % (record.exception_shortname,
record.exception_message)
def __call__(self, record, handler):
formatted = StringFormatter.__call__(self, record, handler)
rv = []
length = 0
for piece in _ws_re.split(formatted):
length += len(piece)
if length > self.max_length:
if length - len(piece) < self.max_length:
rv.append(u'…')
break
rv.append(piece)
return u''.join(rv)
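# Illustrative sketch (not part of the upstream module): TwitterFormatter cuts
# the formatted record on whitespace boundaries so the result never exceeds
# ``max_length`` characters, appending an ellipsis when it has to truncate.
# Assuming ``record`` and ``handler`` come from a normal logbook setup:
#
#     fmt = TwitterFormatter(TWITTER_FORMAT_STRING)
#     tweet_text = fmt(record, handler)   # at most 140 characters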
class TaggingLogger(RecordDispatcher):
"""A logger that attaches a tag to each record. This is an alternative
record dispatcher that does not use levels but tags to keep log
records apart. It is constructed with a descriptive name and at least
one tag. The tags are up for you to define::
logger = TaggingLogger('My Logger', ['info', 'warning'])
For each tag defined that way, a method appears on the logger with
that name::
logger.info('This is a info message')
To dispatch to different handlers based on tags you can use the
:class:`TaggingHandler`.
The tags themselves are stored as list named ``'tags'`` in the
:attr:`~logbook.LogRecord.extra` dictionary.
"""
def __init__(self, name=None, tags=None):
RecordDispatcher.__init__(self, name)
# create a method for each tag named
list(setattr(self, tag, lambda msg, *args, **kwargs:
self.log(tag, msg, *args, **kwargs)) for tag in (tags or ()))
def log(self, tags, msg, *args, **kwargs):
if isinstance(tags, string_types):
tags = [tags]
exc_info = kwargs.pop('exc_info', None)
extra = kwargs.pop('extra', {})
extra['tags'] = list(tags)
return self.make_record_and_handle(NOTSET, msg, args, kwargs,
exc_info, extra)
class TaggingHandler(Handler):
"""A handler that logs for tags and dispatches based on those.
Example::
import logbook
from logbook.more import TaggingHandler
handler = TaggingHandler(dict(
info=OneHandler(),
warning=AnotherHandler()
))
"""
def __init__(self, handlers, filter=None, bubble=False):
Handler.__init__(self, NOTSET, filter, bubble)
assert isinstance(handlers, dict)
self._handlers = dict(
(tag, isinstance(handler, Handler) and [handler] or handler)
for (tag, handler) in iteritems(handlers))
def emit(self, record):
for tag in record.extra.get('tags', ()):
for handler in self._handlers.get(tag, ()):
handler.handle(record)
class TwitterHandler(Handler, StringFormatterHandlerMixin):
"""A handler that logs to twitter. Requires that you sign up an
application on twitter and request xauth support. Furthermore the
oauth2 library has to be installed.
If you don't want to register your own application and request xauth
credentials, there are a couple of leaked consumer key and secret
    pairs from applications explicitly whitelisted at Twitter
(`leaked secrets <http://bit.ly/leaked-secrets>`_).
"""
default_format_string = TWITTER_FORMAT_STRING
formatter_class = TwitterFormatter
def __init__(self, consumer_key, consumer_secret, username,
password, level=NOTSET, format_string=None, filter=None,
bubble=False):
Handler.__init__(self, level, filter, bubble)
StringFormatterHandlerMixin.__init__(self, format_string)
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.username = username
self.password = password
try:
import oauth2
except ImportError:
raise RuntimeError('The python-oauth2 library is required for '
'the TwitterHandler.')
self._oauth = oauth2
self._oauth_token = None
self._oauth_token_secret = None
self._consumer = oauth2.Consumer(consumer_key,
consumer_secret)
self._client = oauth2.Client(self._consumer)
def get_oauth_token(self):
"""Returns the oauth access token."""
if self._oauth_token is None:
resp, content = self._client.request(
TWITTER_ACCESS_TOKEN_URL + '?', 'POST',
body=urlencode({
'x_auth_username': self.username.encode('utf-8'),
'x_auth_password': self.password.encode('utf-8'),
'x_auth_mode': 'client_auth'
}),
headers={'Content-Type': 'application/x-www-form-urlencoded'}
)
if resp['status'] != '200':
raise RuntimeError('unable to login to Twitter')
data = dict(parse_qsl(content))
self._oauth_token = data['oauth_token']
self._oauth_token_secret = data['oauth_token_secret']
return self._oauth.Token(self._oauth_token,
self._oauth_token_secret)
def make_client(self):
"""Creates a new oauth client auth a new access token."""
return self._oauth.Client(self._consumer, self.get_oauth_token())
def tweet(self, status):
"""Tweets a given status. Status must not exceed 140 chars."""
client = self.make_client()
resp, content = client.request(NEW_TWEET_URL, 'POST',
body=urlencode({'status': status.encode('utf-8')}),
headers={'Content-Type': 'application/x-www-form-urlencoded'})
return resp['status'] == '200'
def emit(self, record):
self.tweet(self.format(record))
class JinjaFormatter(object):
"""A formatter object that makes it easy to format using a Jinja 2
template instead of a format string.
"""
def __init__(self, template):
try:
from jinja2 import Template
except ImportError:
raise RuntimeError('The jinja2 library is required for '
'the JinjaFormatter.')
self.template = Template(template)
def __call__(self, record, handler):
return self.template.render(record=record, handler=handler)
class ExternalApplicationHandler(Handler):
"""This handler invokes an external application to send parts of
the log record to. The constructor takes a list of arguments that
are passed to another application where each of the arguments is a
format string, and optionally a format string for data that is
passed to stdin.
For example it can be used to invoke the ``say`` command on OS X::
from logbook.more import ExternalApplicationHandler
say_handler = ExternalApplicationHandler(['say', '{record.message}'])
    Note that the above example is blocking until ``say`` finishes, so it's
recommended to combine this handler with the
:class:`logbook.ThreadedWrapperHandler` to move the execution into
a background thread.
.. versionadded:: 0.3
"""
def __init__(self, arguments, stdin_format=None,
encoding='utf-8', level=NOTSET, filter=None,
bubble=False):
Handler.__init__(self, level, filter, bubble)
self.encoding = encoding
self._arguments = list(arguments)
if stdin_format is not None:
stdin_format = stdin_format
self._stdin_format = stdin_format
import subprocess
self._subprocess = subprocess
def emit(self, record):
args = [arg.format(record=record).encode(self.encoding)
for arg in self._arguments]
if self._stdin_format is not None:
stdin_data = self._stdin_format.format(record=record) \
.encode(self.encoding)
stdin = self._subprocess.PIPE
else:
stdin = None
c = self._subprocess.Popen(args, stdin=stdin)
if stdin is not None:
c.communicate(stdin_data)
c.wait()
class ColorizingStreamHandlerMixin(object):
"""A mixin class that does colorizing.
.. versionadded:: 0.3
"""
def should_colorize(self, record):
"""Returns `True` if colorizing should be applied to this
record. The default implementation returns `True` if the
stream is a tty and we are not executing on windows.
"""
if os.name == 'nt':
return False
isatty = getattr(self.stream, 'isatty', None)
return isatty and isatty()
def get_color(self, record):
"""Returns the color for this record."""
if record.level >= ERROR:
return 'red'
elif record.level >= NOTICE:
return 'yellow'
return 'lightgray'
def format_and_encode(self, record):
rv = super(ColorizingStreamHandlerMixin, self) \
.format_and_encode(record)
if self.should_colorize(record):
color = self.get_color(record)
if color:
rv = colorize(color, rv)
return rv
class ColorizedStderrHandler(ColorizingStreamHandlerMixin, StderrHandler):
"""A colorizing stream handler that writes to stderr. It will only
colorize if a terminal was detected. Note that this handler does
not colorize on Windows systems.
.. versionadded:: 0.3
"""
# backwards compat. Should go away in some future releases
from logbook.handlers import FingersCrossedHandler as \
FingersCrossedHandlerBase
class FingersCrossedHandler(FingersCrossedHandlerBase):
def __init__(self, *args, **kwargs):
FingersCrossedHandlerBase.__init__(self, *args, **kwargs)
from warnings import warn
warn(PendingDeprecationWarning('fingers crossed handler changed '
'location. It\'s now a core component of Logbook.'))
class ExceptionHandler(Handler, StringFormatterHandlerMixin):
"""An exception handler which raises exceptions of the given `exc_type`.
This is especially useful if you set a specific error `level` e.g. to treat
warnings as exceptions::
from logbook.more import ExceptionHandler
class ApplicationWarning(Exception):
pass
exc_handler = ExceptionHandler(ApplicationWarning, level='WARNING')
.. versionadded:: 0.3
"""
def __init__(self, exc_type, level=NOTSET, format_string=None,
filter=None, bubble=False):
Handler.__init__(self, level, filter, bubble)
StringFormatterHandlerMixin.__init__(self, format_string)
self.exc_type = exc_type
def handle(self, record):
if self.should_handle(record):
raise self.exc_type(self.format(record))
return False
class DedupHandler(Handler):
"""A handler that deduplicates log messages.
It emits each unique log record once, along with the number of times it was emitted.
Example:::
with logbook.more.DedupHandler():
logbook.error('foo')
logbook.error('bar')
logbook.error('foo')
The expected output:::
message repeated 2 times: foo
message repeated 1 times: bar
"""
def __init__(self, format_string='message repeated {count} times: {message}', *args, **kwargs):
Handler.__init__(self, bubble=False, *args, **kwargs)
self._format_string = format_string
self.clear()
def clear(self):
self._message_to_count = defaultdict(int)
self._unique_ordered_records = []
def pop_application(self):
Handler.pop_application(self)
self.flush()
def pop_thread(self):
Handler.pop_thread(self)
self.flush()
def handle(self, record):
        if record.message not in self._message_to_count:
self._unique_ordered_records.append(record)
self._message_to_count[record.message] += 1
return True
def flush(self):
for record in self._unique_ordered_records:
record.message = self._format_string.format(message=record.message, count=self._message_to_count[record.message])
            # record.dispatcher is the logger that created the message; it's sometimes suppressed (by logbook.info for example)
dispatch = record.dispatcher.call_handlers if record.dispatcher is not None else dispatch_record
dispatch(record)
self.clear()
| bsd-3-clause | 8,318,399,578,049,945,000 | 34.272059 | 125 | 0.621152 | false |
Lysxia/dissemin | dissemin/settings/common.py | 1 | 9232 | # -*- encoding: utf-8 -*-
# Dissemin: open access policy enforcement tool
# Copyright (C) 2014 Antonin Delpeuch
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
"""
Django settings for dissemin project.
See the doc for details of usage:
http://dissemin.readthedocs.org/en/latest/install.html
For the full list of Django settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from datetime import timedelta
from django.utils.translation import ugettext_lazy as _
try:
from .secret import SECRET_KEY, DATABASES, EMAIL_HOST, EMAIL_HOST_USER, EMAIL_HOST_PASSWORD, EMAIL_USE_TLS, ROMEO_API_KEY, CORE_API_KEY, REDIS_HOST, REDIS_PORT, REDIS_DB, REDIS_PASSWORD
except ImportError as e:
raise RuntimeError('Secret file is missing, did you forget to add a secret.py in your settings folder?')
try:
from .university import UNIVERSITY_BRANDING, CAS_SERVER_URL, CAS_LOGOUT_COMPLETELY, CAS_PROVIDE_URL_TO_LOGOUT, ENABLE_CAS
except ImportError as e:
raise RuntimeError('University-specific file is missing, did you forget to add a university.py in your settings folder?')
# __file__ → repo/dissemin/settings/common.py
# .. → repo/dissemin/settings
# .. → repo/dissemin
# .. → repo/
BASE_DIR = os.path.dirname(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
### DOI proxy ###
# The interface where to get DOI metadata from.
#
# This interface should at least support fetching metadata for one
# single DOI, like this:
# curl -LH "Accept: application/citeproc+json" http://DOI_PROXY_DOMAIN/10.1080/15568318.2012.660115
# (returns the citation as Citeproc+JSON)
#
DOI_PROXY_DOMAIN = 'doi-cache.dissem.in' # This acts as a caching proxy for dx.doi.org
#
# In addition, if the endpoint supports it, batch requests can be performed:
# curl -d 'dois=["10.1016/j.physletb.2015.01.010","10.5380/dp.v1i1.1922","10.1007/978-3-319-10936-7_9"]' \\
# http://doi-cache.ulminfo.fr/batch
# (returns a list of citation in Citeproc+JSON format)
#
DOI_PROXY_SUPPORTS_BATCH = True
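# Illustrative sketch (not part of this settings module): fetching Citeproc+JSON
# metadata for a single DOI through the proxy configured above, using the
# third-party `requests` library (an assumption; it is not imported here):
#
#     import requests
#     url = 'http://%s/10.1080/15568318.2012.660115' % DOI_PROXY_DOMAIN
#     headers = {'Accept': 'application/citeproc+json'}
#     metadata = requests.get(url, headers=headers).json()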
# Uncomment these settings if you rather want
# to fetch metadata directly from CrossRef (slower as not cached,
# and more requests as there is no batch support).
#DOI_PROXY_DOMAIN = 'dx.doi.org'
#DOI_PROXY_SUPPORTS_BATCH = False
### RoMEO proxy ###
# Set this to 'sherpa.ac.uk' if our custom mirror is not up anymore.
# Otherwise our proxy caches results and is more reliable than the original endpoint.
ROMEO_API_DOMAIN = 'romeo-cache.dissem.in'
### Paper deposits ###
# Max size of the PDFs (in bytes)
# 2.5MB - 2621440
# 5MB - 5242880
# 10MB - 10485760
# 20MB - 20971520
# 50MB - 52428800
DEPOSIT_MAX_FILE_SIZE = 1024*1024*20 # 20 MB
# Max download time when the file is downloaded from an URL (in seconds)
URL_DEPOSIT_DOWNLOAD_TIMEOUT = 10
### Paper freshness options ###
# On login of an user, minimum time between the last harvest to trigger
# a new harvest for that user.
PROFILE_REFRESH_ON_LOGIN = timedelta(days=1)
### Application definition ###
# You should not have to change anything in this section.
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'crispy_forms',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.orcid',
'statistics',
'publishers',
'papers',
'upload',
'deposit',
'deposit.zenodo',
'deposit.hal',
'deposit.sword',
'notification',
'bootstrap_pagination',
'django_js_reverse',
'solo',
'rest_framework',
'rest_framework_swagger',
'haystack',
'widget_tweaks',
'capture_tag',
)
CRISPY_TEMPLATE_PACK = 'bootstrap'
SITE_ID = 1
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
if ENABLE_CAS:
MIDDLEWARE_CLASSES += (
'django_cas_ng.middleware.CASMiddleware',
)
AUTHENTICATION_BACKENDS += (
'django_cas_ng.backends.CASBackend',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'OPTIONS': {
'loaders': (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
),
'context_processors': (
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.template.context_processors.request"
),
'debug': True
}
}
]
ROOT_URLCONF = 'dissemin.urls'
WSGI_APPLICATION = 'dissemin.wsgi.application'
### Static files (CSS, JavaScript, Images) ###
# This defines how static files are stored and accessed.
# https://docs.djangoproject.com/en/1.8/howto/static-files/
#
# Relative URL where static files are accessed (you don't have to change this).
STATIC_URL = '/static/'
# Relative URL where user uploads are accessed (you don't have to change this).
MEDIA_URL = '/media/'
### Celery config ###
# Celery runs asynchronous tasks such as metadata harvesting or
# complex updates.
# To communicate with it, we need a "broker".
# This is an example broker with Redis
# (with settings configured in your secret.py)
REDIS_URL = ':%s@%s:%s/%d' % (
REDIS_PASSWORD,
REDIS_HOST,
REDIS_PORT,
REDIS_DB)
BROKER_URL = 'redis://'+REDIS_URL
# We also use Redis as result backend.
CELERY_RESULT_BACKEND = BROKER_URL
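# For illustration only (hypothetical values; the real ones live in secret.py):
# with REDIS_PASSWORD='s3cret', REDIS_HOST='localhost', REDIS_PORT=6379 and
# REDIS_DB=0, BROKER_URL evaluates to 'redis://:s3cret@localhost:6379/0'.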
# Redis is not mandatory, this client is reserved for deposits.
try:
import redis
redis_client = redis.StrictRedis(
host=REDIS_HOST, port=REDIS_PORT,
db=REDIS_DB, password=REDIS_PASSWORD)
except ImportError:
pass
CELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml']
CELERY_IMPORTS = ['backend.tasks']
CELERYBEAT_SCHEDULE = {
'update_all_stats_but_researchers': {
'task': 'update_all_stats_but_researchers',
'schedule': timedelta(minutes=30),
},
'update_journal_stats': {
'task':'update_journal_stats',
'schedule': timedelta(days=1),
},
'remove_empty_profiles': {
'task': 'remove_empty_profiles',
'schedule': timedelta(hours=2),
},
}
# This is the time in seconds before an unacknowledged task is re-sent to
# another worker. It should exceed the length of the longest task, otherwise
# it will be executed twice ! 43200 is one day.
BROKER_TRANSPORT_OPTIONS = {'visibility_timeout': 43200}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
POSSIBLE_LANGUAGE_CODES = ['en','fr']
LANGUAGES = [
('en', _('English')),
('fr', _('French')),
]
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = (os.path.join(BASE_DIR, 'locale'),)
# Login and authentication
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/'
# Settings for our own API
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAuthenticated',),
'PAGE_SIZE': 10,
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
}
# Custom backend for haystack with Elasticsearch
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'search.SearchEngine',
'URL': 'http://localhost:9200/',
'INDEX_NAME': 'dissemin',
},
}
| agpl-3.0 | 6,541,271,561,845,858,000 | 31.027778 | 189 | 0.671509 | false |
ressu/SickGear | lib/shove/store/zodb.py | 3 | 1202 | # -*- coding: utf-8 -*-
'''
Zope Object Database store frontend.
shove's pseudo-URL for ZODB stores follows the form:
zodb:<path>
Where the path is a URL path to a ZODB FileStorage database. Alternatively, a
native pathname to a ZODB database can be passed as the 'engine' argument.
'''
try:
import transaction
from ZODB import FileStorage, DB
except ImportError:
raise ImportError('Requires ZODB library')
from shove.store import SyncStore
class ZodbStore(SyncStore):
'''ZODB store front end.'''
init = 'zodb://'
def __init__(self, engine, **kw):
super(ZodbStore, self).__init__(engine, **kw)
        # Handle pseudo-URL
self._storage = FileStorage.FileStorage(self._engine)
self._db = DB(self._storage)
self._connection = self._db.open()
self._store = self._connection.root()
# Keeps DB in synch through commits of transactions
self.sync = transaction.commit
def close(self):
'''Closes all open storage and connections.'''
self.sync()
super(ZodbStore, self).close()
self._connection.close()
self._db.close()
self._storage.close()
__all__ = ['ZodbStore']
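# Illustrative usage sketch (not part of the upstream module). The URI below is
# a hypothetical example of the ``zodb:<path>`` form described in the module
# docstring, accessed through shove's ``Shove`` front end:
#
#     from shove import Shove
#     store = Shove('zodb:///var/tmp/example.fs')
#     store['key'] = 'value'
#     store.close()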
| gpl-3.0 | -6,133,945,084,229,217,000 | 24.041667 | 77 | 0.638103 | false |
elitest/mitmproxy | test/test_cmdline.py | 16 | 3441 | import argparse
from libmproxy import cmdline
import tutils
def test_parse_replace_hook():
x = cmdline.parse_replace_hook("/foo/bar/voing")
assert x == ("foo", "bar", "voing")
x = cmdline.parse_replace_hook("/foo/bar/vo/ing/")
assert x == ("foo", "bar", "vo/ing/")
x = cmdline.parse_replace_hook("/bar/voing")
assert x == (".*", "bar", "voing")
tutils.raises(
cmdline.ParseException,
cmdline.parse_replace_hook,
"/foo"
)
tutils.raises(
"replacement regex",
cmdline.parse_replace_hook,
"patt/[/rep"
)
tutils.raises(
"filter pattern",
cmdline.parse_replace_hook,
"/~/foo/rep"
)
tutils.raises(
"empty clause",
cmdline.parse_replace_hook,
"//"
)
def test_parse_server_spec():
tutils.raises("Invalid server specification", cmdline.parse_server_spec, "")
assert cmdline.parse_server_spec(
"http://foo.com:88") == [False, False, "foo.com", 88]
assert cmdline.parse_server_spec(
"http://foo.com") == [False, False, "foo.com", 80]
assert cmdline.parse_server_spec(
"https://foo.com") == [True, True, "foo.com", 443]
assert cmdline.parse_server_spec_special(
"https2http://foo.com") == [True, False, "foo.com", 80]
assert cmdline.parse_server_spec_special(
"http2https://foo.com") == [False, True, "foo.com", 443]
tutils.raises(
"Invalid server specification",
cmdline.parse_server_spec,
"foo.com")
tutils.raises(
"Invalid server specification",
cmdline.parse_server_spec,
"http://")
def test_parse_setheaders():
x = cmdline.parse_setheader("/foo/bar/voing")
assert x == ("foo", "bar", "voing")
def test_common():
parser = argparse.ArgumentParser()
cmdline.common_options(parser)
opts = parser.parse_args(args=[])
assert cmdline.get_common_options(opts)
opts.stickycookie_filt = "foo"
opts.stickyauth_filt = "foo"
v = cmdline.get_common_options(opts)
assert v["stickycookie"] == "foo"
assert v["stickyauth"] == "foo"
opts.setheader = ["/foo/bar/voing"]
v = cmdline.get_common_options(opts)
assert v["setheaders"] == [("foo", "bar", "voing")]
opts.setheader = ["//"]
tutils.raises(
"empty clause",
cmdline.get_common_options,
opts
)
opts.setheader = []
opts.replace = ["/foo/bar/voing"]
v = cmdline.get_common_options(opts)
assert v["replacements"] == [("foo", "bar", "voing")]
opts.replace = ["//"]
tutils.raises(
"empty clause",
cmdline.get_common_options,
opts
)
opts.replace = []
opts.replace_file = [("/foo/bar/nonexistent")]
tutils.raises(
"could not read replace file",
cmdline.get_common_options,
opts
)
opts.replace_file = [("/~/bar/nonexistent")]
tutils.raises(
"filter pattern",
cmdline.get_common_options,
opts
)
p = tutils.test_data.path("data/replace")
opts.replace_file = [("/foo/bar/%s" % p)]
v = cmdline.get_common_options(opts)["replacements"]
assert len(v) == 1
assert v[0][2].strip() == "replacecontents"
def test_mitmproxy():
ap = cmdline.mitmproxy()
assert ap
def test_mitmdump():
ap = cmdline.mitmdump()
assert ap
def test_mitmweb():
ap = cmdline.mitmweb()
assert ap
| mit | 4,864,075,088,720,568,000 | 24.488889 | 80 | 0.584133 | false |
jneves/awsfabrictasks | awsfabrictasks/hostslist.py | 2 | 1454 | from awsfabrictasks.utils import sudo_upload_string_to_file
hostsfile_template = """
127.0.0.1 localhost
# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
ff02::3 ip6-allhosts
{custom_hosts}
"""
class Host(object):
def __init__(self, hostname, ip, suffix=''):
self.hostname = hostname
self.ip = ip
self.suffix = suffix
def __str__(self):
return '{ip} {hostname}{suffix}'.format(**self.__dict__)
class HostsList(list):
def __str__(self):
return '\n'.join(str(host) for host in self)
def create_hostslist_from_ec2instancewrappers(instancewrappers):
hostslist = HostsList()
for instancewrapper in instancewrappers:
if not instancewrapper.is_running():
raise ValueError('EC2 instance "{0}" is not RUNNING.'.format(instancewrapper))
ip = instancewrapper.instance.private_ip_address
role = instancewrapper.instance.tags['hostname']
hostslist.append(Host(hostname=role, ip=ip, suffix='.ec2'))
return hostslist
def create_hostsfile_from_ec2instancewrappers(instancewrappers):
hostslist = create_hostslist_from_ec2instancewrappers(instancewrappers)
return hostsfile_template.format(custom_hosts=hostslist)
def upload_hostsfile(hostsfile_string):
sudo_upload_string_to_file(hostsfile_string, '/etc/hosts')
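# Illustrative sketch (not part of the upstream module): combining the helpers
# above inside a fabric task. ``instancewrappers`` is assumed to be a list of
# running Ec2InstanceWrapper objects obtained elsewhere:
#
#     hostsfile = create_hostsfile_from_ec2instancewrappers(instancewrappers)
#     upload_hostsfile(hostsfile)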
| bsd-3-clause | -861,856,165,845,763,600 | 31.311111 | 90 | 0.707015 | false |
omarocegueda/dipy | dipy/align/imwarp.py | 3 | 62384 | """ Classes and functions for Symmetric Diffeomorphic Registration """
from __future__ import print_function
import abc
from dipy.utils.six import with_metaclass
import numpy as np
import numpy.linalg as npl
import scipy as sp
import nibabel as nib
from dipy.align import vector_fields as vfu
from dipy.align import floating
from dipy.align import VerbosityLevels
from dipy.align import Bunch
from dipy.align.scalespace import ScaleSpace
RegistrationStages = Bunch(INIT_START=0,
INIT_END=1,
OPT_START=2,
OPT_END=3,
SCALE_START=4,
SCALE_END=5,
ITER_START=6,
ITER_END=7)
r"""Registration Stages
This enum defines the different stages that the volumetric registration
may be in. The value of the stage is passed as a parameter to the callback
function so that it can react accordingly.
INIT_START: optimizer initialization starts
INIT_END: optimizer initialization ends
OPT_START: optimization starts
OPT_END: optimization ends
SCALE_START: optimization at a new scale space resolution starts
SCALE_END: optimization at the current scale space resolution ends
ITER_START: a new iteration starts
ITER_END: the current iteration ends
"""
def mult_aff(A, B):
r"""Returns the matrix product A.dot(B) considering None as the identity
Parameters
----------
A : array, shape (n,k)
B : array, shape (k,m)
Returns
-------
The matrix product A.dot(B). If any of the input matrices is None, it is
treated as the identity matrix. If both matrices are None, None is returned
"""
if A is None:
return B
elif B is None:
return A
return A.dot(B)
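# Minimal doctest-style sketch (illustrative, not part of the upstream module)
# of the ``None``-as-identity convention used by mult_aff:
#
#     >>> A = np.diag([2.0, 3.0, 1.0])
#     >>> mult_aff(A, None) is A
#     True
#     >>> mult_aff(None, None) is None
#     True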
def get_direction_and_spacings(affine, dim):
r"""Extracts the rotational and spacing components from a matrix
Extracts the rotational and spacing (voxel dimensions) components from a
matrix. An image gradient represents the local variation of the image's
gray values per voxel. Since we are iterating on the physical space, we
need to compute the gradients as variation per millimeter, so we need to
divide each gradient's component by the voxel size along the corresponding
axis, that's what the spacings are used for. Since the image's gradients
are oriented along the grid axes, we also need to re-orient the gradients
to be given in physical space coordinates.
Parameters
----------
affine : array, shape (k, k), k = 3, 4
the matrix transforming grid coordinates to physical space.
Returns
-------
direction : array, shape (k-1, k-1)
the rotational component of the input matrix
spacings : array, shape (k-1,)
the scaling component (voxel size) of the matrix
"""
if affine is None:
return np.eye(dim), np.ones(dim)
dim = affine.shape[1]-1
# Temporary hack: get the zooms by building a nifti image
affine4x4 = np.eye(4)
empty_volume = np.zeros((0, 0, 0))
affine4x4[:dim, :dim] = affine[:dim, :dim]
affine4x4[:dim, 3] = affine[:dim, dim-1]
nib_nifti = nib.Nifti1Image(empty_volume, affine4x4)
scalings = np.asarray(nib_nifti.header.get_zooms())
scalings = np.asarray(scalings[:dim], dtype=np.float64)
A = affine[:dim, :dim]
return A.dot(np.diag(1.0/scalings)), scalings
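# Illustrative sketch (not part of the upstream module): for a purely diagonal
# (anisotropic scaling) affine, the rotational component is the identity and
# the spacings are the voxel sizes along each axis:
#
#     >>> aff = np.diag([2.0, 3.0, 4.0, 1.0])
#     >>> direction, spacings = get_direction_and_spacings(aff, 3)
#     >>> np.allclose(direction, np.eye(3)), np.allclose(spacings, [2, 3, 4])
#     (True, True)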
class DiffeomorphicMap(object):
def __init__(self,
dim,
disp_shape,
disp_grid2world=None,
domain_shape=None,
domain_grid2world=None,
codomain_shape=None,
codomain_grid2world=None,
prealign=None):
r""" DiffeomorphicMap
Implements a diffeomorphic transformation on the physical space. The
deformation fields encoding the direct and inverse transformations
share the same domain discretization (both the discretization grid
shape and voxel-to-space matrix). The input coordinates (physical
coordinates) are first aligned using prealign, and then displaced
using the corresponding vector field interpolated at the aligned
coordinates.
Parameters
----------
dim : int, 2 or 3
the transformation's dimension
disp_shape : array, shape (dim,)
the number of slices (if 3D), rows and columns of the deformation
field's discretization
disp_grid2world : the voxel-to-space transform between the def. fields
grid and space
domain_shape : array, shape (dim,)
the number of slices (if 3D), rows and columns of the default
            discretization of this map's domain
domain_grid2world : array, shape (dim+1, dim+1)
the default voxel-to-space transformation between this map's
discretization and physical space
codomain_shape : array, shape (dim,)
the number of slices (if 3D), rows and columns of the images that
are 'normally' warped using this transformation in the forward
direction (this will provide default transformation parameters to
warp images under this transformation). By default, we assume that
the inverse transformation is 'normally' used to warp images with
the same discretization and voxel-to-space transformation as the
deformation field grid.
codomain_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation of images that are 'normally'
warped using this transformation (in the forward direction).
prealign : array, shape (dim+1, dim+1)
the linear transformation to be applied to align input images to
the reference space before warping under the deformation field.
"""
self.dim = dim
if(disp_shape is None):
raise ValueError("Invalid displacement field discretization")
self.disp_shape = np.asarray(disp_shape, dtype=np.int32)
# If the discretization affine is None, we assume it's the identity
self.disp_grid2world = disp_grid2world
if(self.disp_grid2world is None):
self.disp_world2grid = None
else:
self.disp_world2grid = npl.inv(self.disp_grid2world)
# If domain_shape isn't provided, we use the map's discretization shape
if(domain_shape is None):
self.domain_shape = self.disp_shape
else:
self.domain_shape = np.asarray(domain_shape, dtype=np.int32)
self.domain_grid2world = domain_grid2world
if(domain_grid2world is None):
self.domain_world2grid = None
else:
self.domain_world2grid = npl.inv(domain_grid2world)
# If codomain shape was not provided, we assume it is an endomorphism:
# use the same domain_shape and codomain_grid2world as the field domain
if codomain_shape is None:
self.codomain_shape = self.domain_shape
else:
self.codomain_shape = np.asarray(codomain_shape, dtype=np.int32)
self.codomain_grid2world = codomain_grid2world
if codomain_grid2world is None:
self.codomain_world2grid = None
else:
self.codomain_world2grid = npl.inv(codomain_grid2world)
self.prealign = prealign
if prealign is None:
self.prealign_inv = None
else:
self.prealign_inv = npl.inv(prealign)
self.is_inverse = False
self.forward = None
self.backward = None
def interpret_matrix(self, obj):
''' Try to interpret `obj` as a matrix
Some operations are performed faster if we know in advance if a matrix
is the identity (so we can skip the actual matrix-vector
multiplication). This function returns None if the given object
is None or the 'identity' string. It returns the same object if it is
a numpy array. It raises an exception otherwise.
Parameters
----------
obj : object
any object
Returns
        -------
obj : object
the same object given as argument if `obj` is None or a numpy
array. None if `obj` is the 'identity' string.
'''
if (obj is None) or isinstance(obj, np.ndarray):
return obj
if isinstance(obj, str) and (obj == 'identity'):
return None
raise ValueError('Invalid matrix')
def get_forward_field(self):
r"""Deformation field to transform an image in the forward direction
Returns the deformation field that must be used to warp an image under
this transformation in the forward direction (note the 'is_inverse'
flag).
"""
if self.is_inverse:
return self.backward
else:
return self.forward
def get_backward_field(self):
r"""Deformation field to transform an image in the backward direction
Returns the deformation field that must be used to warp an image under
this transformation in the backward direction (note the 'is_inverse'
flag).
"""
if self.is_inverse:
return self.forward
else:
return self.backward
def allocate(self):
r"""Creates a zero displacement field
Creates a zero displacement field (the identity transformation).
"""
self.forward = np.zeros(tuple(self.disp_shape) + (self.dim,),
dtype=floating)
self.backward = np.zeros(tuple(self.disp_shape) + (self.dim,),
dtype=floating)
def _get_warping_function(self, interpolation):
r"""Appropriate warping function for the given interpolation type
Returns the right warping function from vector_fields that must be
called for the specified data dimension and interpolation type
"""
if self.dim == 2:
if interpolation == 'linear':
return vfu.warp_2d
else:
return vfu.warp_2d_nn
else:
if interpolation == 'linear':
return vfu.warp_3d
else:
return vfu.warp_3d_nn
def _warp_forward(self, image, interpolation='linear',
image_world2grid=None, out_shape=None,
out_grid2world=None):
r"""Warps an image in the forward direction
Deforms the input image under this diffeomorphic map in the forward
direction. Since the mapping is defined in the physical space, the user
must specify the sampling grid shape and its space-to-voxel mapping.
By default, the transformation will use the discretization information
given at initialization.
Parameters
----------
image : array, shape (s, r, c) if dim = 3 or (r, c) if dim = 2
the image to be warped under this transformation in the forward
direction
interpolation : string, either 'linear' or 'nearest'
the type of interpolation to be used for warping, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_world2grid : array, shape (dim+1, dim+1)
the transformation bringing world (space) coordinates to voxel
coordinates of the image given as input
out_shape : array, shape (dim,)
the number of slices, rows and columns of the desired warped image
out_grid2world : the transformation bringing voxel coordinates of the
warped image to physical space
Returns
-------
warped : array, shape = out_shape or self.codomain_shape if None
the warped image under this transformation in the forward direction
Notes
-----
        A diffeomorphic map must be thought of as a mapping between points
in space. Warping an image J towards an image I means transforming
each voxel with (discrete) coordinates i in I to (floating-point) voxel
coordinates j in J. The transformation we consider 'forward' is
precisely mapping coordinates i from the input image to coordinates j
from reference image, which has the effect of warping an image with
reference discretization (typically, the "static image") "towards" an
image with input discretization (typically, the "moving image"). More
precisely, the warped image is produced by the following interpolation:
warped[i] = image[W * forward[Dinv * P * S * i] + W * P * S * i )]
where i denotes the coordinates of a voxel in the input grid, W is
the world-to-grid transformation of the image given as input, Dinv
is the world-to-grid transformation of the deformation field
discretization, P is the pre-aligning matrix (transforming input
points to reference points), S is the voxel-to-space transformation of
the sampling grid (see comment below) and forward is the forward
deformation field.
If we want to warp an image, we also must specify on what grid we
want to sample the resulting warped image (the images are considered as
        points in space, and their representation on a grid depends on the
        grid-to-space transform, which tells us, for each grid voxel, what point
        in space we need to bring via interpolation). So, S is the matrix that
converts the sampling grid (whose shape is given as parameter
'out_shape' ) to space coordinates.
"""
# if no world-to-image transform is provided, we use the codomain info
if image_world2grid is None:
image_world2grid = self.codomain_world2grid
# if no sampling info is provided, we use the domain info
if out_shape is None:
if self.domain_shape is None:
raise ValueError('Unable to infer sampling info. '
'Provide a valid out_shape.')
out_shape = self.domain_shape
else:
out_shape = np.asarray(out_shape, dtype=np.int32)
if out_grid2world is None:
out_grid2world = self.domain_grid2world
W = self.interpret_matrix(image_world2grid)
Dinv = self.disp_world2grid
P = self.prealign
S = self.interpret_matrix(out_grid2world)
# this is the matrix which we need to multiply the voxel coordinates
# to interpolate on the forward displacement field ("in"side the
# 'forward' brackets in the expression above)
affine_idx_in = mult_aff(Dinv, mult_aff(P, S))
# this is the matrix which we need to multiply the voxel coordinates
# to add to the displacement ("out"side the 'forward' brackets in the
# expression above)
affine_idx_out = mult_aff(W, mult_aff(P, S))
# this is the matrix which we need to multiply the displacement vector
# prior to adding to the transformed input point
affine_disp = W
# Convert the data to required types to use the cythonized functions
if interpolation == 'nearest':
if image.dtype is np.dtype('float64') and floating is np.float32:
image = image.astype(floating)
elif image.dtype is np.dtype('int64'):
image = image.astype(np.int32)
else:
image = np.asarray(image, dtype=floating)
warp_f = self._get_warping_function(interpolation)
warped = warp_f(image, self.forward, affine_idx_in, affine_idx_out,
affine_disp, out_shape)
return warped
def _warp_backward(self, image, interpolation='linear',
image_world2grid=None, out_shape=None,
out_grid2world=None):
r"""Warps an image in the backward direction
Deforms the input image under this diffeomorphic map in the backward
direction. Since the mapping is defined in the physical space, the user
must specify the sampling grid shape and its space-to-voxel mapping.
By default, the transformation will use the discretization information
given at initialization.
Parameters
----------
image : array, shape (s, r, c) if dim = 3 or (r, c) if dim = 2
the image to be warped under this transformation in the backward
direction
interpolation : string, either 'linear' or 'nearest'
the type of interpolation to be used for warping, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_world2grid : array, shape (dim+1, dim+1)
the transformation bringing world (space) coordinates to voxel
coordinates of the image given as input
out_shape : array, shape (dim,)
the number of slices, rows and columns of the desired warped image
out_grid2world : the transformation bringing voxel coordinates of the
warped image to physical space
Returns
-------
warped : array, shape = out_shape or self.domain_shape if None
the warped image under this transformation in the backward
direction
Notes
-----
        A diffeomorphic map must be thought of as a mapping between points
in space. Warping an image J towards an image I means transforming
each voxel with (discrete) coordinates i in I to (floating-point) voxel
coordinates j in J. The transformation we consider 'backward' is
precisely mapping coordinates i from the reference grid to coordinates
j from the input image (that's why it's "backward"), which has the
effect of warping the input image (moving) "towards" the reference.
More precisely, the warped image is produced by the following
interpolation:
warped[i]=image[W * Pinv * backward[Dinv * S * i] + W * Pinv * S * i )]
where i denotes the coordinates of a voxel in the input grid, W is
the world-to-grid transformation of the image given as input, Dinv
is the world-to-grid transformation of the deformation field
discretization, Pinv is the pre-aligning matrix's inverse (transforming
reference points to input points), S is the grid-to-space
transformation of the sampling grid (see comment below) and backward is
the backward deformation field.
If we want to warp an image, we also must specify on what grid we
want to sample the resulting warped image (the images are considered as
        points in space, and their representation on a grid depends on the
        grid-to-space transform, which tells us, for each grid voxel, what point
        in space we need to bring via interpolation). So, S is the matrix that
converts the sampling grid (whose shape is given as parameter
'out_shape' ) to space coordinates.
"""
# if no world-to-image transform is provided, we use the domain info
if image_world2grid is None:
image_world2grid = self.domain_world2grid
# if no sampling info is provided, we use the codomain info
if out_shape is None:
if self.codomain_shape is None:
msg = 'Unknown sampling info. Provide a valid out_shape.'
raise ValueError(msg)
out_shape = self.codomain_shape
if out_grid2world is None:
out_grid2world = self.codomain_grid2world
W = self.interpret_matrix(image_world2grid)
Dinv = self.disp_world2grid
Pinv = self.prealign_inv
S = self.interpret_matrix(out_grid2world)
# this is the matrix which we need to multiply the voxel coordinates
# to interpolate on the backward displacement field ("in"side the
# 'backward' brackets in the expression above)
affine_idx_in = mult_aff(Dinv, S)
# this is the matrix which we need to multiply the voxel coordinates
# to add to the displacement ("out"side the 'backward' brackets in the
# expression above)
affine_idx_out = mult_aff(W, mult_aff(Pinv, S))
# this is the matrix which we need to multiply the displacement vector
# prior to adding to the transformed input point
affine_disp = mult_aff(W, Pinv)
if interpolation == 'nearest':
if image.dtype is np.dtype('float64') and floating is np.float32:
image = image.astype(floating)
elif image.dtype is np.dtype('int64'):
image = image.astype(np.int32)
else:
image = np.asarray(image, dtype=floating)
warp_f = self._get_warping_function(interpolation)
warped = warp_f(image, self.backward, affine_idx_in, affine_idx_out,
affine_disp, out_shape)
return warped
def transform(self, image, interpolation='linear', image_world2grid=None,
out_shape=None, out_grid2world=None):
r"""Warps an image in the forward direction
Transforms the input image under this transformation in the forward
direction. It uses the "is_inverse" flag to switch between "forward"
and "backward" (if is_inverse is False, then transform(...) warps the
image forwards, else it warps the image backwards).
Parameters
----------
image : array, shape (s, r, c) if dim = 3 or (r, c) if dim = 2
the image to be warped under this transformation in the forward
direction
interpolation : string, either 'linear' or 'nearest'
the type of interpolation to be used for warping, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_world2grid : array, shape (dim+1, dim+1)
the transformation bringing world (space) coordinates to voxel
coordinates of the image given as input
out_shape : array, shape (dim,)
the number of slices, rows and columns of the desired warped image
out_grid2world : the transformation bringing voxel coordinates of the
warped image to physical space
Returns
-------
warped : array, shape = out_shape or self.codomain_shape if None
the warped image under this transformation in the forward direction
Notes
-----
See _warp_forward and _warp_backward documentation for further
information.
"""
if out_shape is not None:
out_shape = np.asarray(out_shape, dtype=np.int32)
if self.is_inverse:
warped = self._warp_backward(image, interpolation,
image_world2grid, out_shape,
out_grid2world)
else:
warped = self._warp_forward(image, interpolation, image_world2grid,
out_shape, out_grid2world)
return np.asarray(warped)
def transform_inverse(self, image, interpolation='linear',
image_world2grid=None, out_shape=None,
out_grid2world=None):
r"""Warps an image in the backward direction
Transforms the input image under this transformation in the backward
direction. It uses the "is_inverse" flag to switch between "forward"
and "backward" (if is_inverse is False, then transform_inverse(...)
warps the image backwards, else it warps the image forwards)
Parameters
----------
image : array, shape (s, r, c) if dim = 3 or (r, c) if dim = 2
the image to be warped under this transformation in the forward
direction
interpolation : string, either 'linear' or 'nearest'
the type of interpolation to be used for warping, either 'linear'
(for k-linear interpolation) or 'nearest' for nearest neighbor
image_world2grid : array, shape (dim+1, dim+1)
the transformation bringing world (space) coordinates to voxel
coordinates of the image given as input
out_shape : array, shape (dim,)
the number of slices, rows and columns of the desired warped image
out_grid2world : the transformation bringing voxel coordinates of the
warped image to physical space
Returns
-------
warped : array, shape = out_shape or self.codomain_shape if None
warped image under this transformation in the backward direction
Notes
-----
See _warp_forward and _warp_backward documentation for further
information.
"""
if self.is_inverse:
warped = self._warp_forward(image, interpolation, image_world2grid,
out_shape, out_grid2world)
else:
warped = self._warp_backward(image, interpolation,
image_world2grid, out_shape,
out_grid2world)
return np.asarray(warped)
def inverse(self):
r"""Inverse of this DiffeomorphicMap instance
Returns a diffeomorphic map object representing the inverse of this
transformation. The internal arrays are not copied but just referenced.
Returns
-------
inv : DiffeomorphicMap object
the inverse of this diffeomorphic map.
"""
inv = DiffeomorphicMap(self.dim,
self.disp_shape,
self.disp_grid2world,
self.domain_shape,
self.domain_grid2world,
self.codomain_shape,
self.codomain_grid2world,
self.prealign)
inv.forward = self.forward
inv.backward = self.backward
inv.is_inverse = True
return inv
def expand_fields(self, expand_factors, new_shape):
r"""Expands the displacement fields from current shape to new_shape
Up-samples the discretization of the displacement fields to be of
new_shape shape.
Parameters
----------
expand_factors : array, shape (dim,)
the factors scaling current spacings (voxel sizes) to spacings in
the expanded discretization.
new_shape : array, shape (dim,)
the shape of the arrays holding the up-sampled discretization
"""
if self.dim == 2:
expand_f = vfu.resample_displacement_field_2d
else:
expand_f = vfu.resample_displacement_field_3d
expanded_forward = expand_f(self.forward, expand_factors, new_shape)
expanded_backward = expand_f(self.backward, expand_factors, new_shape)
expand_factors = np.append(expand_factors, [1])
expanded_grid2world = mult_aff(self.disp_grid2world,
np.diag(expand_factors))
expanded_world2grid = npl.inv(expanded_grid2world)
self.forward = expanded_forward
self.backward = expanded_backward
self.disp_shape = new_shape
self.disp_grid2world = expanded_grid2world
self.disp_world2grid = expanded_world2grid
def compute_inversion_error(self):
r"""Inversion error of the displacement fields
Estimates the inversion error of the displacement fields by computing
statistics of the residual vectors obtained after composing the forward
and backward displacement fields.
Returns
-------
residual : array, shape (R, C) or (S, R, C)
the displacement field resulting from composing the forward and
backward displacement fields of this transformation (the residual
should be zero for a perfect diffeomorphism)
stats : array, shape (3,)
statistics from the norms of the vectors of the residual
displacement field: maximum, mean and standard deviation
Notes
-----
Since the forward and backward displacement fields have the same
discretization, the final composition is given by
comp[i] = forward[ i + Dinv * backward[i]]
where Dinv is the space-to-grid transformation of the displacement
fields
"""
Dinv = self.disp_world2grid
if self.dim == 2:
compose_f = vfu.compose_vector_fields_2d
else:
compose_f = vfu.compose_vector_fields_3d
residual, stats = compose_f(self.backward, self.forward,
None, Dinv, 1.0, None)
return np.asarray(residual), np.asarray(stats)
def shallow_copy(self):
r"""Shallow copy of this DiffeomorphicMap instance
Creates a shallow copy of this diffeomorphic map (the arrays are not
copied but just referenced)
Returns
-------
new_map : DiffeomorphicMap object
the shallow copy of this diffeomorphic map
"""
new_map = DiffeomorphicMap(self.dim,
self.disp_shape,
self.disp_grid2world,
self.domain_shape,
self.domain_grid2world,
self.codomain_shape,
self.codomain_grid2world,
self.prealign)
new_map.forward = self.forward
new_map.backward = self.backward
new_map.is_inverse = self.is_inverse
return new_map
def warp_endomorphism(self, phi):
r"""Composition of this DiffeomorphicMap with a given endomorphism
Creates a new DiffeomorphicMap C with the same properties as self and
composes its displacement fields with phi's corresponding fields.
The resulting diffeomorphism is of the form C(x) = phi(self(x)) with
inverse C^{-1}(y) = self^{-1}(phi^{-1}(y)). We assume that phi is an
endomorphism with the same discretization and domain affine as self
to ensure that the composition inherits self's properties (we also
assume that the pre-aligning matrix of phi is None or identity).
Parameters
----------
phi : DiffeomorphicMap object
the endomorphism to be warped by this diffeomorphic map
Returns
-------
composition : the composition of this diffeomorphic map with the
endomorphism given as input
Notes
-----
The problem with our current representation of a DiffeomorphicMap is
that the set of Diffeomorphism that can be represented this way (a
pre-aligning matrix followed by a non-linear endomorphism given as a
displacement field) is not closed under the composition operation.
Supporting a general DiffeomorphicMap class, closed under composition,
may be extremely costly computationally, and the kind of
transformations we actually need for Avants' mid-point algorithm (SyN)
are much simpler.
"""
# Compose the forward deformation fields
d1 = self.get_forward_field()
d2 = phi.get_forward_field()
d1_inv = self.get_backward_field()
d2_inv = phi.get_backward_field()
premult_disp = self.disp_world2grid
if self.dim == 2:
compose_f = vfu.compose_vector_fields_2d
else:
compose_f = vfu.compose_vector_fields_3d
forward, stats = compose_f(d1, d2, None, premult_disp, 1.0, None)
        backward, stats = compose_f(d2_inv, d1_inv, None, premult_disp, 1.0,
                                    None)
composition = self.shallow_copy()
composition.forward = forward
composition.backward = backward
return composition
def get_simplified_transform(self):
r""" Constructs a simplified version of this Diffeomorhic Map
The simplified version incorporates the pre-align transform, as well as
the domain and codomain affine transforms into the displacement field.
The resulting transformation may be regarded as operating on the
image spaces given by the domain and codomain discretization. As a
result, self.prealign, self.disp_grid2world, self.domain_grid2world and
self.codomain affine will be None (denoting Identity) in the resulting
diffeomorphic map.
"""
if self.dim == 2:
simplify_f = vfu.simplify_warp_function_2d
else:
simplify_f = vfu.simplify_warp_function_3d
# Simplify the forward transform
D = self.domain_grid2world
P = self.prealign
Rinv = self.disp_world2grid
Cinv = self.codomain_world2grid
# this is the matrix which we need to multiply the voxel coordinates
# to interpolate on the forward displacement field ("in"side the
# 'forward' brackets in the expression above)
affine_idx_in = mult_aff(Rinv, mult_aff(P, D))
# this is the matrix which we need to multiply the voxel coordinates
# to add to the displacement ("out"side the 'forward' brackets in the
# expression above)
affine_idx_out = mult_aff(Cinv, mult_aff(P, D))
# this is the matrix which we need to multiply the displacement vector
# prior to adding to the transformed input point
affine_disp = Cinv
new_forward = simplify_f(self.forward, affine_idx_in,
affine_idx_out, affine_disp,
self.domain_shape)
# Simplify the backward transform
C = self.codomain_world2grid
Pinv = self.prealign_inv
Dinv = self.domain_world2grid
affine_idx_in = mult_aff(Rinv, C)
affine_idx_out = mult_aff(Dinv, mult_aff(Pinv, C))
affine_disp = mult_aff(Dinv, Pinv)
new_backward = simplify_f(self.backward, affine_idx_in,
affine_idx_out, affine_disp,
self.codomain_shape)
simplified = DiffeomorphicMap(self.dim,
self.disp_shape,
None,
self.domain_shape,
None,
self.codomain_shape,
None,
None)
simplified.forward = new_forward
simplified.backward = new_backward
return simplified
class DiffeomorphicRegistration(with_metaclass(abc.ABCMeta, object)):
def __init__(self, metric=None):
r""" Diffeomorphic Registration
This abstract class defines the interface to be implemented by any
optimization algorithm for diffeomorphic registration.
Parameters
----------
metric : SimilarityMetric object
the object measuring the similarity of the two images. The
registration algorithm will minimize (or maximize) the provided
similarity.
"""
if metric is None:
raise ValueError('The metric cannot be None')
self.metric = metric
self.dim = metric.dim
def set_level_iters(self, level_iters):
r"""Sets the number of iterations at each pyramid level
Establishes the maximum number of iterations to be performed at each
level of the Gaussian pyramid, similar to ANTS.
Parameters
----------
level_iters : list
the number of iterations at each level of the Gaussian pyramid.
level_iters[0] corresponds to the coarsest level and level_iters[n-1]
to the finest, where n is the length of the list
"""
self.levels = len(level_iters) if level_iters else 0
self.level_iters = level_iters
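# Example sketch of the indexing convention (values assumed): with
# level_iters = [100, 100, 25] we get self.levels == 3; _optimize visits the
# coarsest level first and reads self.level_iters[self.levels - 1 - level],
# so the coarsest level runs up to 100 iterations and the finest up to 25.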
@abc.abstractmethod
def optimize(self):
r"""Starts the metric optimization
This is the main function each specialized class derived from this must
implement. Upon completion, the deformation field must be available
from the forward transformation model.
"""
@abc.abstractmethod
def get_map(self):
r"""
Returns the resulting diffeomorphic map after optimization
"""
class SymmetricDiffeomorphicRegistration(DiffeomorphicRegistration):
def __init__(self,
metric,
level_iters=None,
step_length=0.25,
ss_sigma_factor=0.2,
opt_tol=1e-5,
inv_iter=20,
inv_tol=1e-3,
callback=None):
r""" Symmetric Diffeomorphic Registration (SyN) Algorithm
Performs the multi-resolution optimization algorithm for non-linear
registration using a given similarity metric.
Parameters
----------
metric : SimilarityMetric object
the metric to be optimized
level_iters : list of int
the number of iterations at each level of the Gaussian Pyramid (the
length of the list defines the number of pyramid levels to be
used)
opt_tol : float
the optimization will stop when the estimated derivative of the
energy profile w.r.t. time falls below this threshold
inv_iter : int
the number of iterations to be performed by the displacement field
inversion algorithm
step_length : float
the length of the maximum displacement vector of the update
displacement field at each iteration
ss_sigma_factor : float
parameter of the scale-space smoothing kernel. For example, the
std. dev. of the kernel will be factor*(2^i) in the isotropic case
where i = 0, 1, ..., n_scales is the scale
inv_tol : float
the displacement field inversion algorithm will stop iterating
when the inversion error falls below this threshold
callback : function(SymmetricDiffeomorphicRegistration)
a function receiving a SymmetricDiffeomorphicRegistration object
to be called after each iteration (this optimizer will call this
function passing self as parameter)
"""
super(SymmetricDiffeomorphicRegistration, self).__init__(metric)
if level_iters is None:
level_iters = [100, 100, 25]
if len(level_iters) == 0:
raise ValueError('The iterations list cannot be empty')
self.set_level_iters(level_iters)
self.step_length = step_length
self.ss_sigma_factor = ss_sigma_factor
self.opt_tol = opt_tol
self.inv_tol = inv_tol
self.inv_iter = inv_iter
self.energy_window = 12
self.energy_list = []
self.full_energy_profile = []
self.verbosity = VerbosityLevels.STATUS
self.callback = callback
self.moving_ss = None
self.static_ss = None
self.static_direction = None
self.moving_direction = None
self.mask0 = metric.mask0
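# Minimal usage sketch (hedged; assumes a similarity metric such as
# dipy.align.metrics.CCMetric and pre-loaded `static`/`moving` arrays with
# their grid-to-world affines, here called static_affine/moving_affine):
#
#     metric = CCMetric(3)
#     sdr = SymmetricDiffeomorphicRegistration(metric,
#                                              level_iters=[100, 100, 25])
#     mapping = sdr.optimize(static, moving, static_affine, moving_affine)
#     warped = mapping.transform(moving)
#
# `mapping` is the DiffeomorphicMap returned by optimize()/get_map().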
def update(self, current_displacement, new_displacement,
disp_world2grid, time_scaling):
r"""Composition of the current displacement field with the given field
Interpolates new displacement at the locations defined by
current_displacement. Equivalently, computes the composition C of the
given displacement fields as C(x) = B(A(x)), where A is
current_displacement and B is new_displacement. This function is
intended to be used with deformation fields of the same sampling
(e.g. to be called by a registration algorithm).
Parameters
----------
current_displacement : array, shape (R', C', 2) or (S', R', C', 3)
the displacement field defining where to interpolate
new_displacement
new_displacement : array, shape (R, C, 2) or (S, R, C, 3)
the displacement field to be warped by current_displacement
disp_world2grid : array, shape (dim+1, dim+1)
the space-to-grid transform associated with the displacements'
grid (we assume that both displacements are discretized over the
same grid)
time_scaling : float
scaling factor applied to new_displacement. The effect may be
interpreted as moving the current_displacement vectors along a factor
(`time_scaling`) of new_displacement.
Returns
-------
updated : array, shape (the same as new_displacement)
the warped displacement field
mean_norm : the mean norm of all vectors in current_displacement
"""
sq_field = np.sum((np.array(current_displacement) ** 2), -1)
mean_norm = np.sqrt(sq_field).mean()
# We assume that both displacement fields have the same
# grid2world transform, which implies premult_index=Identity
# and premult_disp is the world2grid transform associated with
# the displacements' grid
self.compose(current_displacement, new_displacement, None,
disp_world2grid, time_scaling, current_displacement)
return np.array(current_displacement), np.array(mean_norm)
def get_map(self):
r"""Returns the resulting diffeomorphic map
Returns the DiffeomorphicMap registering the moving image towards
the static image.
"""
return self.static_to_ref
def _connect_functions(self):
r"""Assign the methods to be called according to the image dimension
Assigns the appropriate functions to be called for displacement field
inversion, Gaussian pyramid, and affine / dense deformation composition
according to the dimension of the input images e.g. 2D or 3D.
"""
if self.dim == 2:
self.invert_vector_field = vfu.invert_vector_field_fixed_point_2d
self.compose = vfu.compose_vector_fields_2d
else:
self.invert_vector_field = vfu.invert_vector_field_fixed_point_3d
self.compose = vfu.compose_vector_fields_3d
def _init_optimizer(self, static, moving,
static_grid2world, moving_grid2world, prealign):
r"""Initializes the registration optimizer
Initializes the optimizer by computing the scale space of the input
images and allocating the required memory for the transformation models
at the coarsest scale.
Parameters
----------
static : array, shape (S, R, C) or (R, C)
the image to be used as reference during optimization. The
displacement fields will have the same discretization as the static
image.
moving : array, shape (S, R, C) or (R, C)
the image to be used as "moving" during optimization. Since the
deformation fields' discretization is the same as the static image,
it is necessary to pre-align the moving image to ensure its domain
lies inside the domain of the deformation fields. This is assumed
to be accomplished by "pre-aligning" the moving image towards the
static using an affine transformation given by the 'prealign'
matrix
static_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation associated to the static image
moving_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation associated to the moving image
prealign : array, shape (dim+1, dim+1)
the affine transformation (operating on the physical space)
pre-aligning the moving image towards the static
"""
self._connect_functions()
# Extract information from affine matrices to create the scale space
static_direction, static_spacing = \
get_direction_and_spacings(static_grid2world, self.dim)
moving_direction, moving_spacing = \
get_direction_and_spacings(moving_grid2world, self.dim)
# the images' directions don't change with scale
self.static_direction = np.eye(self.dim + 1)
self.moving_direction = np.eye(self.dim + 1)
self.static_direction[:self.dim, :self.dim] = static_direction
self.moving_direction[:self.dim, :self.dim] = moving_direction
# Build the scale space of the input images
if self.verbosity >= VerbosityLevels.DIAGNOSE:
print('Applying zero mask: ' + str(self.mask0))
if self.verbosity >= VerbosityLevels.STATUS:
print('Creating scale space from the moving image. Levels: %d. '
'Sigma factor: %f.' % (self.levels, self.ss_sigma_factor))
self.moving_ss = ScaleSpace(moving, self.levels, moving_grid2world,
moving_spacing, self.ss_sigma_factor,
self.mask0)
if self.verbosity >= VerbosityLevels.STATUS:
print('Creating scale space from the static image. Levels: %d. '
'Sigma factor: %f.' % (self.levels, self.ss_sigma_factor))
self.static_ss = ScaleSpace(static, self.levels, static_grid2world,
static_spacing, self.ss_sigma_factor,
self.mask0)
if self.verbosity >= VerbosityLevels.DEBUG:
print('Moving scale space:')
for level in range(self.levels):
self.moving_ss.print_level(level)
print('Static scale space:')
for level in range(self.levels):
self.static_ss.print_level(level)
# Get the properties of the coarsest level from the static image. These
# properties will be taken as the reference discretization.
disp_shape = self.static_ss.get_domain_shape(self.levels-1)
disp_grid2world = self.static_ss.get_affine(self.levels-1)
# The codomain discretization of both diffeomorphic maps is
# precisely the discretization of the static image
codomain_shape = static.shape
codomain_grid2world = static_grid2world
# The forward model transforms points from the static image
# to points on the reference (which is the static as well). So the
# domain properties are taken from the static image. Since it's the same
# as the reference, we don't need to pre-align.
domain_shape = static.shape
domain_grid2world = static_grid2world
self.static_to_ref = DiffeomorphicMap(self.dim,
disp_shape,
disp_grid2world,
domain_shape,
domain_grid2world,
codomain_shape,
codomain_grid2world,
None)
self.static_to_ref.allocate()
# The backward model transforms points from the moving image
# to points on the reference (which is the static). So the input
# properties are taken from the moving image, and we need to pre-align
# points on the moving physical space to the reference physical space
# by applying the inverse of pre-align. This is done this way to make
# it clear for the user: the pre-align matrix is usually obtained by
# doing affine registration of the moving image towards the static
# image, which results in a matrix transforming points in the static
# physical space to points in the moving physical space
prealign_inv = None if prealign is None else npl.inv(prealign)
domain_shape = moving.shape
domain_grid2world = moving_grid2world
self.moving_to_ref = DiffeomorphicMap(self.dim,
disp_shape,
disp_grid2world,
domain_shape,
domain_grid2world,
codomain_shape,
codomain_grid2world,
prealign_inv)
self.moving_to_ref.allocate()
def _end_optimizer(self):
r"""Frees the resources allocated during initialization
"""
del self.moving_ss
del self.static_ss
def _iterate(self):
r"""Performs one symmetric iteration
Performs one iteration of the SyN algorithm:
1. Compute forward
2. Compute backward
3. Update forward
4. Update backward
5. Compute inverses
6. Invert the inverses
Returns
-------
der : float
the derivative of the energy profile, computed by fitting a
quadratic function to the energy values at the latest T iterations,
where T = self.energy_window. If the current iteration is less than
T then np.inf is returned instead.
"""
# Acquire current resolution information from scale spaces
current_moving = self.moving_ss.get_image(self.current_level)
current_static = self.static_ss.get_image(self.current_level)
current_disp_shape = \
self.static_ss.get_domain_shape(self.current_level)
current_disp_grid2world = \
self.static_ss.get_affine(self.current_level)
current_disp_world2grid = \
self.static_ss.get_affine_inv(self.current_level)
current_disp_spacing = \
self.static_ss.get_spacing(self.current_level)
# Warp the input images (smoothed to the current scale) to the common
# (reference) space at the current resolution
wstatic = self.static_to_ref.transform_inverse(current_static,
'linear',
None,
current_disp_shape,
current_disp_grid2world)
wmoving = self.moving_to_ref.transform_inverse(current_moving,
'linear',
None,
current_disp_shape,
current_disp_grid2world)
# Pass both images to the metric. Now both images are sampled on the
# reference grid (equal to the static image's grid) and the direction
# doesn't change across scales
self.metric.set_moving_image(wmoving, current_disp_grid2world,
current_disp_spacing,
self.static_direction)
self.metric.use_moving_image_dynamics(
current_moving, self.moving_to_ref.inverse())
self.metric.set_static_image(wstatic, current_disp_grid2world,
current_disp_spacing,
self.static_direction)
self.metric.use_static_image_dynamics(
current_static, self.static_to_ref.inverse())
# Initialize the metric for a new iteration
self.metric.initialize_iteration()
if self.callback is not None:
self.callback(self, RegistrationStages.ITER_START)
# Compute the forward step (to be used to update the forward transform)
fw_step = np.array(self.metric.compute_forward())
# set zero displacements at the boundary
fw_step[0, ...] = 0
fw_step[:, 0, ...] = 0
fw_step[-1, ...] = 0
fw_step[:, -1, ...] = 0
if(self.dim == 3):
fw_step[:, :, 0, ...] = 0
fw_step[:, :, -1, ...] = 0
# Normalize the forward step
nrm = np.sqrt(np.sum((fw_step/current_disp_spacing)**2, -1)).max()
if nrm > 0:
fw_step /= nrm
# Add to current total field
self.static_to_ref.forward, md_forward = self.update(
self.static_to_ref.forward, fw_step,
current_disp_world2grid, self.step_length)
del fw_step
# Keep track of the forward energy
fw_energy = self.metric.get_energy()
# Compose backward step (to be used to update the backward transform)
bw_step = np.array(self.metric.compute_backward())
# set zero displacements at the boundary
bw_step[0, ...] = 0
bw_step[:, 0, ...] = 0
if(self.dim == 3):
bw_step[:, :, 0, ...] = 0
# Normalize the backward step
nrm = np.sqrt(np.sum((bw_step/current_disp_spacing) ** 2, -1)).max()
if nrm > 0:
bw_step /= nrm
# Add to current total field
self.moving_to_ref.forward, md_backward = self.update(
self.moving_to_ref.forward, bw_step,
current_disp_world2grid, self.step_length)
del bw_step
# Keep track of the energy
bw_energy = self.metric.get_energy()
der = np.inf
n_iter = len(self.energy_list)
if len(self.energy_list) >= self.energy_window:
der = self._get_energy_derivative()
if self.verbosity >= VerbosityLevels.DIAGNOSE:
ch = '-' if np.isnan(der) else der
print('%d:\t%0.6f\t%0.6f\t%0.6f\t%s' %
(n_iter, fw_energy, bw_energy, fw_energy + bw_energy, ch))
self.energy_list.append(fw_energy + bw_energy)
# Invert the forward model's forward field
self.static_to_ref.backward = np.array(
self.invert_vector_field(
self.static_to_ref.forward,
current_disp_world2grid,
current_disp_spacing,
self.inv_iter, self.inv_tol, self.static_to_ref.backward))
# Invert the backward model's forward field
self.moving_to_ref.backward = np.array(
self.invert_vector_field(
self.moving_to_ref.forward,
current_disp_world2grid,
current_disp_spacing,
self.inv_iter, self.inv_tol, self.moving_to_ref.backward))
# Invert the forward model's backward field
self.static_to_ref.forward = np.array(
self.invert_vector_field(
self.static_to_ref.backward,
current_disp_world2grid,
current_disp_spacing,
self.inv_iter, self.inv_tol, self.static_to_ref.forward))
# Invert the backward model's backward field
self.moving_to_ref.forward = np.array(
self.invert_vector_field(
self.moving_to_ref.backward,
current_disp_world2grid,
current_disp_spacing,
self.inv_iter, self.inv_tol, self.moving_to_ref.forward))
# Free resources no longer needed to compute the forward and backward
# steps
if self.callback is not None:
self.callback(self, RegistrationStages.ITER_END)
self.metric.free_iteration()
return der
def _approximate_derivative_direct(self, x, y):
r"""Derivative of the degree-2 polynomial fit of the given x, y pairs
Directly computes the derivative of the least-squares-fit quadratic
function estimated from (x[...],y[...]) pairs.
Parameters
----------
x : array, shape (n,)
increasing array representing the x-coordinates of the points to
be fit
y : array, shape (n,)
array representing the y-coordinates of the points to be fit
Returns
-------
y0 : float
the estimated derivative at x0 = 0.5*len(x)
"""
x = np.asarray(x)
y = np.asarray(y)
X = np.row_stack((x**2, x, np.ones_like(x)))
XX = (X).dot(X.T)
b = X.dot(y)
beta = npl.solve(XX, b)
x0 = 0.5 * len(x)
y0 = 2.0 * beta[0] * (x0) + beta[1]
return y0
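# Worked example (illustrative): for x = [0, 1, 2, 3, 4] and y = x**2 the
# least-squares fit is exact (beta = [1, 0, 0]), x0 = 0.5 * 5 = 2.5 and the
# reported derivative is 2 * 1 * 2.5 + 0 = 5.0.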
def _get_energy_derivative(self):
r"""Approximate derivative of the energy profile
Returns the derivative of the estimated energy as a function of "time"
(iterations) at the last iteration
"""
n_iter = len(self.energy_list)
if n_iter < self.energy_window:
raise ValueError('Not enough data to fit the energy profile')
x = range(self.energy_window)
y = self.energy_list[(n_iter - self.energy_window):n_iter]
ss = sum(y)
if(ss > 0):
ss *= -1
y = [v / ss for v in y]
der = self._approximate_derivative_direct(x, y)
return der
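# Note (interpretation): dividing the window by a non-positive sum flips the
# sign of the profile, so a still-decreasing energy yields a positive
# normalized derivative and the `self.opt_tol < derivative` test in _optimize
# keeps iterating; once the energy flattens out, the derivative drops below
# opt_tol and the current level terminates.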
def _optimize(self):
r"""Starts the optimization
The main multi-scale symmetric optimization algorithm
"""
self.full_energy_profile = []
if self.callback is not None:
self.callback(self, RegistrationStages.OPT_START)
for level in range(self.levels - 1, -1, -1):
if self.verbosity >= VerbosityLevels.STATUS:
print('Optimizing level %d' % level)
self.current_level = level
self.metric.set_levels_below(self.levels - level)
self.metric.set_levels_above(level)
if level < self.levels - 1:
expand_factors = \
self.static_ss.get_expand_factors(level+1, level)
new_shape = self.static_ss.get_domain_shape(level)
self.static_to_ref.expand_fields(expand_factors, new_shape)
self.moving_to_ref.expand_fields(expand_factors, new_shape)
self.niter = 0
self.energy_list = []
derivative = np.inf
if self.callback is not None:
self.callback(self, RegistrationStages.SCALE_START)
while ((self.niter < self.level_iters[self.levels - 1 - level]) and
(self.opt_tol < derivative)):
derivative = self._iterate()
self.niter += 1
self.full_energy_profile.extend(self.energy_list)
if self.callback is not None:
self.callback(self, RegistrationStages.SCALE_END)
# Reporting mean and std in stats[1] and stats[2]
residual, stats = self.static_to_ref.compute_inversion_error()
if self.verbosity >= VerbosityLevels.DIAGNOSE:
print('Static-Reference Residual error: %0.6f (%0.6f)'
% (stats[1], stats[2]))
residual, stats = self.moving_to_ref.compute_inversion_error()
if self.verbosity >= VerbosityLevels.DIAGNOSE:
print('Moving-Reference Residual error: %0.6f (%0.6f)'
% (stats[1], stats[2]))
# Compose the two partial transformations
self.static_to_ref = self.moving_to_ref.warp_endomorphism(
self.static_to_ref.inverse()).inverse()
# Report mean and std for the composed deformation field
residual, stats = self.static_to_ref.compute_inversion_error()
if self.verbosity >= VerbosityLevels.DIAGNOSE:
print('Final residual error: %0.6f (%0.6f)' % (stats[1], stats[2]))
if self.callback is not None:
self.callback(self, RegistrationStages.OPT_END)
def optimize(self, static, moving, static_grid2world=None,
moving_grid2world=None, prealign=None):
r"""
Starts the optimization
Parameters
----------
static : array, shape (S, R, C) or (R, C)
the image to be used as reference during optimization. The
displacement fields will have the same discretization as the static
image.
moving : array, shape (S, R, C) or (R, C)
the image to be used as "moving" during optimization. Since the
deformation fields' discretization is the same as the static image,
it is necessary to pre-align the moving image to ensure its domain
lies inside the domain of the deformation fields. This is assumed
to be accomplished by "pre-aligning" the moving image towards the
static using an affine transformation given by the 'prealign'
matrix
static_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation associated to the static image
moving_grid2world : array, shape (dim+1, dim+1)
the voxel-to-space transformation associated to the moving image
prealign : array, shape (dim+1, dim+1)
the affine transformation (operating on the physical space)
pre-aligning the moving image towards the static
Returns
-------
static_to_ref : DiffeomorphicMap object
the diffeomorphic map that brings the moving image towards the
static one in the forward direction (i.e. by calling
static_to_ref.transform) and the static image towards the
moving one in the backward direction (i.e. by calling
static_to_ref.transform_inverse).
"""
if self.verbosity >= VerbosityLevels.DEBUG:
print("Pre-align:", prealign)
self._init_optimizer(static.astype(floating), moving.astype(floating),
static_grid2world, moving_grid2world, prealign)
self._optimize()
self._end_optimizer()
self.static_to_ref.forward = np.array(self.static_to_ref.forward)
self.static_to_ref.backward = np.array(self.static_to_ref.backward)
return self.static_to_ref
| bsd-3-clause | -5,623,101,186,973,737,000 | 41.582935 | 79 | 0.602895 | false |
Nikea/VisTrails | vistrails/core/system/__init__.py | 2 | 14245 | ###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import with_statement
import datetime
import functools
import getpass
import locale
import os
import platform
import socket
import subprocess
import sys
import time
import urllib2
from vistrails.core import debug
from vistrails.core.utils import unimplemented, VistrailsInternalError, Chdir
###############################################################################
from common import *
def with_c_locale(func):
@functools.wraps(func)
def newfunc(*args, **kwargs):
previous_locale = locale.setlocale(locale.LC_TIME)
locale.setlocale(locale.LC_TIME, 'C')
try:
return func(*args, **kwargs)
finally:
locale.setlocale(locale.LC_TIME, previous_locale)
return newfunc
@with_c_locale
def strptime(*args, **kwargs):
"""Version of datetime.strptime that always uses the C locale.
This is because date strings are used internally in the database, and
should not be localized.
"""
return datetime.datetime.strptime(*args, **kwargs)
@with_c_locale
def time_strptime(*args, **kwargs):
"""Version of time.strptime that always uses the C locale.
This is because date strings are used internally in the database, and
should not be localized.
"""
return time.strptime(*args, **kwargs)
@with_c_locale
def strftime(dt, *args, **kwargs):
"""Version of datetime.strftime that always uses the C locale.
This is because date strings are used internally in the database, and
should not be localized.
"""
if hasattr(dt, 'strftime'):
return dt.strftime(*args, **kwargs)
else:
return time.strftime(dt, *args, **kwargs)
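# Example (illustrative): strftime(datetime.datetime(2014, 7, 1), '%d %b %Y')
# returns '01 Jul 2014' regardless of the user's locale, whereas calling
# datetime.strftime directly could localize the month abbreviation.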
##############################################################################
systemType = platform.system()
if systemType in ['Windows', 'Microsoft']:
from vistrails.core.system.windows import *
elif systemType in ['Linux']:
from vistrails.core.system.linux import *
elif systemType in ['Darwin']:
from vistrails.core.system.osx import *
else:
debug.critical("VisTrails could not detect your operating system.")
sys.exit(1)
###############################################################################
# Makes sure root directory is sensible.
if __name__ == '__main__':
_thisDir = sys.argv[0]
else:
_thisDir = sys.modules[__name__].__file__
_thisDir = os.path.split(_thisDir)[0]
__rootDir = os.path.realpath(os.path.join(_thisDir,
'..',
'..'))
__dataDir = os.path.realpath(os.path.join(__rootDir,
'data'))
__fileDir = os.path.realpath(os.path.join(__rootDir,
'..','examples'))
if systemType in ['Darwin'] and not os.path.exists(__fileDir):
# Assume we are running from py2app
__fileDir = os.path.realpath(os.path.join(__rootDir,
'/'.join(['..']*6),'examples'))
__examplesDir = __fileDir
__defaultFileType = '.vt'
__defaultPkgPrefix = 'org.vistrails.vistrails'
def get_vistrails_default_pkg_prefix():
return __defaultPkgPrefix
def get_vistrails_basic_pkg_id():
return "%s.basic" % get_vistrails_default_pkg_prefix()
def get_vistrails_directory(config_key, conf=None):
if conf is None:
from vistrails.core.configuration import get_vistrails_configuration
conf = get_vistrails_configuration()
if conf.has_deep_value(config_key):
d = conf.get_deep_value(config_key)
if os.path.isabs(d):
return d
else:
return os.path.join(current_dot_vistrails(conf), d)
return None
def set_vistrails_data_directory(d):
""" set_vistrails_data_directory(d:str) -> None
Sets vistrails data directory taking into account environment variables
"""
global __dataDir
new_d = os.path.expanduser(d)
new_d = os.path.expandvars(new_d)
while new_d != d:
d = new_d
new_d = os.path.expandvars(d)
__dataDir = os.path.realpath(d)
def set_vistrails_file_directory(d):
""" set_vistrails_file_directory(d: str) -> None
Sets vistrails file directory taking into account environment variables
"""
global __fileDir
new_d = os.path.expanduser(d)
new_d = os.path.expandvars(new_d)
while new_d != d:
d = new_d
new_d = os.path.expandvars(d)
__fileDir = os.path.realpath(d)
def set_vistrails_root_directory(d):
""" set_vistrails_root_directory(d:str) -> None
Sets vistrails root directory taking into account environment variables
"""
global __rootDir
new_d = os.path.expanduser(d)
new_d = os.path.expandvars(new_d)
while new_d != d:
d = new_d
new_d = os.path.expandvars(d)
__rootDir = os.path.realpath(d)
def set_vistrails_default_file_type(t):
""" set_vistrails_default_file_type(t:str) -> None
Which file type to use when the user doesn't provide a file extension
"""
global __defaultFileType
if t in ['.vt', '.xml']:
__defaultFileType = t
else:
__defaultFileType = '.vt'
def vistrails_root_directory():
""" vistrails_root_directory() -> str
Returns vistrails root directory
"""
return __rootDir
def vistrails_file_directory():
""" vistrails_file_directory() -> str
Returns current vistrails file directory
"""
return __fileDir
def vistrails_examples_directory():
""" vistrails_file_directory() -> str
Returns vistrails examples directory
"""
return __examplesDir
def vistrails_data_directory():
""" vistrails_data_directory() -> str
Returns vistrails data directory
"""
return __dataDir
def vistrails_default_file_type():
""" vistrails_default_file_type() -> str
Returns vistrails file type
"""
return __defaultFileType
def packages_directory():
""" packages_directory() -> str
Returns vistrails packages directory
"""
return os.path.join(vistrails_root_directory(),'packages')
def blank_vistrail_file():
unimplemented()
def resource_directory():
""" resource_directory() -> str
Returns vistrails gui resource directory
"""
return os.path.join(vistrails_root_directory(),
'gui', 'resources')
def default_options_file():
""" default_options_file() -> str
Returns vistrails default options file
"""
return os.path.join(home_directory(), ".vistrailsrc")
def default_dot_vistrails():
""" default_dot_vistrails() -> str
Returns the default VisTrails per-user directory.
"""
return os.path.join(home_directory(), '.vistrails')
def current_dot_vistrails(conf=None):
""" current_dot_vistrails() -> str
Returns the VisTrails per-user directory.
"""
if conf is None:
from vistrails.core.configuration import get_vistrails_configuration
conf = get_vistrails_configuration()
return conf.dotVistrails
def default_connections_file():
""" default_connections_file() -> str
Returns default Vistrails per-user connections file
"""
return os.path.join(current_dot_vistrails(), 'connections.xml')
VERSION = '2.2'
def vistrails_version():
"""vistrails_version() -> string - Returns the current VisTrails version."""
# 0.1 was the Vis2005 version
# 0.2 was the SIGMOD demo version
# 0.3 was the plugin/vtk version
# 0.4 is cleaned up version with new GUI
# 1.0 is version with new schema
return VERSION
def get_latest_vistrails_version():
"""get_latest_vistrails_version() -> string - Returns latest vistrails
release version as queried from vistrails.org"""
version = ''
version_url = \
"http://www.vistrails.org/download/download.php?id=release_version.txt"
try:
request = urllib2.Request(version_url)
get_latest_version = urllib2.urlopen(request)
version = get_latest_version.read().strip()
except urllib2.HTTPError, err:
debug.warning("Unable to check for updates: %s" % str(err))
return version
return version
def new_vistrails_release_exists():
""" new_vistrail_release_exists() -> (bool, str)
Returns (True, new_version_str) if newer version exists
"""
local_version = [int(x) for x in vistrails_version().split('.')]
remote_str = get_latest_vistrails_version()
if remote_str:
remote_version = [int(x) for x in remote_str.split('.')]
else:
remote_version = [0]
if cmp(local_version, remote_version) < 0:
return (True, remote_str)
return (False, None)
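# Example (illustrative values): a local version '2.2' against a remote
# '2.2.1' compares as [2, 2] < [2, 2, 1], so (True, '2.2.1') is returned.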
def vistrails_revision():
"""vistrails_revision() -> str
When run on a working copy, shows the current git revision, otherwise
shows the latest release version
"""
git_dir = os.path.join(vistrails_root_directory(), '..')
with Chdir(git_dir):
release = vistrails_version()
import vistrails.core.requirements
if vistrails.core.requirements.executable_file_exists('git'):
lines = []
result = execute_cmdline(
['git', 'describe', '--always'],
lines)
if len(lines) == 1:
if result == 0:
release = lines[0].strip(" \n")
return release
_registry = None
def get_module_registry():
global _registry
if _registry is None:
from vistrails.core.modules.module_registry import get_module_registry
_registry = get_module_registry()
return _registry
def short_about_string():
return """VisTrails version %s (%s) -- [email protected]""" % \
(vistrails_version(), vistrails_revision())
def about_string():
"""about_string() -> string - Returns the about string for VisTrails."""
return """VisTrails version %s (%s) -- [email protected]
Copyright (C) 2011-2014 NYU-Poly. Copyright (C) 2006-2011 University of Utah.
All rights reserved.
http://www.vistrails.org
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of Utah nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.""" % (vistrails_version(),
vistrails_revision())
###############################################################################
import unittest
if __name__ == '__main__':
unittest.main()
class TestSystem(unittest.TestCase):
def test_vistrails_revision(self):
r = vistrails_root_directory()
with Chdir(r):
v1 = vistrails_revision()
try:
with Chdir(os.path.join(r, '..')):
self.assertEquals(v1, vistrails_revision())
except AssertionError:
raise
except Exception:
pass
try:
with Chdir(os.path.join(r, '..', '..')):
self.assertEquals(v1, vistrails_revision())
except AssertionError:
raise
except Exception:
pass
| bsd-3-clause | -543,454,255,651,092,200 | 32.051044 | 83 | 0.635872 | false |
mulkieran/anaconda | pyanaconda/ui/gui/spokes/datetime_spoke.py | 6 | 39276 | # Datetime configuration spoke class
#
# Copyright (C) 2012-2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vratislav Podzimek <[email protected]>
#
import logging
log = logging.getLogger("anaconda")
from gi.repository import GLib, Gdk, Gtk, TimezoneMap
from pyanaconda.ui.communication import hubQ
from pyanaconda.ui.common import FirstbootSpokeMixIn
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.gui.spokes import NormalSpoke
from pyanaconda.ui.categories.localization import LocalizationCategory
from pyanaconda.ui.gui.utils import gtk_action_nowait, gtk_action_wait, gtk_call_once, override_cell_property
from pyanaconda.ui.gui.helpers import GUIDialogInputCheckHandler
from pyanaconda.ui.helpers import InputCheck
from pyanaconda.i18n import _, CN_
from pyanaconda.timezone import NTP_SERVICE, get_all_regions_and_timezones, get_timezone, is_valid_timezone
from pyanaconda.localization import get_xlated_timezone, resolve_date_format
from pyanaconda import iutil
from pyanaconda import isys
from pyanaconda import network
from pyanaconda import nm
from pyanaconda import ntp
from pyanaconda import flags
from pyanaconda import constants
from pyanaconda.threads import threadMgr, AnacondaThread
import datetime
import re
import threading
import locale as locale_mod
__all__ = ["DatetimeSpoke"]
SERVER_OK = 0
SERVER_NOK = 1
SERVER_QUERY = 2
DEFAULT_TZ = "America/New_York"
SPLIT_NUMBER_SUFFIX_RE = re.compile(r'([^0-9]*)([-+])([0-9]+)')
def _compare_regions(reg_xlated1, reg_xlated2):
"""Compare two pairs of regions and their translations."""
reg1, xlated1 = reg_xlated1
reg2, xlated2 = reg_xlated2
# sort the Etc timezones to the end
if reg1 == "Etc" and reg2 == "Etc":
return 0
elif reg1 == "Etc":
return 1
elif reg2 == "Etc":
return -1
else:
# otherwise compare the translated names
return locale_mod.strcoll(xlated1, xlated2)
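# Example (illustrative): _compare_regions(('Etc', 'Etc'), ('Africa', 'Africa'))
# returns 1, so the 'Etc' pseudo-region is pushed to the end of the list.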
def _compare_cities(city_xlated1, city_xlated2):
"""Compare two paris of cities and their translations."""
# if there are "cities" ending with numbers (like GMT+-X), we need to sort
# them based on their numbers
val1 = city_xlated1[1]
val2 = city_xlated2[1]
match1 = SPLIT_NUMBER_SUFFIX_RE.match(val1)
match2 = SPLIT_NUMBER_SUFFIX_RE.match(val2)
if match1 is None and match2 is None:
# no +-X suffix, just compare the strings
return locale_mod.strcoll(val1, val2)
if match1 is None or match2 is None:
# one with the +-X suffix, compare the prefixes
if match1:
prefix, _sign, _suffix = match1.groups()
return locale_mod.strcoll(prefix, val2)
else:
prefix, _sign, _suffix = match2.groups()
return locale_mod.strcoll(val1, prefix)
# both have the +-X suffix
prefix1, sign1, suffix1 = match1.groups()
prefix2, sign2, suffix2 = match2.groups()
if prefix1 == prefix2:
# same prefixes, let signs determine
return cmp(int(sign1 + suffix1), int(sign2 + suffix2))
else:
# compare prefixes
return locale_mod.strcoll(prefix1, prefix2)
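# Example (illustrative): plain string collation would place 'GMT+10' before
# 'GMT+2'; the numeric suffix handling above makes
# _compare_cities(('GMT+2', 'GMT+2'), ('GMT+10', 'GMT+10')) negative, so
# 'GMT+2' sorts first as expected.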
def _new_date_field_box(store):
"""
Creates new date field box (a combobox and a label in a horizontal box) for
a given store.
"""
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
suffix_label = Gtk.Label()
renderer = Gtk.CellRendererText()
combo = Gtk.ComboBox(model=store)
combo.pack_start(renderer, False)
# idx is column 0, string we want to show is 1
combo.add_attribute(renderer, "text", 1)
box.pack_start(combo, False, False, 0)
box.pack_start(suffix_label, False, False, 0)
return (box, combo, suffix_label)
class NTPconfigDialog(GUIObject, GUIDialogInputCheckHandler):
builderObjects = ["ntpConfigDialog", "addImage", "serversStore"]
mainWidgetName = "ntpConfigDialog"
uiFile = "spokes/datetime_spoke.glade"
def __init__(self, *args):
GUIObject.__init__(self, *args)
GUIDialogInputCheckHandler.__init__(self)
#epoch is increased when serversStore is repopulated
self._epoch = 0
self._epoch_lock = threading.Lock()
@property
def working_server(self):
for row in self._serversStore:
if row[1] == SERVER_OK and row[2]:
#server is checked and working
return row[0]
return None
@property
def servers(self):
ret = list()
for row in self._serversStore:
if row[2]:
#server checked
ret.append(row[0])
return ret
def _render_working(self, column, renderer, model, itr, user_data=None):
#get the value in the second column
value = model[itr][1]
if value == SERVER_QUERY:
return "dialog-question"
elif value == SERVER_OK:
return "emblem-default"
else:
return "dialog-error"
def initialize(self):
self.window.set_size_request(500, 400)
workingColumn = self.builder.get_object("workingColumn")
workingRenderer = self.builder.get_object("workingRenderer")
override_cell_property(workingColumn, workingRenderer, "icon-name",
self._render_working)
self._serverEntry = self.builder.get_object("serverEntry")
self._serversStore = self.builder.get_object("serversStore")
self._addButton = self.builder.get_object("addButton")
# Validate the server entry box
self._serverCheck = self.add_check(self._serverEntry, self._validateServer)
self._serverCheck.update_check_status()
self._initialize_store_from_config()
def _initialize_store_from_config(self):
self._serversStore.clear()
if self.data.timezone.ntpservers:
for server in self.data.timezone.ntpservers:
self._add_server(server)
else:
try:
for server in ntp.get_servers_from_config():
self._add_server(server)
except ntp.NTPconfigError:
log.warning("Failed to load NTP servers configuration")
def _validateServer(self, inputcheck):
server = self.get_input(inputcheck.input_obj)
# If not set, fail the check to keep the button insensitive, but don't
# display an error
if not server:
return InputCheck.CHECK_SILENT
(valid, error) = network.sanityCheckHostname(server)
if not valid:
return "'%s' is not a valid hostname: %s" % (server, error)
else:
return InputCheck.CHECK_OK
def set_status(self, inputcheck):
# Use GUIDialogInputCheckHandler to set the error message
GUIDialogInputCheckHandler.set_status(self, inputcheck)
# Set the sensitivity of the add button based on the result
self._addButton.set_sensitive(inputcheck.check_status == InputCheck.CHECK_OK)
def refresh(self):
self._serverEntry.grab_focus()
def refresh_servers_state(self):
itr = self._serversStore.get_iter_first()
while itr:
self._refresh_server_working(itr)
itr = self._serversStore.iter_next(itr)
def run(self):
self.window.show()
rc = self.window.run()
self.window.hide()
#OK clicked
if rc == 1:
new_servers = list()
for row in self._serversStore:
#if server checked
if row[2]:
new_servers.append(row[0])
if flags.can_touch_runtime_system("save NTP servers configuration"):
ntp.save_servers_to_config(new_servers)
iutil.restart_service(NTP_SERVICE)
#Cancel clicked, window destroyed...
else:
self._epoch_lock.acquire()
self._epoch += 1
self._epoch_lock.release()
self._initialize_store_from_config()
return rc
def _set_server_ok_nok(self, itr, epoch_started):
"""
If the server is working, set its data to SERVER_OK, otherwise set its
data to SERVER_NOK.
:param itr: iterator of the $server's row in the self._serversStore
"""
@gtk_action_nowait
def set_store_value(arg_tuple):
"""
We need a function for this, because this way it can be added to
the MainLoop with thread-safe GLib.idle_add (but only with one
argument).
:param arg_tuple: (store, itr, column, value)
"""
(store, itr, column, value) = arg_tuple
store.set_value(itr, column, value)
orig_hostname = self._serversStore[itr][0]
server_working = ntp.ntp_server_working(self._serversStore[itr][0])
#do not let dialog change epoch while we are modifying data
self._epoch_lock.acquire()
#check if we are in the same epoch as the dialog (and the serversStore)
#and if the server wasn't changed meanwhile
if epoch_started == self._epoch:
actual_hostname = self._serversStore[itr][0]
if orig_hostname == actual_hostname:
if server_working:
set_store_value((self._serversStore,
itr, 1, SERVER_OK))
else:
set_store_value((self._serversStore,
itr, 1, SERVER_NOK))
self._epoch_lock.release()
@gtk_action_nowait
def _refresh_server_working(self, itr):
""" Runs a new thread with _set_server_ok_nok(itr) as a taget. """
self._serversStore.set_value(itr, 1, SERVER_QUERY)
threadMgr.add(AnacondaThread(prefix="AnaNTPserver",
target=self._set_server_ok_nok,
args=(itr, self._epoch)))
def _add_server(self, server):
"""
Checks if a given server is a valid hostname and if yes, adds it
to the list of servers.
:param server: string containing hostname
"""
for row in self._serversStore:
if row[0] == server:
#do not add duplicate items
return
itr = self._serversStore.append([server, SERVER_QUERY, True])
#do not block UI while starting thread (may take some time)
self._refresh_server_working(itr)
def on_entry_activated(self, entry, *args):
# Check that the input check has passed
if self._serverCheck.check_status == InputCheck.CHECK_OK:
self._add_server(entry.get_text())
entry.set_text("")
def on_add_clicked(self, *args):
self._serverEntry.emit("activate")
def on_use_server_toggled(self, renderer, path, *args):
itr = self._serversStore.get_iter(path)
old_value = self._serversStore[itr][2]
self._serversStore.set_value(itr, 2, not old_value)
def on_server_edited(self, renderer, path, new_text, *args):
if not path:
return
(valid, error) = network.sanityCheckHostname(new_text)
if not valid:
log.error("'%s' is not a valid hostname: %s", new_text, error)
return
itr = self._serversStore.get_iter(path)
if self._serversStore[itr][0] == new_text:
return
self._serversStore.set_value(itr, 0, new_text)
self._serversStore.set_value(itr, 1, SERVER_QUERY)
self._refresh_server_working(itr)
class DatetimeSpoke(FirstbootSpokeMixIn, NormalSpoke):
builderObjects = ["datetimeWindow",
"days", "months", "years", "regions", "cities",
"upImage", "upImage1", "upImage2", "downImage",
"downImage1", "downImage2", "downImage3", "configImage",
"citiesFilter", "daysFilter",
"cityCompletion", "regionCompletion",
]
mainWidgetName = "datetimeWindow"
uiFile = "spokes/datetime_spoke.glade"
helpFile = "DateTimeSpoke.xml"
category = LocalizationCategory
icon = "preferences-system-time-symbolic"
title = CN_("GUI|Spoke", "_TIME & DATE")
# Hack to get libtimezonemap loaded for GtkBuilder
# see https://bugzilla.gnome.org/show_bug.cgi?id=712184
_hack = TimezoneMap.TimezoneMap()
del(_hack)
def __init__(self, *args):
NormalSpoke.__init__(self, *args)
# taking values from the kickstart file?
self._kickstarted = flags.flags.automatedInstall
self._update_datetime_timer_id = None
self._start_updating_timer_id = None
self._shown = False
self._tz = None
def initialize(self):
NormalSpoke.initialize(self)
self._daysStore = self.builder.get_object("days")
self._monthsStore = self.builder.get_object("months")
self._yearsStore = self.builder.get_object("years")
self._regionsStore = self.builder.get_object("regions")
self._citiesStore = self.builder.get_object("cities")
self._tzmap = self.builder.get_object("tzmap")
self._dateBox = self.builder.get_object("dateBox")
# we need to know it the new value is the same as previous or not
self._old_region = None
self._old_city = None
self._regionCombo = self.builder.get_object("regionCombobox")
self._cityCombo = self.builder.get_object("cityCombobox")
self._daysFilter = self.builder.get_object("daysFilter")
self._daysFilter.set_visible_func(self.existing_date, None)
self._citiesFilter = self.builder.get_object("citiesFilter")
self._citiesFilter.set_visible_func(self.city_in_region, None)
self._hoursLabel = self.builder.get_object("hoursLabel")
self._minutesLabel = self.builder.get_object("minutesLabel")
self._amPmUp = self.builder.get_object("amPmUpButton")
self._amPmDown = self.builder.get_object("amPmDownButton")
self._amPmLabel = self.builder.get_object("amPmLabel")
self._radioButton24h = self.builder.get_object("timeFormatRB")
self._amPmRevealer = self.builder.get_object("amPmRevealer")
# create widgets for displaying/configuring date
day_box, self._dayCombo, day_label = _new_date_field_box(self._daysFilter)
self._dayCombo.connect("changed", self.on_day_changed)
month_box, self._monthCombo, month_label = _new_date_field_box(self._monthsStore)
self._monthCombo.connect("changed", self.on_month_changed)
year_box, self._yearCombo, year_label = _new_date_field_box(self._yearsStore)
self._yearCombo.connect("changed", self.on_year_changed)
# get the right order for date widgets and respective formats and put
# widgets in place
widgets, formats = resolve_date_format(year_box, month_box, day_box)
for widget in widgets:
self._dateBox.pack_start(widget, False, False, 0)
self._day_format, suffix = formats[widgets.index(day_box)]
day_label.set_text(suffix)
self._month_format, suffix = formats[widgets.index(month_box)]
month_label.set_text(suffix)
self._year_format, suffix = formats[widgets.index(year_box)]
year_label.set_text(suffix)
self._ntpSwitch = self.builder.get_object("networkTimeSwitch")
self._regions_zones = get_all_regions_and_timezones()
# Set the initial sensitivity of the AM/PM toggle based on the time-type selected
self._radioButton24h.emit("toggled")
if not flags.can_touch_runtime_system("modify system time and date"):
self._set_date_time_setting_sensitive(False)
self._config_dialog = NTPconfigDialog(self.data)
self._config_dialog.initialize()
threadMgr.add(AnacondaThread(name=constants.THREAD_DATE_TIME,
target=self._initialize))
def _initialize(self):
# a bit hacky way, but should return the translated strings
for i in range(1, 32):
day = datetime.date(2000, 1, i).strftime(self._day_format)
self.add_to_store_idx(self._daysStore, i, day)
for i in range(1, 13):
month = datetime.date(2000, i, 1).strftime(self._month_format)
self.add_to_store_idx(self._monthsStore, i, month)
for i in range(1990, 2051):
year = datetime.date(i, 1, 1).strftime(self._year_format)
self.add_to_store_idx(self._yearsStore, i, year)
cities = set()
xlated_regions = ((region, get_xlated_timezone(region))
for region in self._regions_zones.keys())
for region, xlated in sorted(xlated_regions, cmp=_compare_regions):
self.add_to_store_xlated(self._regionsStore, region, xlated)
for city in self._regions_zones[region]:
cities.add((city, get_xlated_timezone(city)))
for city, xlated in sorted(cities, cmp=_compare_cities):
self.add_to_store_xlated(self._citiesStore, city, xlated)
self._update_datetime_timer_id = None
if is_valid_timezone(self.data.timezone.timezone):
self._set_timezone(self.data.timezone.timezone)
elif not flags.flags.automatedInstall:
log.warning("%s is not a valid timezone, falling back to default (%s)",
self.data.timezone.timezone, DEFAULT_TZ)
self._set_timezone(DEFAULT_TZ)
self.data.timezone.timezone = DEFAULT_TZ
time_init_thread = threadMgr.get(constants.THREAD_TIME_INIT)
if time_init_thread is not None:
hubQ.send_message(self.__class__.__name__,
_("Restoring hardware time..."))
threadMgr.wait(constants.THREAD_TIME_INIT)
hubQ.send_ready(self.__class__.__name__, False)
@property
def status(self):
if self.data.timezone.timezone:
if is_valid_timezone(self.data.timezone.timezone):
return _("%s timezone") % get_xlated_timezone(self.data.timezone.timezone)
else:
return _("Invalid timezone")
else:
location = self._tzmap.get_location()
if location and location.get_property("zone"):
return _("%s timezone") % get_xlated_timezone(location.get_property("zone"))
else:
return _("Nothing selected")
def apply(self):
self._shown = False
# we could use self._tzmap.get_timezone() here, but it returns "" if
# Etc/XXXXXX timezone is selected
region = self._get_active_region()
city = self._get_active_city()
# nothing selected, just leave the spoke and
# return to hub without changing anything
if not region or not city:
return
old_tz = self.data.timezone.timezone
new_tz = region + "/" + city
self.data.timezone.timezone = new_tz
if old_tz != new_tz:
# new values, not from kickstart
self.data.timezone.seen = False
self._kickstarted = False
self.data.timezone.nontp = not self._ntpSwitch.get_active()
def execute(self):
if self._update_datetime_timer_id is not None:
GLib.source_remove(self._update_datetime_timer_id)
self._update_datetime_timer_id = None
self.data.timezone.setup(self.data)
@property
def ready(self):
return not threadMgr.get("AnaDateTimeThread")
@property
def completed(self):
if self._kickstarted and not self.data.timezone.seen:
# taking values from kickstart, but not specified
return False
else:
return is_valid_timezone(self.data.timezone.timezone)
@property
def mandatory(self):
return True
def refresh(self):
self._shown = True
#update the displayed time
self._update_datetime_timer_id = GLib.timeout_add_seconds(1,
self._update_datetime)
self._start_updating_timer_id = None
if is_valid_timezone(self.data.timezone.timezone):
self._tzmap.set_timezone(self.data.timezone.timezone)
self._update_datetime()
has_active_network = nm.nm_is_connected()
if not has_active_network:
self._show_no_network_warning()
else:
self.clear_info()
gtk_call_once(self._config_dialog.refresh_servers_state)
if flags.can_touch_runtime_system("get NTP service state"):
ntp_working = has_active_network and iutil.service_running(NTP_SERVICE)
else:
ntp_working = not self.data.timezone.nontp
self._ntpSwitch.set_active(ntp_working)
@gtk_action_wait
def _set_timezone(self, timezone):
"""
Sets timezone to the city/region comboboxes and the timezone map.
:param timezone: timezone to set
:type timezone: str
:return: if successfully set or not
:rtype: bool
"""
parts = timezone.split("/", 1)
if len(parts) != 2:
# invalid timezone cannot be set
return False
region, city = parts
self._set_combo_selection(self._regionCombo, region)
self._set_combo_selection(self._cityCombo, city)
return True
@gtk_action_nowait
def add_to_store_xlated(self, store, item, xlated):
store.append([item, xlated])
@gtk_action_nowait
def add_to_store(self, store, item):
store.append([item])
@gtk_action_nowait
def add_to_store_idx(self, store, idx, item):
store.append([idx, item])
def existing_date(self, days_model, days_iter, user_data=None):
if not days_iter:
return False
day = days_model[days_iter][0]
#days 1-28 are in every month every year
if day < 29:
return True
months_model = self._monthCombo.get_model()
months_iter = self._monthCombo.get_active_iter()
if not months_iter:
return True
years_model = self._yearCombo.get_model()
years_iter = self._yearCombo.get_active_iter()
if not years_iter:
return True
try:
datetime.date(years_model[years_iter][0],
months_model[months_iter][0], day)
return True
except ValueError:
return False
def _get_active_city(self):
cities_model = self._cityCombo.get_model()
cities_iter = self._cityCombo.get_active_iter()
if not cities_iter:
return None
return cities_model[cities_iter][0]
def _get_active_region(self):
regions_model = self._regionCombo.get_model()
regions_iter = self._regionCombo.get_active_iter()
if not regions_iter:
return None
return regions_model[regions_iter][0]
def city_in_region(self, model, itr, user_data=None):
if not itr:
return False
city = model[itr][0]
region = self._get_active_region()
if not region:
return False
return city in self._regions_zones[region]
def _set_amPm_part_sensitive(self, sensitive):
for widget in (self._amPmUp, self._amPmDown, self._amPmLabel):
widget.set_sensitive(sensitive)
def _to_amPm(self, hours):
if hours >= 12:
day_phase = _("PM")
else:
day_phase = _("AM")
new_hours = ((hours - 1) % 12) + 1
return (new_hours, day_phase)
def _to_24h(self, hours, day_phase):
correction = 0
if day_phase == _("AM") and hours == 12:
correction = -12
elif day_phase == _("PM") and hours != 12:
correction = 12
return (hours + correction) % 24
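# Sanity-check examples (illustrative): _to_amPm(0) -> (12, _("AM")),
# _to_amPm(13) -> (1, _("PM")); _to_24h(12, _("AM")) -> 0 and
# _to_24h(1, _("PM")) -> 13.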
def _update_datetime(self):
now = datetime.datetime.now(self._tz)
if self._radioButton24h.get_active():
self._hoursLabel.set_text("%0.2d" % now.hour)
else:
hours, amPm = self._to_amPm(now.hour)
self._hoursLabel.set_text("%0.2d" % hours)
self._amPmLabel.set_text(amPm)
self._minutesLabel.set_text("%0.2d" % now.minute)
self._set_combo_selection(self._dayCombo, now.day)
self._set_combo_selection(self._monthCombo, now.month)
self._set_combo_selection(self._yearCombo, now.year)
#GLib's timer is driven by the return value of the function.
#It runs the fuction periodically while the returned value
#is True.
return True
def _save_system_time(self):
"""
Returning False from this method removes the timer that would
otherwise call it again and again.
"""
self._start_updating_timer_id = None
if not flags.can_touch_runtime_system("save system time"):
return False
month = self._get_combo_selection(self._monthCombo)[0]
if not month:
return False
year = self._get_combo_selection(self._yearCombo)[0]
if not year:
return False
hours = int(self._hoursLabel.get_text())
if not self._radioButton24h.get_active():
hours = self._to_24h(hours, self._amPmLabel.get_text())
minutes = int(self._minutesLabel.get_text())
day = self._get_combo_selection(self._dayCombo)[0]
#day may be None if there is no such in the selected year and month
if day:
isys.set_system_date_time(year, month, day, hours, minutes, tz=self._tz)
#start the timer only when the spoke is shown
if self._shown and not self._update_datetime_timer_id:
self._update_datetime_timer_id = GLib.timeout_add_seconds(1,
self._update_datetime)
#run only once (after first 2 seconds of inactivity)
return False
def _stop_and_maybe_start_time_updating(self, interval=2):
"""
This method is called in every date/time-setting button's callback.
It removes the timer for updating the displayed date/time (we do not want
to change it while the user is setting it manually) and allows us to set
the new system date/time only after $interval seconds of inactivity on the
time-setting buttons.
This is done by the _start_updating_timer that is reset in this method.
So after $interval seconds of inactivity on the date/time-setting
buttons, the self._save_system_time method is invoked. Since it returns
False, this timer is then removed and only reactivated in this method
(thus in some date/time-setting button's callback).
"""
#do not start timers if the spoke is not shown
if not self._shown:
self._update_datetime()
self._save_system_time()
return
#stop time updating
if self._update_datetime_timer_id:
GLib.source_remove(self._update_datetime_timer_id)
self._update_datetime_timer_id = None
#stop previous $interval seconds timer (see below)
if self._start_updating_timer_id:
GLib.source_remove(self._start_updating_timer_id)
#let the user change date/time and after $interval seconds of inactivity
#save it as the system time and start updating the displayed date/time
self._start_updating_timer_id = GLib.timeout_add_seconds(interval,
self._save_system_time)
def _set_combo_selection(self, combo, item):
model = combo.get_model()
if not model:
return False
itr = model.get_iter_first()
while itr:
if model[itr][0] == item:
combo.set_active_iter(itr)
return True
itr = model.iter_next(itr)
return False
def _get_combo_selection(self, combo):
"""
Get the selected item of the combobox.
:return: selected item or None
"""
model = combo.get_model()
itr = combo.get_active_iter()
if not itr or not model:
return None, None
return model[itr][0], model[itr][1]
def _restore_old_city_region(self):
"""Restore stored "old" (or last valid) values."""
# check if there are old values to go back to
if self._old_region and self._old_city:
self._set_timezone(self._old_region + "/" + self._old_city)
def on_up_hours_clicked(self, *args):
self._stop_and_maybe_start_time_updating()
hours = int(self._hoursLabel.get_text())
if self._radioButton24h.get_active():
new_hours = (hours + 1) % 24
else:
amPm = self._amPmLabel.get_text()
            #let's not deal with magical AM/PM arithmetic
new_hours = self._to_24h(hours, amPm)
new_hours, new_amPm = self._to_amPm((new_hours + 1) % 24)
self._amPmLabel.set_text(new_amPm)
new_hours_str = "%0.2d" % new_hours
self._hoursLabel.set_text(new_hours_str)
def on_down_hours_clicked(self, *args):
self._stop_and_maybe_start_time_updating()
hours = int(self._hoursLabel.get_text())
if self._radioButton24h.get_active():
new_hours = (hours - 1) % 24
else:
amPm = self._amPmLabel.get_text()
            #let's not deal with magical AM/PM arithmetic
new_hours = self._to_24h(hours, amPm)
new_hours, new_amPm = self._to_amPm((new_hours - 1) % 24)
self._amPmLabel.set_text(new_amPm)
new_hours_str = "%0.2d" % new_hours
self._hoursLabel.set_text(new_hours_str)
def on_up_minutes_clicked(self, *args):
self._stop_and_maybe_start_time_updating()
minutes = int(self._minutesLabel.get_text())
minutes_str = "%0.2d" % ((minutes + 1) % 60)
self._minutesLabel.set_text(minutes_str)
def on_down_minutes_clicked(self, *args):
self._stop_and_maybe_start_time_updating()
minutes = int(self._minutesLabel.get_text())
minutes_str = "%0.2d" % ((minutes - 1) % 60)
self._minutesLabel.set_text(minutes_str)
def on_updown_ampm_clicked(self, *args):
self._stop_and_maybe_start_time_updating()
if self._amPmLabel.get_text() == _("AM"):
self._amPmLabel.set_text(_("PM"))
else:
self._amPmLabel.set_text(_("AM"))
def on_region_changed(self, combo, *args):
"""
:see: on_city_changed
"""
region = self._get_active_region()
if not region or region == self._old_region:
# region entry being edited or old_value chosen, no action needed
# @see: on_city_changed
return
self._citiesFilter.refilter()
# Set the city to the first one available in this newly selected region.
zone = self._regions_zones[region]
firstCity = sorted(list(zone))[0]
self._set_combo_selection(self._cityCombo, firstCity)
self._old_region = region
self._old_city = firstCity
def on_city_changed(self, combo, *args):
"""
ComboBox emits ::changed signal not only when something is selected, but
also when its entry's text is changed. We need to distinguish between
those two cases ('London' typed in the entry => no action until ENTER is
hit etc.; 'London' chosen in the expanded combobox => update timezone
map and do all necessary actions). Fortunately when entry is being
edited, self._get_active_city returns None.
"""
timezone = None
region = self._get_active_region()
city = self._get_active_city()
if not region or not city or (region == self._old_region and
city == self._old_city):
# entry being edited or no change, no actions needed
return
if city and region:
timezone = region + "/" + city
else:
# both city and region are needed to form a valid timezone
return
if region == "Etc":
# Etc timezones cannot be displayed on the map, so let's reset the
# location and manually set a highlight with no location pin.
self._tzmap.clear_location()
if city in ("GMT", "UTC"):
offset = 0.0
# The tzdb data uses POSIX-style signs for the GMT zones, which is
# the opposite of whatever everyone else expects. GMT+4 indicates a
# zone four hours west of Greenwich; i.e., four hours before. Reverse
# the sign to match the libtimezone map.
else:
# Take the part after "GMT"
offset = -float(city[3:])
self._tzmap.set_selected_offset(offset)
else:
# we don't want the timezone-changed signal to be emitted
self._tzmap.set_timezone(timezone)
# update "old" values
self._old_city = city
def on_entry_left(self, entry, *args):
# user clicked somewhere else or hit TAB => finished editing
entry.emit("activate")
def on_city_region_key_released(self, entry, event, *args):
if event.type == Gdk.EventType.KEY_RELEASE and \
event.keyval == Gdk.KEY_Escape:
# editing canceled
self._restore_old_city_region()
def on_completion_match_selected(self, combo, model, itr):
item = None
if model and itr:
item = model[itr][0]
if item:
self._set_combo_selection(combo, item)
def on_city_region_text_entry_activated(self, entry):
combo = entry.get_parent()
model = combo.get_model()
entry_text = entry.get_text().lower()
for row in model:
if entry_text == row[0].lower():
self._set_combo_selection(combo, row[0])
return
# non-matching value entered, reset to old values
self._restore_old_city_region()
def on_month_changed(self, *args):
self._stop_and_maybe_start_time_updating(interval=5)
self._daysFilter.refilter()
def on_day_changed(self, *args):
self._stop_and_maybe_start_time_updating(interval=5)
def on_year_changed(self, *args):
self._stop_and_maybe_start_time_updating(interval=5)
self._daysFilter.refilter()
def on_location_changed(self, tz_map, location):
if not location:
return
timezone = location.get_property('zone')
if self._set_timezone(timezone):
# timezone successfully set
self._tz = get_timezone(timezone)
self._update_datetime()
def on_timeformat_changed(self, button24h, *args):
hours = int(self._hoursLabel.get_text())
amPm = self._amPmLabel.get_text()
#connected to 24-hour radio button
if button24h.get_active():
self._set_amPm_part_sensitive(False)
new_hours = self._to_24h(hours, amPm)
self._amPmRevealer.set_reveal_child(False)
else:
self._set_amPm_part_sensitive(True)
new_hours, new_amPm = self._to_amPm(hours)
self._amPmLabel.set_text(new_amPm)
self._amPmRevealer.set_reveal_child(True)
self._hoursLabel.set_text("%0.2d" % new_hours)
def _set_date_time_setting_sensitive(self, sensitive):
#contains all date/time setting widgets
footer_alignment = self.builder.get_object("footerAlignment")
footer_alignment.set_sensitive(sensitive)
def _show_no_network_warning(self):
self.set_warning(_("You need to set up networking first if you "\
"want to use NTP"))
def _show_no_ntp_server_warning(self):
self.set_warning(_("You have no working NTP server configured"))
def on_ntp_switched(self, switch, *args):
if switch.get_active():
#turned ON
if not flags.can_touch_runtime_system("start NTP service"):
#cannot touch runtime system, not much to do here
return
if not nm.nm_is_connected():
self._show_no_network_warning()
switch.set_active(False)
return
else:
self.clear_info()
working_server = self._config_dialog.working_server
if working_server is None:
self._show_no_ntp_server_warning()
else:
#we need a one-time sync here, because chronyd would not change
#the time as drastically as we need
ntp.one_time_sync_async(working_server)
ret = iutil.start_service(NTP_SERVICE)
self._set_date_time_setting_sensitive(False)
#if starting chronyd failed and chronyd is not running,
#set switch back to OFF
if (ret != 0) and not iutil.service_running(NTP_SERVICE):
switch.set_active(False)
else:
#turned OFF
if not flags.can_touch_runtime_system("stop NTP service"):
#cannot touch runtime system, nothing to do here
return
self._set_date_time_setting_sensitive(True)
ret = iutil.stop_service(NTP_SERVICE)
#if stopping chronyd failed and chronyd is running,
#set switch back to ON
if (ret != 0) and iutil.service_running(NTP_SERVICE):
switch.set_active(True)
self.clear_info()
def on_ntp_config_clicked(self, *args):
self._config_dialog.refresh()
with self.main_window.enlightbox(self._config_dialog.window):
response = self._config_dialog.run()
if response == 1:
self.data.timezone.ntpservers = self._config_dialog.servers
if self._config_dialog.working_server is None:
self._show_no_ntp_server_warning()
else:
self.clear_info()
| gpl-2.0 | 6,774,794,064,002,626,000 | 34.193548 | 109 | 0.602556 | false |
kickstandproject/asterisk-testsuite-temporary | tests/bridge/atxfer_nominal/transfer.py | 3 | 4667 | #!/usr/bin/env python
'''
Copyright (C) 2013, Digium, Inc.
Matt Jordan <[email protected]>
This program is free software, distributed under the terms of
the GNU General Public License Version 2.
'''
import sys
import logging
sys.path.append("lib/python")
from version import AsteriskVersion
LOGGER = logging.getLogger(__name__)
class Transfer(object):
__singleton_instance = None
@staticmethod
def get_instance():
''' Return the singleton instance of the application test_object
Keyword Arguments:
path The full path to the location of the test
test_config The test's YAML configuration object
'''
if (Transfer.__singleton_instance is None):
# Note that the constructor sets the singleton instance.
# This is a tad backwards, but is needed for the pluggable
# framework. If we get a get_instance call before its been set,
# blow up - that really shouldn't ever happen
raise Exception()
return Transfer.__singleton_instance
def __init__(self, module_config, test_object):
''' Constructor
Keyword Arguments:
module_config The module configuration
test_object The test object. Must be of type BridgeTestCase
'''
self.module_config = module_config
self.test_object = test_object
self._current_feature = None
self.test_object.register_feature_start_observer(self._handle_feature_start)
if AsteriskVersion() >= AsteriskVersion('12'):
self.test_object.register_ami_observer(self._handle_ami_connect)
else:
self.test_object.register_feature_end_observer(self._handle_feature_end)
if (Transfer.__singleton_instance == None):
Transfer.__singleton_instance = self
def _handle_ami_connect(self, ami):
''' Handle AMI connect events '''
if (ami.id != 0):
return
ami.registerEvent('AttendedTransfer', self._handle_attended_transfer)
def _handle_feature_start(self, test_object, feature):
''' Callback for the BridgeTestCase feature detected event
Keyword Arguments:
test_object The BridgeTestCase object
feature The specific feature that was executed
'''
LOGGER.debug('Setting current feature to %s' % str(feature))
self._current_feature = feature
def _handle_feature_end(self, test_object, feature):
''' Callback for the BridgeTestCase feature detected event
Keyword Arguments:
test_object The BridgeTestCase object
feature The specific feature that was executed
'''
LOGGER.debug("current_feature: %s\n" % self._current_feature)
if self._current_feature['who'] == 'alice':
ami = self.test_object.ami_bob
channel = self.test_object.bob_channel
self.test_object.check_identities(bob_connected_line='"Charlie" <5678>')
elif self._current_feature['who'] == 'bob':
ami = self.test_object.ami_alice
channel = self.test_object.alice_channel
self.test_object.check_identities(alice_connected_line='"Charlie" <5678>')
else:
raise Exception()
LOGGER.info('Hanging up channel %s' % channel)
ami.hangup(channel)
def _handle_attended_transfer(self, ami, event):
''' Handle the AttendedTransfer event. Once the event has
triggered, the call can be torn down. '''
LOGGER.debug('ami %d: received event %s' % (ami.id, event))
self._handle_feature_end(None, None)
def complete_attended_transfer(self):
'''
Called when we've detected that the Attended Transfer should
complete
'''
if self._current_feature is None:
raise Exception()
if self._current_feature['who'] == 'alice':
ami = self.test_object.ami_alice
channel = self.test_object.alice_channel
elif self._current_feature['who'] == 'bob':
ami = self.test_object.ami_bob
channel = self.test_object.bob_channel
else:
raise Exception()
LOGGER.info('Hanging up channel %s' % channel)
ami.hangup(channel)
def complete_attended_transfer(ami, event):
'''
Callback that occurs during an attended transfer.
This callback signals that the test should complete the attended
transfer by hanging up the transferer.
'''
LOGGER.debug('ami %d: received event %s' % (ami.id, event))
transfer = Transfer.get_instance()
transfer.complete_attended_transfer()
return True
| gpl-2.0 | -8,971,531,841,085,130,000 | 34.356061 | 86 | 0.634883 | false |
mjenrungrot/competitive_programming | UVa Online Judge/10500.py | 1 | 1742 | # =============================================================================
# Author: Teerapat Jenrungrot - https://github.com/mjenrungrot/
# FileName: 10500.py
# Description: UVa Online Judge - 10500
# =============================================================================
def f(boards, x, y, nn):
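    # Flood fill: mark the starting cell as revealed ('*0'), append '*' to the four
    # orthogonal neighbours to reveal them, then recurse into any neighbour that
    # contains '0', counting each recursive step in nn[0] (a one-element list used
    # as a mutable counter).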
boards[y][x] = '*0'
for (dy, dx) in [(-1, 0), (0, 1), (1, 0), (0, -1)]:
newy = y + dy
newx = x + dx
if newy < 0 or newy >= len(boards): continue
if newx < 0 or newx >= len(boards[y]): continue
boards[newy][newx] = '{}*'.format(boards[newy][newx])
for (dy, dx) in [(-1, 0), (0, 1), (1, 0), (0, -1)]:
newy = y + dy
newx = x + dx
if newy < 0 or newy >= len(boards): continue
if newx < 0 or newx >= len(boards[y]): continue
if boards[newy][newx][0] == '*': continue
if '0' in boards[newy][newx]:
nn[0] += 1
f(boards, newx, newy, nn)
return
while True:
N, M = list(map(int, input().split()))
if N == M == 0: break
y0, x0 = list(map(int, input().split()))
x0 -= 1
y0 -= 1
boards = []
for i in range(N):
boards.append(input().split())
n_movements = [0]
f(boards, x0, y0, n_movements)
print("")
print("---".join(list("|" * (M + 1))))
for i in range(N):
print("|", end="")
for j in range(M):
val = boards[i][j]
if "*" not in val: val = "?"
else: val = val.replace('*', '')
print(" {} |".format(val), end="")
print("")
print("---".join(list("|" * (M + 1))))
print("")
print("NUMBER OF MOVEMENTS: {}".format(n_movements[0])) | mit | -240,404,931,939,146,530 | 31.277778 | 79 | 0.41504 | false |
inares/edx-platform | lms/lib/xblock/test/test_mixin.py | 11 | 14498 | """
Tests of the LMS XBlock Mixin
"""
import ddt
from xblock.validation import ValidationMessage
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.factories import CourseFactory, ToyCourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, TEST_DATA_MIXED_TOY_MODULESTORE
from xmodule.partitions.partitions import Group, UserPartition
class LmsXBlockMixinTestCase(ModuleStoreTestCase):
"""
Base class for XBlock mixin tests cases. A simple course with a single user partition is created
in setUp for all subclasses to use.
"""
def build_course(self):
"""
Build up a course tree with a UserPartition.
"""
# pylint: disable=attribute-defined-outside-init
self.user_partition = UserPartition(
0,
'first_partition',
'First Partition',
[
Group(0, 'alpha'),
Group(1, 'beta')
]
)
self.group1 = self.user_partition.groups[0]
self.group2 = self.user_partition.groups[1]
self.course = CourseFactory.create(user_partitions=[self.user_partition])
section = ItemFactory.create(parent=self.course, category='chapter', display_name='Test Section')
subsection = ItemFactory.create(parent=section, category='sequential', display_name='Test Subsection')
vertical = ItemFactory.create(parent=subsection, category='vertical', display_name='Test Unit')
video = ItemFactory.create(parent=vertical, category='video', display_name='Test Video 1')
self.section_location = section.location
self.subsection_location = subsection.location
self.vertical_location = vertical.location
self.video_location = video.location
def set_group_access(self, block_location, access_dict):
"""
Sets the group_access dict on the block referenced by block_location.
"""
block = self.store.get_item(block_location)
block.group_access = access_dict
self.store.update_item(block, 1)
class XBlockValidationTest(LmsXBlockMixinTestCase):
"""
Unit tests for XBlock validation
"""
def setUp(self):
super(XBlockValidationTest, self).setUp()
self.build_course()
def verify_validation_message(self, message, expected_message, expected_message_type):
"""
Verify that the validation message has the expected validation message and type.
"""
self.assertEqual(message.text, expected_message)
self.assertEqual(message.type, expected_message_type)
def test_validate_full_group_access(self):
"""
Test the validation messages produced for an xblock with full group access.
"""
validation = self.store.get_item(self.video_location).validate()
self.assertEqual(len(validation.messages), 0)
def test_validate_restricted_group_access(self):
"""
Test the validation messages produced for an xblock with a valid group access restriction
"""
self.set_group_access(self.video_location, {self.user_partition.id: [self.group1.id, self.group2.id]})
validation = self.store.get_item(self.video_location).validate()
self.assertEqual(len(validation.messages), 0)
def test_validate_invalid_user_partitions(self):
"""
Test the validation messages produced for an xblock referring to non-existent user partitions.
"""
self.set_group_access(self.video_location, {999: [self.group1.id]})
validation = self.store.get_item(self.video_location).validate()
self.assertEqual(len(validation.messages), 1)
self.verify_validation_message(
validation.messages[0],
u"This component refers to deleted or invalid content group configurations.",
ValidationMessage.ERROR,
)
# Now add a second invalid user partition and validate again.
# Note that even though there are two invalid configurations,
# only a single error message will be returned.
self.set_group_access(self.video_location, {998: [self.group2.id]})
validation = self.store.get_item(self.video_location).validate()
self.assertEqual(len(validation.messages), 1)
self.verify_validation_message(
validation.messages[0],
u"This component refers to deleted or invalid content group configurations.",
ValidationMessage.ERROR,
)
def test_validate_invalid_groups(self):
"""
Test the validation messages produced for an xblock referring to non-existent groups.
"""
self.set_group_access(self.video_location, {self.user_partition.id: [self.group1.id, 999]})
validation = self.store.get_item(self.video_location).validate()
self.assertEqual(len(validation.messages), 1)
self.verify_validation_message(
validation.messages[0],
u"This component refers to deleted or invalid content groups.",
ValidationMessage.ERROR,
)
# Now try again with two invalid group ids
self.set_group_access(self.video_location, {self.user_partition.id: [self.group1.id, 998, 999]})
validation = self.store.get_item(self.video_location).validate()
self.assertEqual(len(validation.messages), 1)
self.verify_validation_message(
validation.messages[0],
u"This component refers to deleted or invalid content groups.",
ValidationMessage.ERROR,
)
class OpenAssessmentBlockMixinTestCase(ModuleStoreTestCase):
"""
Tests for OpenAssessmentBlock mixin.
"""
def setUp(self):
super(OpenAssessmentBlockMixinTestCase, self).setUp()
self.course = CourseFactory.create()
self.section = ItemFactory.create(parent=self.course, category='chapter', display_name='Test Section')
self.open_assessment = ItemFactory.create(
parent=self.section,
category="openassessment",
display_name="untitled",
)
def test_has_score(self):
"""
Test has_score is true for ora2 problems.
"""
self.assertTrue(self.open_assessment.has_score)
@ddt.ddt
class XBlockGetParentTest(LmsXBlockMixinTestCase):
"""
Test that XBlock.get_parent returns correct results with each modulestore
backend.
"""
MODULESTORE = TEST_DATA_MIXED_TOY_MODULESTORE
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.xml)
def test_parents(self, modulestore_type):
with self.store.default_store(modulestore_type):
# setting up our own local course tree here, since it needs to be
# created with the correct modulestore type.
if modulestore_type == 'xml':
course_key = self.store.make_course_key('edX', 'toy', '2012_Fall')
else:
course_key = ToyCourseFactory.create(run='2012_Fall_copy').id
course = self.store.get_course(course_key)
self.assertIsNone(course.get_parent())
def recurse(parent):
"""
Descend the course tree and ensure the result of get_parent()
is the expected one.
"""
visited = []
for child in parent.get_children():
self.assertEqual(parent.location, child.get_parent().location)
visited.append(child)
visited += recurse(child)
return visited
visited = recurse(course)
self.assertEqual(len(visited), 28)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_parents_draft_content(self, modulestore_type):
# move the video to the new vertical
with self.store.default_store(modulestore_type):
self.build_course()
subsection = self.store.get_item(self.subsection_location)
new_vertical = ItemFactory.create(parent=subsection, category='vertical', display_name='New Test Unit')
child_to_move_location = self.video_location.for_branch(None)
new_parent_location = new_vertical.location.for_branch(None)
old_parent_location = self.vertical_location.for_branch(None)
with self.store.branch_setting(ModuleStoreEnum.Branch.draft_preferred):
self.assertIsNone(self.course.get_parent())
with self.store.bulk_operations(self.course.id):
user_id = ModuleStoreEnum.UserID.test
old_parent = self.store.get_item(old_parent_location)
old_parent.children.remove(child_to_move_location)
self.store.update_item(old_parent, user_id)
new_parent = self.store.get_item(new_parent_location)
new_parent.children.append(child_to_move_location)
self.store.update_item(new_parent, user_id)
# re-fetch video from draft store
video = self.store.get_item(child_to_move_location)
self.assertEqual(
new_parent_location,
video.get_parent().location
)
with self.store.branch_setting(ModuleStoreEnum.Branch.published_only):
# re-fetch video from published store
video = self.store.get_item(child_to_move_location)
self.assertEqual(
old_parent_location,
video.get_parent().location.for_branch(None)
)
class RenamedTuple(tuple):
"""
This class is only used to allow overriding __name__ on the tuples passed
through ddt, in order to have the generated test names make sense.
"""
pass
def ddt_named(parent, child):
"""
Helper to get more readable dynamically-generated test names from ddt.
"""
args = RenamedTuple([parent, child])
setattr(args, '__name__', 'parent_{}_child_{}'.format(parent, child))
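    # For example, ddt_named('section_location', 'video_location') returns a tuple
    # whose __name__ is 'parent_section_location_child_video_location', which ddt
    # uses when naming the generated test.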
return args
@ddt.ddt
class XBlockMergedGroupAccessTest(LmsXBlockMixinTestCase):
"""
Test that XBlock.merged_group_access is computed correctly according to
our access control rules.
"""
PARTITION_1 = 1
PARTITION_1_GROUP_1 = 11
PARTITION_1_GROUP_2 = 12
PARTITION_2 = 2
PARTITION_2_GROUP_1 = 21
PARTITION_2_GROUP_2 = 22
PARENT_CHILD_PAIRS = (
ddt_named('section_location', 'subsection_location'),
ddt_named('section_location', 'vertical_location'),
ddt_named('section_location', 'video_location'),
ddt_named('subsection_location', 'vertical_location'),
ddt_named('subsection_location', 'video_location'),
)
def setUp(self):
super(XBlockMergedGroupAccessTest, self).setUp()
self.build_course()
def verify_group_access(self, block_location, expected_dict):
"""
Verify the expected value for the block's group_access.
"""
block = self.store.get_item(block_location)
self.assertEqual(block.merged_group_access, expected_dict)
@ddt.data(*PARENT_CHILD_PAIRS)
@ddt.unpack
def test_intersecting_groups(self, parent, child):
"""
When merging group_access on a block, the resulting group IDs for each
partition is the intersection of the group IDs defined for that
partition across all ancestor blocks (including this one).
"""
parent_block = getattr(self, parent)
child_block = getattr(self, child)
self.set_group_access(parent_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_1, self.PARTITION_1_GROUP_2]})
self.set_group_access(child_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_2]})
self.verify_group_access(parent_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_1, self.PARTITION_1_GROUP_2]})
self.verify_group_access(child_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_2]})
@ddt.data(*PARENT_CHILD_PAIRS)
@ddt.unpack
def test_disjoint_groups(self, parent, child):
"""
When merging group_access on a block, if the intersection of group IDs
for a partition is empty, the merged value for that partition is False.
"""
parent_block = getattr(self, parent)
child_block = getattr(self, child)
self.set_group_access(parent_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_1]})
self.set_group_access(child_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_2]})
self.verify_group_access(parent_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_1]})
self.verify_group_access(child_block, {self.PARTITION_1: False})
def test_disjoint_groups_no_override(self):
"""
Special case of the above test - ensures that `False` propagates down
to the block being queried even if blocks further down in the hierarchy
try to override it.
"""
self.set_group_access(self.section_location, {self.PARTITION_1: [self.PARTITION_1_GROUP_1]})
self.set_group_access(self.subsection_location, {self.PARTITION_1: [self.PARTITION_1_GROUP_2]})
self.set_group_access(
self.vertical_location, {self.PARTITION_1: [self.PARTITION_1_GROUP_1, self.PARTITION_1_GROUP_2]}
)
self.verify_group_access(self.vertical_location, {self.PARTITION_1: False})
self.verify_group_access(self.video_location, {self.PARTITION_1: False})
@ddt.data(*PARENT_CHILD_PAIRS)
@ddt.unpack
def test_union_partitions(self, parent, child):
"""
When merging group_access on a block, the result's keys (partitions)
are the union of all partitions specified across all ancestor blocks
(including this one).
"""
parent_block = getattr(self, parent)
child_block = getattr(self, child)
self.set_group_access(parent_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_1]})
self.set_group_access(child_block, {self.PARTITION_2: [self.PARTITION_1_GROUP_2]})
self.verify_group_access(parent_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_1]})
self.verify_group_access(
child_block, {self.PARTITION_1: [self.PARTITION_1_GROUP_1], self.PARTITION_2: [self.PARTITION_1_GROUP_2]}
)
| agpl-3.0 | -3,216,552,329,690,785,000 | 40.541547 | 120 | 0.64264 | false |
mith1979/ansible_automation | applied_python/applied_python/lib/python2.7/site-packages/pylint/reporters/ureports/__init__.py | 3 | 3558 | # copyright 2003-2015 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of pylint.
#
# pylint is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# pylint is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with pylint. If not, see <http://www.gnu.org/licenses/>.
"""Universal report objects and some formatting drivers.
A way to create simple reports using python objects, primarily designed to be
formatted as text and html.
"""
import os
import sys
import six
class BaseWriter(object):
"""base class for ureport writers"""
def format(self, layout, stream=None, encoding=None):
"""format and write the given layout into the stream object
unicode policy: unicode strings may be found in the layout;
try to call stream.write with it, but give it back encoded using
the given encoding if it fails
"""
if stream is None:
stream = sys.stdout
if not encoding:
encoding = getattr(stream, 'encoding', 'UTF-8')
self.encoding = encoding or 'UTF-8'
self.out = stream
self.begin_format()
layout.accept(self)
self.end_format()
def format_children(self, layout):
"""recurse on the layout children and call their accept method
(see the Visitor pattern)
"""
for child in getattr(layout, 'children', ()):
child.accept(self)
def writeln(self, string=u''):
"""write a line in the output buffer"""
self.write(string + os.linesep)
def write(self, string):
"""write a string in the output buffer"""
self.out.write(string)
def begin_format(self):
"""begin to format a layout"""
self.section = 0
def end_format(self):
"""finished to format a layout"""
def get_table_content(self, table):
"""trick to get table content without actually writing it
return an aligned list of lists containing table cells values as string
"""
result = [[]]
cols = table.cols
for cell in self.compute_content(table):
if cols == 0:
result.append([])
cols = table.cols
cols -= 1
result[-1].append(cell)
# fill missing cells
while len(result[-1]) < cols:
result[-1].append(u'')
return result
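    # For a 2-column table whose cells are 'A', 'B', 'C', 'D' (in reading order)
    # this returns [['A', 'B'], ['C', 'D']].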
def compute_content(self, layout):
"""trick to compute the formatting of children layout before actually
writing it
return an iterator on strings (one for each child element)
"""
# Patch the underlying output stream with a fresh-generated stream,
# which is used to store a temporary representation of a child
# node.
out = self.out
try:
for child in layout.children:
stream = six.StringIO()
self.out = stream
child.accept(self)
yield stream.getvalue()
finally:
self.out = out
| apache-2.0 | 6,371,420,976,397,837,000 | 32.566038 | 81 | 0.624227 | false |
kyleabeauchamp/pymbar | setup.py | 3 | 5145 | """
The pymbar package contains the pymbar suite of tools for the analysis of
simulated and experimental data with the multistate Bennett acceptance
ratio (MBAR) estimator.
"""
#from distutils.sysconfig import get_config_var
from distutils.core import setup, Extension
from setuptools import setup, Extension
import numpy
import glob
import os
import subprocess
import six
##########################
VERSION = "3.0.0.dev0"
ISRELEASED = False
__version__ = VERSION
##########################
################################################################################
# Writing version control information to the module
################################################################################
def git_version():
# Return the git revision as a string
# copied from numpy setup.py
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = 'Unknown'
return GIT_REVISION
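# git_version() shells out to `git rev-parse HEAD` and returns the revision string,
# or 'Unknown' if the git executable cannot be run (OSError).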
def write_version_py(filename='pymbar/version.py'):
cnt = """
# This file is automatically generated by setup.py
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
"""
# Adding the git rev number needs to be done inside write_version_py(),
# otherwise the import of numpy.version messes up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
else:
GIT_REVISION = 'Unknown'
if not ISRELEASED:
FULLVERSION += '.dev-' + GIT_REVISION[:7]
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
################################################################################
# Installation
################################################################################
write_version_py()
CMBAR = Extension('_pymbar',
sources = ["pymbar/_pymbar.c"],
extra_compile_args=["-std=c99","-O2","-shared","-msse2","-msse3"],
include_dirs = [numpy.get_include(),numpy.get_include()+"/numpy/"]
)
def buildKeywordDictionary():
from distutils.core import Extension
setupKeywords = {}
setupKeywords["name"] = "pymbar"
setupKeywords["version"] = VERSION
setupKeywords["author"] = "Michael R. Shirts and John D. Chodera"
setupKeywords["author_email"] = "[email protected], [email protected]"
setupKeywords["license"] = "GPL 2.0"
setupKeywords["url"] = "http://github.com/choderalab/pymbar"
setupKeywords["download_url"] = "http://github.com/choderalab/pymbar"
setupKeywords["packages"] = ['pymbar', 'pymbar.testsystems', 'pymbar.tests']
setupKeywords["package_dir"] = {'pymbar' : 'pymbar', 'pymbar.tests' : 'pymbar/tests'}
setupKeywords["zip_safe"] = False
#setupKeywords["py_modules"] = ["pymbar", "timeseries", "testsystems", "confidenceintervals"]
setupKeywords["data_files"] = []
setupKeywords["ext_modules"] = [CMBAR] if six.PY2 else []
# setupKeywords["test_suite"] = "tests" # requires we migrate to setuptools
setupKeywords["platforms"] = ["Linux", "Mac OS X", "Windows"]
setupKeywords["description"] = "Python implementation of the multistate Bennett acceptance ratio (MBAR) method."
setupKeywords["requires"] = ["numpy", "scipy", "nose", "numexpr"]
setupKeywords["long_description"] = """
Pymbar (https://simtk.org/home/pymbar) is a library
that provides tools for optimally combining simulations
from multiple thermodynamic states using maximum likelihood
methods to compute free energies (normalization constants)
and expectation values from all of the samples simultaneously.
"""
outputString=""
firstTab = 40
secondTab = 60
for key in sorted(setupKeywords.keys()):
value = setupKeywords[key]
outputString += key.rjust(firstTab) + str( value ).rjust(secondTab) + "\n"
print("%s" % outputString)
#get_config_var(None) # this line is necessary to fix the imports Mac OS X
return setupKeywords
def main():
setupKeywords = buildKeywordDictionary()
setup(**setupKeywords)
if __name__ == '__main__':
main()
| lgpl-2.1 | 2,325,693,704,332,234,000 | 34.482759 | 122 | 0.569096 | false |
CGATOxford/Optic | Optic/WrapperBl2Seq.py | 1 | 5775 | ##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
WrapperBl2Seq.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Code
----
'''
import os
import sys
import string
import tempfile
import subprocess
from CGAT import Experiment as Experiment
from CGAT import FastaIterator as FastaIterator
class Bl2SeqError(Exception):
pass
class Bl2Seq:
mOptions = ""
mExecutable = "bl2seq"
mStderr = sys.stderr
def __init__(self, options=""):
self.mOptions = options
def CreateTemporaryFiles(self):
"""create temporary files."""
self.mTempDirectory = tempfile.mkdtemp()
self.mFilenameTempInput = self.mTempDirectory + "/input"
self.mFilenameTempOutput = self.mTempDirectory + "/output"
def DeleteTemporaryFiles(self):
"""clean up."""
os.remove(self.mFilenameTempInput)
os.remove(self.mFilenameTempOutput)
os.rmdir(self.mTempDirectory)
def SetStderr(self, file=None):
"""set file for dumping stderr."""
self.mStderr = file
def WriteOutput(self, lines, filename_output=None):
"""write output to file.
If file is not given, lines are written to stdout.
"""
if filename_output:
outfile = open(filename_output, "w")
else:
outfile = sys.stdout
outfile.write(string.join(lines, ""))
if filename_output:
outfile.close()
def ParseResult(self, trace_file=None, information_file=None):
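        # NOTE: AdaptiveCAIResult is not defined in this module (it appears to be left
        # over from a related wrapper), so calling this method as written would raise a
        # NameError; RunOnFile() below does not use it.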
result = AdaptiveCAIResult()
result.Read(trace_file, information_file)
return result
def RunOnFile(self, infile, outfile, errfile):
self.CreateTemporaryFiles()
statement = string.join((self.mExecutable,
self.mFilenameTempInput,
self.mFilenameTempOutput),
" ")
i = FastaIterator.FastaIterator(infile)
outfile.write("GENE\tBl2Seq\n")
while 1:
f = i.next()
if f is None:
break
file = open(self.mFilenameTempInput, "w")
file.write(">%s\n%s" % (f.title, f.sequence))
file.close()
s = subprocess.Popen(statement,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.mTempDirectory,
close_fds=True)
(out, err) = s.communicate()
if s.returncode != 0:
raise Bl2SeqError, "Error in calculating Bl2Seq\n%s" % err
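            # Output-parsing assumption inherited from the original script: the value
            # of interest is the third space-separated token on the third line of the
            # bl2seq output file.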
d = open(self.mFilenameTempOutput).readlines()[2][:-1]
enc = d.split(" ")[2]
outfile.write((string.join((f.title, enc), "\t")) + "\n")
errfile.write(err)
self.DeleteTemporaryFiles()
if __name__ == "__main__":
    parser = Experiment.OptionParser(
version="%prog version: $Id: WrapperBl2Seq.py 2781 2009-09-10 11:33:14Z andreas $")
parser.add_option("-f", "--input-file", dest="input_filename", type="string",
help="input filename. If '-', stdin is used [default=%default].",
metavar="FILE")
parser.add_option("-o", "--output-file", dest="output_filename", type="string",
help="output filename for codon usage. If '-', output is stdout [default=%default].",
metavar="FILE")
parser.add_option("-e", "--error-file", dest="error_filename", type="string",
help="output filename for error messages. If '-', output is stderr [default=%default].",
metavar="FILE")
parser.set_defaults(
input_filename="-",
output_filename="-",
error_filename="/dev/null",
)
(options, args) = Experiment.Start(parser)
wrapper = Bl2Seq()
if options.input_filename == "-":
file_stdin = sys.stdin
else:
file_stdin = open(options.input_filename, "r")
if options.output_filename:
if options.output_filename == "-":
file_stdout = sys.stdout
else:
file_stdout = open(options.output_filename, "w")
if options.error_filename:
if options.error_filename == "-":
file_stderr = sys.stderr
else:
file_stderr = open(options.error_filename, "w")
wrapper.RunOnFile(file_stdin, file_stdout, file_stderr)
if file_stdin and file_stdin != sys.stdin:
file_stdin.close()
if file_stdout and file_stdout != sys.stdout:
file_stdout.close()
if file_stderr and file_stderr != sys.stderr:
file_stderr.close()
Experiment.Stop()
| mit | 2,278,739,067,990,019,800 | 28.166667 | 110 | 0.561732 | false |
ntuecon/server | pyenv/Lib/site-packages/pythonwin/pywin/framework/sgrepmdi.py | 2 | 19175 | #SGrepMDI is by Gordon McMillan ([email protected])
#It does basically what Find In Files does in MSVC with a couple enhancements.
# - It saves any directories in the app's ini file (if you want to get rid
# of them you'll have to edit the file)
# - "Directories" can be directories,
# - semicolon separated lists of "directories",
# - environment variables that evaluate to "directories",
# - registry path names that evaluate to "directories",
# - all of which is recursive, so you can mix them all up.
# - It is MDI, so you can 'nest' greps and return to earlier ones,
# (ie, have multiple results open at the same time)
# - Like FIF, double clicking a line opens an editor and takes you to the line.
# - You can highlight text, right click and start a new grep with the selected
# text as search pattern and same directories etc as before.
# - You can save grep parameters (so you don't lose your hardearned pattern)
# from File|Save
# - You can save grep results by right clicking in the result window.
# Hats off to Mark Hammond for providing an environment where I could cobble
# something like this together in a couple evenings!
import win32ui
import win32api
from pywin.mfc import docview, dialog, window
import win32con
import string
import re
import glob
import os
import stat
import glob
import scriptutils
def getsubdirs(d):
dlist = []
flist = glob.glob(d+'\\*')
for f in flist:
if os.path.isdir(f):
dlist.append(f)
dlist = dlist + getsubdirs(f)
return dlist
class dirpath:
def __init__(self, str, recurse=0):
dp = str.split(';')
dirs = {}
for d in dp:
if os.path.isdir(d):
d = d.lower()
if d not in dirs:
dirs[d] = None
if recurse:
subdirs = getsubdirs(d)
for sd in subdirs:
sd = sd.lower()
if sd not in dirs:
dirs[sd] = None
elif os.path.isfile(d):
pass
else:
x = None
if d in os.environ:
x = dirpath(os.environ[d])
elif d[:5] == 'HKEY_':
keystr = d.split('\\')
try:
root = eval('win32con.'+keystr[0])
except:
win32ui.MessageBox("Can't interpret registry key name '%s'" % keystr[0])
try:
subkey = '\\'.join(keystr[1:])
val = win32api.RegQueryValue(root, subkey)
if val:
x = dirpath(val)
else:
win32ui.MessageBox("Registry path '%s' did not return a path entry" % d)
except:
win32ui.MessageBox("Can't interpret registry key value: %s" % keystr[1:])
else:
win32ui.MessageBox("Directory '%s' not found" % d)
if x:
for xd in x:
if xd not in dirs:
dirs[xd] = None
if recurse:
subdirs = getsubdirs(xd)
for sd in subdirs:
sd = sd.lower()
if sd not in dirs:
dirs[sd] = None
self.dirs = []
for d in dirs.keys():
self.dirs.append(d)
def __getitem__(self, key):
return self.dirs[key]
def __len__(self):
return len(self.dirs)
def __setitem__(self, key, value):
self.dirs[key] = value
def __delitem__(self, key):
del self.dirs[key]
def __getslice__(self, lo, hi):
return self.dirs[lo:hi]
def __setslice__(self, lo, hi, seq):
self.dirs[lo:hi] = seq
def __delslice__(self, lo, hi):
del self.dirs[lo:hi]
def __add__(self, other):
if type(other) == type(self) or type(other) == type([]):
return self.dirs + other.dirs
def __radd__(self, other):
if type(other) == type(self) or type(other) == type([]):
return other.dirs + self.dirs
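# Illustrative only (hypothetical paths): dirpath(r"C:\src;PYTHONPATH", recurse=1)
# yields C:\src plus all of its subdirectories, plus whatever the PYTHONPATH
# environment variable itself resolves to; strings starting with "HKEY_" are treated
# as registry paths and resolved through win32api.RegQueryValue in the same way.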
# Group(1) is the filename, group(2) is the lineno.
#regexGrepResult=regex.compile("^\\([a-zA-Z]:.*\\)(\\([0-9]+\\))")
regexGrep=re.compile(r"^([a-zA-Z]:[^(]*)\(([0-9]+)\)")
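# e.g. a result line such as r"C:\src\foo.py(42) some matching text" yields
# group(1) == r"C:\src\foo.py" and group(2) == "42".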
#these are the atom numbers defined by Windows for basic dialog controls
BUTTON = 0x80
EDIT = 0x81
STATIC = 0x82
LISTBOX = 0x83
SCROLLBAR = 0x84
COMBOBOX = 0x85
class GrepTemplate(docview.RichEditDocTemplate):
def __init__(self):
docview.RichEditDocTemplate.__init__(self, win32ui.IDR_TEXTTYPE, GrepDocument, GrepFrame, GrepView)
self.SetDocStrings("\nGrep\nGrep\nGrep params (*.grep)\n.grep\n\n\n")
win32ui.GetApp().AddDocTemplate(self)
self.docparams = None
def MatchDocType(self, fileName, fileType):
doc = self.FindOpenDocument(fileName)
if doc: return doc
ext = os.path.splitext(fileName)[1].lower()
if ext =='.grep':
return win32ui.CDocTemplate_Confidence_yesAttemptNative
return win32ui.CDocTemplate_Confidence_noAttempt
def setParams(self, params):
self.docparams = params
def readParams(self):
tmp = self.docparams
self.docparams = None
return tmp
class GrepFrame(window.MDIChildWnd):
# The template and doc params will one day be removed.
def __init__(self, wnd = None):
window.MDIChildWnd.__init__(self, wnd)
class GrepDocument(docview.RichEditDoc):
def __init__(self, template):
docview.RichEditDoc.__init__(self, template)
self.dirpattern = ''
self.filpattern = ''
self.greppattern = ''
self.casesensitive = 1
self.recurse = 1
self.verbose = 0
def OnOpenDocument(self, fnm):
#this bizarre stuff with params is so right clicking in a result window
#and starting a new grep can communicate the default parameters to the
#new grep.
try:
params = open(fnm,'r').read()
except:
params = None
self.setInitParams(params)
return self.OnNewDocument()
def OnCloseDocument(self):
try:
win32ui.GetApp().DeleteIdleHandler(self.SearchFile)
except:
pass
return self._obj_.OnCloseDocument()
def saveInitParams(self):
# Only save the flags, not the text boxes.
paramstr = "\t%s\t\t%d\t%d" % (self.filpattern, self.casesensitive, self.recurse)
win32ui.WriteProfileVal("Grep", "Params", paramstr)
def setInitParams(self, paramstr):
if paramstr is None:
paramstr = win32ui.GetProfileVal("Grep", "Params", '\t\t\t1\t0\t0')
params = paramstr.split('\t')
if len(params) < 3:
params = params + ['']*(3-len(params))
if len(params) < 6:
params = params + [0]*(6-len(params))
self.dirpattern = params[0]
self.filpattern = params[1]
self.greppattern = params[2]
self.casesensitive = int(params[3])
self.recurse = int(params[4])
self.verbose = int(params[5])
# setup some reasonable defaults.
if not self.dirpattern:
try:
editor=win32ui.GetMainFrame().MDIGetActive()[0].GetEditorView()
self.dirpattern=os.path.abspath(os.path.dirname(editor.GetDocument().GetPathName()))
except (AttributeError, win32ui.error):
self.dirpattern = os.getcwd()
if not self.filpattern:
self.filpattern = "*.py"
def OnNewDocument(self):
if self.dirpattern == '':
self.setInitParams(greptemplate.readParams())
d = GrepDialog(self.dirpattern, self.filpattern, self.greppattern, self.casesensitive, self.recurse, self.verbose)
if d.DoModal() == win32con.IDOK:
self.dirpattern = d['dirpattern']
self.filpattern = d['filpattern']
self.greppattern = d['greppattern']
self.casesensitive = d['casesensitive']
self.recurse = d['recursive']
self.verbose = d['verbose']
self.doSearch()
self.saveInitParams()
return 1
return 0 # cancelled - return zero to stop frame creation.
def doSearch(self):
self.dp = dirpath(self.dirpattern, self.recurse)
self.SetTitle("Grep for %s in %s" % (self.greppattern, self.filpattern))
#self.text = []
self.GetFirstView().Append('#Search '+self.dirpattern+'\n')
if self.verbose:
self.GetFirstView().Append('# ='+repr(self.dp.dirs)+'\n')
self.GetFirstView().Append('# Files '+self.filpattern+'\n')
self.GetFirstView().Append('# For '+self.greppattern+'\n')
self.fplist = self.filpattern.split(';')
if self.casesensitive:
self.pat = re.compile(self.greppattern)
else:
self.pat = re.compile(self.greppattern, re.IGNORECASE)
win32ui.SetStatusText("Searching. Please wait...", 0)
self.dpndx = self.fpndx = 0
self.fndx = -1
if not self.dp:
self.GetFirstView().Append("# ERROR: '%s' does not resolve to any search locations" % self.dirpattern)
self.SetModifiedFlag(0)
else:
self.flist = glob.glob(self.dp[0]+'\\'+self.fplist[0])
win32ui.GetApp().AddIdleHandler(self.SearchFile)
def SearchFile(self, handler, count):
self.fndx = self.fndx + 1
if self.fndx < len(self.flist):
f = self.flist[self.fndx]
if self.verbose:
self.GetFirstView().Append('# ..'+f+'\n')
# Directories may match the file type pattern, and files may be removed
# while grep is running
if os.path.isfile(f):
win32ui.SetStatusText("Searching "+f, 0)
lines = open(f, 'r').readlines()
for i in range(len(lines)):
line = lines[i]
if self.pat.search(line) != None:
self.GetFirstView().Append(f+'('+repr(i+1) + ') '+line)
else:
self.fndx = -1
self.fpndx = self.fpndx + 1
if self.fpndx < len(self.fplist):
self.flist = glob.glob(self.dp[self.dpndx] + '\\' + self.fplist[self.fpndx])
else:
self.fpndx = 0
self.dpndx = self.dpndx + 1
if self.dpndx < len(self.dp):
self.flist = glob.glob(self.dp[self.dpndx] + '\\' + self.fplist[self.fpndx])
else:
win32ui.SetStatusText("Search complete.", 0)
self.SetModifiedFlag(0) # default to not modified.
try:
win32ui.GetApp().DeleteIdleHandler(self.SearchFile)
except:
pass
return 0
return 1
def GetParams(self):
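        # GetParams() produces a single tab-separated record, e.g. (illustrative):
        # C:\src <TAB> *.py <TAB> foo <TAB> 1 <TAB> 1 <TAB> 0
        # i.e. dirpattern, filpattern, greppattern, casesensitive, recurse, verbose --
        # the same format parsed by setInitParams() above.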
return self.dirpattern+'\t'+self.filpattern+'\t'+self.greppattern+'\t'+repr(self.casesensitive)+'\t'+repr(self.recurse)+'\t'+repr(self.verbose)
def OnSaveDocument(self, filename):
# print 'OnSaveDocument() filename=',filename
savefile = open(filename,"wb")
txt = self.GetParams()+'\n'
# print 'writing',txt
savefile.write(txt)
savefile.close()
self.SetModifiedFlag(0)
return 1
ID_OPEN_FILE = 0xe400
ID_GREP = 0xe401
ID_SAVERESULTS = 0x402
ID_TRYAGAIN = 0x403
class GrepView(docview.RichEditView):
def __init__(self, doc):
docview.RichEditView.__init__(self, doc)
self.SetWordWrap(win32ui.CRichEditView_WrapNone)
self.HookHandlers()
def OnInitialUpdate(self):
rc = self._obj_.OnInitialUpdate()
format = (-402653169, 0, 200, 0, 0, 0, 49, 'Courier New')
self.SetDefaultCharFormat(format)
return rc
def HookHandlers(self):
self.HookMessage(self.OnRClick, win32con.WM_RBUTTONDOWN)
self.HookCommand(self.OnCmdOpenFile, ID_OPEN_FILE)
self.HookCommand(self.OnCmdGrep, ID_GREP)
self.HookCommand(self.OnCmdSave, ID_SAVERESULTS)
self.HookCommand(self.OnTryAgain, ID_TRYAGAIN)
self.HookMessage(self.OnLDblClick,win32con.WM_LBUTTONDBLCLK)
def OnLDblClick(self,params):
line = self.GetLine()
regexGrepResult = regexGrep.match(line)
if regexGrepResult:
fname = regexGrepResult.group(1)
line = int(regexGrepResult.group(2))
scriptutils.JumpToDocument(fname, line)
return 0 # dont pass on
return 1 # pass it on by default.
def OnRClick(self, params):
menu = win32ui.CreatePopupMenu()
flags=win32con.MF_STRING|win32con.MF_ENABLED
lineno = self._obj_.LineFromChar(-1) #selection or current line
line = self._obj_.GetLine(lineno)
regexGrepResult = regexGrep.match(line)
if regexGrepResult:
self.fnm = regexGrepResult.group(1)
self.lnnum = int(regexGrepResult.group(2))
menu.AppendMenu(flags, ID_OPEN_FILE, "&Open "+self.fnm)
menu.AppendMenu(win32con.MF_SEPARATOR)
menu.AppendMenu(flags, ID_TRYAGAIN, "&Try Again")
charstart, charend = self._obj_.GetSel()
if charstart != charend:
linestart = self._obj_.LineIndex(lineno)
self.sel = line[charstart-linestart:charend-linestart]
menu.AppendMenu(flags, ID_GREP, "&Grep for "+self.sel)
menu.AppendMenu(win32con.MF_SEPARATOR)
menu.AppendMenu(flags, win32ui.ID_EDIT_CUT, 'Cu&t')
menu.AppendMenu(flags, win32ui.ID_EDIT_COPY, '&Copy')
menu.AppendMenu(flags, win32ui.ID_EDIT_PASTE, '&Paste')
menu.AppendMenu(flags, win32con.MF_SEPARATOR);
menu.AppendMenu(flags, win32ui.ID_EDIT_SELECT_ALL, '&Select all')
menu.AppendMenu(flags, win32con.MF_SEPARATOR);
menu.AppendMenu(flags, ID_SAVERESULTS, 'Sa&ve results')
menu.TrackPopupMenu(params[5])
return 0
def OnCmdOpenFile(self, cmd, code):
doc = win32ui.GetApp().OpenDocumentFile(self.fnm)
if doc:
vw = doc.GetFirstView()
#hope you have an editor that implements GotoLine()!
try:
vw.GotoLine(int(self.lnnum))
except:
pass
return 0
def OnCmdGrep(self, cmd, code):
curparamsstr = self.GetDocument().GetParams()
params = curparamsstr.split('\t')
params[2] = self.sel
greptemplate.setParams('\t'.join(params))
greptemplate.OpenDocumentFile()
return 0
def OnTryAgain(self, cmd, code):
greptemplate.setParams(self.GetDocument().GetParams())
greptemplate.OpenDocumentFile()
return 0
def OnCmdSave(self, cmd, code):
flags = win32con.OFN_OVERWRITEPROMPT
dlg = win32ui.CreateFileDialog(0, None, None, flags, "Text Files (*.txt)|*.txt||", self)
dlg.SetOFNTitle("Save Results As")
if dlg.DoModal() == win32con.IDOK:
pn = dlg.GetPathName()
self._obj_.SaveTextFile(pn)
return 0
def Append(self, strng):
numlines = self.GetLineCount()
endpos = self.LineIndex(numlines-1) + len(self.GetLine(numlines-1))
self.SetSel(endpos, endpos)
self.ReplaceSel(strng)
class GrepDialog(dialog.Dialog):
def __init__(self, dp, fp, gp, cs, r, v):
style = win32con.DS_MODALFRAME | win32con.WS_POPUP | win32con.WS_VISIBLE | win32con.WS_CAPTION | win32con.WS_SYSMENU | win32con.DS_SETFONT
CS = win32con.WS_CHILD | win32con.WS_VISIBLE
tmp = [ ["Grep", (0, 0, 210, 90), style, None, (8, "MS Sans Serif")], ]
tmp.append([STATIC, "Grep For:", -1, (7, 7, 50, 9), CS ])
tmp.append([EDIT, gp, 101, (52, 7, 144, 11), CS | win32con.WS_TABSTOP | win32con.ES_AUTOHSCROLL | win32con.WS_BORDER])
tmp.append([STATIC, "Directories:", -1, (7, 20, 50, 9), CS ])
tmp.append([EDIT, dp, 102, (52, 20, 128, 11), CS | win32con.WS_TABSTOP | win32con.ES_AUTOHSCROLL | win32con.WS_BORDER])
tmp.append([BUTTON, '...', 110, (182,20, 16, 11), CS | win32con.BS_PUSHBUTTON | win32con.WS_TABSTOP])
tmp.append([STATIC, "File types:", -1, (7, 33, 50, 9), CS ])
tmp.append([EDIT, fp, 103, (52, 33, 128, 11), CS | win32con.WS_TABSTOP | win32con.ES_AUTOHSCROLL | win32con.WS_BORDER ])
tmp.append([BUTTON, '...', 111, (182,33, 16, 11), CS | win32con.BS_PUSHBUTTON | win32con.WS_TABSTOP])
tmp.append([BUTTON,'Case sensitive', 104, (7, 45, 72, 9), CS | win32con.BS_AUTOCHECKBOX | win32con.BS_LEFTTEXT| win32con.WS_TABSTOP])
tmp.append([BUTTON,'Subdirectories', 105, (7, 56, 72, 9), CS | win32con.BS_AUTOCHECKBOX | win32con.BS_LEFTTEXT| win32con.WS_TABSTOP])
tmp.append([BUTTON,'Verbose', 106, (7, 67, 72, 9), CS | win32con.BS_AUTOCHECKBOX | win32con.BS_LEFTTEXT| win32con.WS_TABSTOP])
tmp.append([BUTTON,'OK', win32con.IDOK, (166,53, 32, 12), CS | win32con.BS_DEFPUSHBUTTON| win32con.WS_TABSTOP])
tmp.append([BUTTON,'Cancel', win32con.IDCANCEL, (166,67, 32, 12), CS | win32con.BS_PUSHBUTTON| win32con.WS_TABSTOP])
dialog.Dialog.__init__(self, tmp)
self.AddDDX(101,'greppattern')
self.AddDDX(102,'dirpattern')
self.AddDDX(103,'filpattern')
self.AddDDX(104,'casesensitive')
self.AddDDX(105,'recursive')
self.AddDDX(106,'verbose')
self._obj_.data['greppattern'] = gp
self._obj_.data['dirpattern'] = dp
self._obj_.data['filpattern'] = fp
self._obj_.data['casesensitive'] = cs
self._obj_.data['recursive'] = r
self._obj_.data['verbose'] = v
self.HookCommand(self.OnMoreDirectories, 110)
self.HookCommand(self.OnMoreFiles, 111)
def OnMoreDirectories(self, cmd, code):
self.getMore('Grep\\Directories', 'dirpattern')
def OnMoreFiles(self, cmd, code):
self.getMore('Grep\\File Types', 'filpattern')
def getMore(self, section, key):
self.UpdateData(1)
#get the items out of the ini file
ini = win32ui.GetProfileFileName()
secitems = win32api.GetProfileSection(section, ini)
items = []
for secitem in secitems:
items.append(secitem.split('=')[1])
dlg = GrepParamsDialog(items)
if dlg.DoModal() == win32con.IDOK:
itemstr = ';'.join(dlg.getItems())
self._obj_.data[key] = itemstr
#update the ini file with dlg.getNew()
i = 0
newitems = dlg.getNew()
if newitems:
items = items + newitems
for item in items:
win32api.WriteProfileVal(section, repr(i), item, ini)
i = i + 1
self.UpdateData(0)
def OnOK(self):
self.UpdateData(1)
for id, name in [(101,'greppattern'), (102,'dirpattern'), (103,'filpattern')]:
if not self[name]:
self.GetDlgItem(id).SetFocus()
win32api.MessageBeep()
win32ui.SetStatusText("Please enter a value")
return
self._obj_.OnOK()
class GrepParamsDialog(dialog.Dialog):
def __init__(self, items):
self.items = items
self.newitems = []
style = win32con.DS_MODALFRAME | win32con.WS_POPUP | win32con.WS_VISIBLE | win32con.WS_CAPTION | win32con.WS_SYSMENU | win32con.DS_SETFONT
CS = win32con.WS_CHILD | win32con.WS_VISIBLE
tmp = [ ["Grep Parameters", (0, 0, 205, 100), style, None, (8, "MS Sans Serif")], ]
tmp.append([LISTBOX, '', 107, (7, 7, 150, 72), CS | win32con.LBS_MULTIPLESEL| win32con.LBS_STANDARD | win32con.LBS_HASSTRINGS | win32con.WS_TABSTOP | win32con.LBS_NOTIFY])
tmp.append([BUTTON,'OK', win32con.IDOK, (167, 7, 32, 12), CS | win32con.BS_DEFPUSHBUTTON| win32con.WS_TABSTOP])
tmp.append([BUTTON,'Cancel', win32con.IDCANCEL, (167,23, 32, 12), CS | win32con.BS_PUSHBUTTON| win32con.WS_TABSTOP])
tmp.append([STATIC,'New:', -1, (2, 83, 15, 12), CS])
tmp.append([EDIT, '', 108, (18, 83, 139, 12), CS | win32con.WS_TABSTOP | win32con.ES_AUTOHSCROLL | win32con.WS_BORDER])
tmp.append([BUTTON,'Add', 109, (167,83, 32, 12), CS | win32con.BS_PUSHBUTTON| win32con.WS_TABSTOP])
dialog.Dialog.__init__(self, tmp)
self.HookCommand(self.OnAddItem, 109)
self.HookCommand(self.OnListDoubleClick, 107)
def OnInitDialog(self):
lb = self.GetDlgItem(107)
for item in self.items:
lb.AddString(item)
return self._obj_.OnInitDialog()
def OnAddItem(self, cmd, code):
eb = self.GetDlgItem(108)
item = eb.GetLine(0)
self.newitems.append(item)
lb = self.GetDlgItem(107)
i = lb.AddString(item)
lb.SetSel(i, 1)
return 1
def OnListDoubleClick(self, cmd, code):
if code == win32con.LBN_DBLCLK:
self.OnOK()
return 1
def OnOK(self):
lb = self.GetDlgItem(107)
self.selections = lb.GetSelTextItems()
self._obj_.OnOK()
def getItems(self):
return self.selections
def getNew(self):
return self.newitems
try:
win32ui.GetApp().RemoveDocTemplate(greptemplate)
except NameError:
pass
greptemplate = GrepTemplate()
| bsd-3-clause | -6,305,385,083,322,870,000 | 34.179245 | 195 | 0.651786 | false |
crossbario/autobahn-testsuite | autobahntestsuite/autobahntestsuite/case/case9_5_3.py | 2 | 1128 | ###############################################################################
##
## Copyright (c) Crossbar.io Technologies GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case9_5_1 import Case9_5_1
class Case9_5_3(Case9_5_1):
DESCRIPTION = """Send text message message with payload of length 1 * 2**20 (1M). Sent out data in chops of 256 octets."""
EXPECTATION = """Receive echo'ed text message (with payload as sent)."""
def setChopSize(self):
self.chopsize = 256
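# Editorial note (not part of the original test): sibling cases in this family
# appear to differ only in the chop size they configure. A hypothetical variant
# following the same pattern would look like:
#
#   class Case9_5_X(Case9_5_1):
#       DESCRIPTION = """Send text message with payload of length 1 * 2**20 (1M). Sent out data in chops of 64 octets."""
#       EXPECTATION = """Receive echo'ed text message (with payload as sent)."""
#       def setChopSize(self):
#           self.chopsize = 64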
| apache-2.0 | 7,409,975,711,324,653,000 | 38.285714 | 125 | 0.591312 | false |
mansenfranzen/tssim | tssim/core/series.py | 1 | 3770 | """This module contains the TimeSeries"""
import itertools
import pandas as pd
from bokeh.palettes import Category10
from bokeh.plotting import figure, show, save
from . import track
class TimeSeriesResult:
"""Contains the result of generated TimeTrack instance. Provides convenient
plotting functions to visualize time series values.
"""
def __init__(self, tracks, values):
self.values = values
self.tracks = tracks
def plot(self, action="show", filename="plot.html", **kwargs):
"""Plot the result time series values.
"""
p = figure(x_axis_type="datetime", **kwargs)
p.line(self.values.index, self.values)
return self._handle_plot_action(p, action, filename)
def plot_tracks(self, action="show", filename="plot.html", **kwargs):
"""Plot all TimeTrackResult values in one figure.
"""
p = figure(x_axis_type="datetime", **kwargs)
colors = itertools.cycle(Category10[10])
for (name, result), color in zip(self.tracks.items(), colors):
values = result.values
p.line(values.index, values, color=color, legend=name)
return self._handle_plot_action(p, action, filename)
def _handle_plot_action(self, p, action, filename):
"""Helper function to evaluate required action for resulting plot.
Plot may either be shown, saved or returned.
"""
if action == "show":
show(p)
elif action == "save":
save(p, filename)
elif action == "raw":
return p
else:
raise ValueError("Wrong plot 'action' provided. Please use 'show',"
" 'save' or 'raw' (return figure object).")
class TimeSeries:
"""Resembles a time series with defined DatetimeIndex. A TimeSeries is
defined by adding TimeTracks to it. Each TimeTrack may hold different
TimeFunctions which provide values per time interval.
Summing up all time tracks yields the final result of the TimeSeries.
"""
def __init__(self, index=None, freq="h", **kwargs):
"""Initialize TimeSeries. Expect pd.DatetimeIndex as `index`. If not
provided, delegates to pd.date_range to create a valid DatetimeIndex.
"""
if isinstance(index, pd.DatetimeIndex):
self.index = index
else:
self.index = pd.date_range(freq=freq, **kwargs)
self.freq = freq
self.tracks = {}
def add(self, name, *args, **kwargs):
"""Add a TimeTrack to the current TimeSeries. A name must be given to
uniquely identify the given track.
"""
if name in self.tracks:
raise ValueError("TimeTrack with name '{}' is already given.")
self.tracks[name] = track.TimeTrack(self, *args, **kwargs)
def generate(self):
"""Generate actual time series values for all TimeTracks. Return
TimeSeriesResult with TimeTracks definitions and summed time series
values of all TimeTracks.
"""
tracks = {name: track.generate() for name, track in self.tracks.items()}
track_values = [x.values for x in tracks.values()]
time_series_values = pd.concat(track_values, axis=1).sum(axis=1)
return TimeSeriesResult(tracks, time_series_values)
def __getitem__(self, item):
"""Provide convenient label access to TimeTracks of current TimeSeries.
"""
return self.tracks[item]
def __repr__(self):
"""Provide convenient print representation.
"""
tpl = "{} ({})"
return tpl.format(self.__class__.__name__, self.tracks)
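# Usage sketch (illustrative only; the TimeTrack/TimeFunction call signatures
# are assumptions, they are not defined in this module):
#
#   ts = TimeSeries(start="2017-01-01", periods=24, freq="h")
#   ts.add("base", some_time_function)      # extra args are forwarded to TimeTrack
#   result = ts.generate()
#   result.plot(action="save", filename="series.html")
#   result.plot_tracks()                    # one colored line per track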
| mit | 3,622,098,269,315,808,000 | 28.224806 | 80 | 0.603714 | false |
evanphx/yoke | src/VBox/Additions/common/crOpenGL/tsfuncs.py | 22 | 1065 | # Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
import sys
import apiutil
apiutil.CopyrightC()
print """
/* DO NOT EDIT - THIS FILE GENERATED BY THE tsfuncs.py SCRIPT */
#include "stub.h"
"""
keys = apiutil.GetDispatchedFunctions(sys.argv[1]+"/APIspec.txt")
for func_name in keys:
return_type = apiutil.ReturnType(func_name)
params = apiutil.Parameters(func_name)
print "static %s SPULOAD_APIENTRY ts_%s( %s )" % (return_type, func_name, apiutil.MakeDeclarationString(params) )
print "{"
print "\tSPUDispatchTable *tab = (SPUDispatchTable *) crGetTSD(&stub.dispatchTSD);"
if return_type != "void":
print "\treturn ",
print "\ttab->%s( %s );" % (func_name, apiutil.MakeCallString(params))
print "}"
print ""
print "SPUDispatchTable stubThreadsafeDispatch = {"
for func_name in keys:
print "\tts_%s," % func_name
print "\tNULL, /* copyList */"
print "\tNULL, /* copy_of */"
print "\t0, /* mark */"
print "\tNULL /* server */"
print "};"
| gpl-2.0 | -8,223,986,496,108,026,000 | 22.152174 | 114 | 0.68169 | false |
stamen/maptcha-v2 | edit_ui/relative_time.py | 1 | 2415 | #http://jehiah.cz/download/relativeDates.py.txt
import datetime
def ungettext(a,b,count):
if count == 1:
return a
return b
def ugettext(a):
return a
def timesince(d, now=None):
"""
Takes two datetime objects and returns the time between d and now
as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
then "0 minutes" is returned.
Units used are years, months, weeks, days, hours, and minutes.
Seconds and microseconds are ignored. Up to two adjacent units will be
displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
"""
chunks = (
(60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)),
(60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)),
(60 * 60 * 24 * 7, lambda n : ungettext('week', 'weeks', n)),
(60 * 60 * 24, lambda n : ungettext('day', 'days', n)),
(60 * 60, lambda n: ungettext('hour', 'hours', n)),
(60, lambda n: ungettext('minute', 'minutes', n))
)
# Convert datetime.date to datetime.datetime for comparison.
if not isinstance(d, datetime.datetime):
d = datetime.datetime(d.year, d.month, d.day)
if now and not isinstance(now, datetime.datetime):
now = datetime.datetime(now.year, now.month, now.day)
if not now:
if d.tzinfo:
now = datetime.datetime.now(d.tzinfo)  # the Django original used LocalTimezone(d), which is not defined here
else:
now = datetime.datetime.now()
# ignore microsecond part of 'd' since we removed it from 'now'
delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return u'now' #u'0 ' + ugettext('minutes')
for i, (seconds, name) in enumerate(chunks):
count = since // seconds
if count != 0:
break
s = ugettext('%(number)d %(type)s') % {'number': count, 'type': name(count)}
if i + 1 < len(chunks):
# Now get the second item
seconds2, name2 = chunks[i + 1]
count2 = (since - (seconds * count)) // seconds2
if count2 != 0:
s += ugettext(', %(number)d %(type)s') % {'number': count2, 'type': name2(count2)}
return s
| isc | -6,009,420,817,337,678,000 | 37.349206 | 94 | 0.596273 | false
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/openmdao.main-0.13.0-py2.7.egg/openmdao/main/test/test_caseiter.py | 1 | 1422 | import unittest
from openmdao.lib.casehandlers.listcase import ListCaseIterator
from openmdao.main.api import Case
from openmdao.main.uncertain_distributions import NormalDistribution
from openmdao.main.caseiter import caseiter_to_dict
from openmdao.main.container import _get_entry_group
class CaseIterTestCase(unittest.TestCase):
def setUp(self):
cases = []
for i in range(20):
inputs = [('comp1.x', float(i)), ('comp1.y', i*2.)]
outputs = [('comp1.z', i*1.5),
('comp2.normal', NormalDistribution(float(i), 0.5))]
case = Case(inputs=inputs)
case._outputs = dict(outputs)
cases.append(case)
self.caseiter = ListCaseIterator(cases)
self.varnames = ['comp2.normal', 'comp1.x', 'comp1.z']
def test_caseiter_to_dict(self):
dct = caseiter_to_dict(self.caseiter, self.varnames)
self.assertEqual(len(dct), 3)
for name, value in dct.items():
self.assertEqual(len(value), 20)
if name == 'comp2.normal':
self.assertTrue(isinstance(value[0], NormalDistribution))
else:
self.assertTrue(isinstance(value[0], float))
def test_get_entry_group(self):
self.assertEqual(_get_entry_group(self.caseiter),
'openmdao.case_iterator')
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | -1,051,742,229,240,915,800 | 32.069767 | 75 | 0.609001 | false |
sendgrid/sendgrid-python | examples/user/user.py | 1 | 7790 | import sendgrid
import os
sg = sendgrid.SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
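# Each call below returns a python_http_client Response object; a typical way
# to consume it (illustrative sketch, `json` import assumed) is:
#
#   import json
#   response = sg.client.user.profile.get()
#   if 200 <= response.status_code < 300:
#       profile = json.loads(response.body)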
##################################################
# Get a user's account information. #
# GET /user/account #
response = sg.client.user.account.get()
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Retrieve your credit balance #
# GET /user/credits #
response = sg.client.user.credits.get()
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Update your account email address #
# PUT /user/email #
data = {
"email": "[email protected]"
}
response = sg.client.user.email.put(request_body=data)
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Retrieve your account email address #
# GET /user/email #
response = sg.client.user.email.get()
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Update your password #
# PUT /user/password #
data = {
"new_password": "new_password",
"old_password": "old_password"
}
response = sg.client.user.password.put(request_body=data)
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Update a user's profile #
# PATCH /user/profile #
data = {
"city": "Orange",
"first_name": "Example",
"last_name": "User"
}
response = sg.client.user.profile.patch(request_body=data)
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Get a user's profile #
# GET /user/profile #
response = sg.client.user.profile.get()
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Cancel or pause a scheduled send #
# POST /user/scheduled_sends #
data = {
"batch_id": "YOUR_BATCH_ID",
"status": "pause"
}
response = sg.client.user.scheduled_sends.post(request_body=data)
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Retrieve all scheduled sends #
# GET /user/scheduled_sends #
response = sg.client.user.scheduled_sends.get()
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Update user scheduled send information #
# PATCH /user/scheduled_sends/{batch_id} #
data = {
"status": "pause"
}
batch_id = "test_url_param"
response = sg.client.user.scheduled_sends._(batch_id).patch(request_body=data)
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Retrieve scheduled send #
# GET /user/scheduled_sends/{batch_id} #
batch_id = "test_url_param"
response = sg.client.user.scheduled_sends._(batch_id).get()
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Delete a cancellation or pause of a scheduled send #
# DELETE /user/scheduled_sends/{batch_id} #
batch_id = "test_url_param"
response = sg.client.user.scheduled_sends._(batch_id).delete()
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Update Enforced TLS settings #
# PATCH /user/settings/enforced_tls #
data = {
"require_tls": True,
"require_valid_cert": False
}
response = sg.client.user.settings.enforced_tls.patch(request_body=data)
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Retrieve current Enforced TLS settings. #
# GET /user/settings/enforced_tls #
response = sg.client.user.settings.enforced_tls.get()
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Update your username #
# PUT /user/username #
data = {
"username": "test_username"
}
response = sg.client.user.username.put(request_body=data)
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Retrieve your username #
# GET /user/username #
response = sg.client.user.username.get()
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Update Event Notification Settings #
# PATCH /user/webhooks/event/settings #
data = {
"bounce": True,
"click": True,
"deferred": True,
"delivered": True,
"dropped": True,
"enabled": True,
"group_resubscribe": True,
"group_unsubscribe": True,
"open": True,
"processed": True,
"spam_report": True,
"unsubscribe": True,
"url": "url"
}
response = sg.client.user.webhooks.event.settings.patch(request_body=data)
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Retrieve Event Webhook settings #
# GET /user/webhooks/event/settings #
response = sg.client.user.webhooks.event.settings.get()
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Test Event Notification Settings #
# POST /user/webhooks/event/test #
data = {
"url": "url"
}
response = sg.client.user.webhooks.event.test.post(request_body=data)
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Create a parse setting #
# POST /user/webhooks/parse/settings #
data = {
"hostname": "myhostname.com",
"send_raw": False,
"spam_check": True,
"url": "http://email.myhosthame.com"
}
response = sg.client.user.webhooks.parse.settings.post(request_body=data)
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Retrieve all parse settings #
# GET /user/webhooks/parse/settings #
response = sg.client.user.webhooks.parse.settings.get()
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Update a parse setting #
# PATCH /user/webhooks/parse/settings/{hostname} #
data = {
"send_raw": True,
"spam_check": False,
"url": "http://newdomain.com/parse"
}
hostname = "test_url_param"
response = sg.client.user.webhooks.parse.settings._(
hostname).patch(request_body=data)
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Retrieve a specific parse setting #
# GET /user/webhooks/parse/settings/{hostname} #
hostname = "test_url_param"
response = sg.client.user.webhooks.parse.settings._(hostname).get()
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Delete a parse setting #
# DELETE /user/webhooks/parse/settings/{hostname} #
hostname = "test_url_param"
response = sg.client.user.webhooks.parse.settings._(hostname).delete()
print(response.status_code)
print(response.body)
print(response.headers)
##################################################
# Retrieves Inbound Parse Webhook statistics. #
# GET /user/webhooks/parse/stats #
params = {'aggregated_by': 'day',
'limit': 'test_string',
'start_date': '2016-01-01',
'end_date': '2016-04-01',
'offset': 'test_string'}
response = sg.client.user.webhooks.parse.stats.get(query_params=params)
print(response.status_code)
print(response.body)
print(response.headers)
| mit | 6,187,842,547,047,629,000 | 25.228956 | 78 | 0.605648 | false |
lawzou/shoop | shoop/core/taxing/_price.py | 6 | 1025 | # This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from shoop.core.pricing import TaxfulPrice, TaxlessPrice
class TaxedPrice(object):
def __init__(self, taxful, taxless, taxes=None):
"""
Initialize from given prices and taxes.
:type taxful: shoop.core.pricing.TaxfulPrice
:type taxless: shoop.core.pricing.TaxlessPrice
:type taxes: list[LineTax]|None
"""
assert isinstance(taxful, TaxfulPrice)
assert isinstance(taxless, TaxlessPrice)
if not taxes:
taxes = ()
self.taxful = taxful
self.taxless = taxless
self.taxes = taxes
assert not taxes or (
taxful.amount == (taxless.amount + sum(x.amount for x in taxes))
)
@property
def tax_rate(self):
return (self.taxful.amount / self.taxless.amount) - 1
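# Illustrative example (the price constructor arguments are assumptions based
# on the imported price classes, which are not defined in this file):
#
#   price = TaxedPrice(TaxfulPrice(120, "EUR"), TaxlessPrice(100, "EUR"))
#   price.tax_rate   # -> 0.2, i.e. a 20% tax rate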
| agpl-3.0 | -7,750,720,185,673,848,000 | 31.03125 | 76 | 0.639024 | false |
aam-at/tensorflow | tensorflow/python/framework/function.py | 5 | 48384 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Python front-end supports for functions.
NOTE: At this time, functions are experimental and subject to change!. Proceed
with caution.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.python.client import pywrap_tf_session as c_api
from tensorflow.python.eager import context
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_to_function_def
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import compat
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
class Defun(object):
"""Decorator used to define TensorFlow functions.
Use this decorator to make a Python function usable directly as a TensorFlow
function.
The decorated function must add ops to the default graph and return zero or
more `Tensor` objects. Call the decorator with named arguments, one for each
argument of the function to decorate, with the expected type of the argument
as value.
For example if the function to decorate accepts two `tf.float32` arguments
named `x` and `y`, call the decorator with:
@Defun(tf.float32, tf.float32)
def foo(x, y):
...
When you call the decorated function, it adds the `call` ops to the
default graph. In addition, it adds the definition of the function into the
default graph. Because the addition of the function into the graph
is deferred, the decorator can be used anywhere in the program.
Any variables created inside of the function are hoisted into the outer graph.
Note that the variables are created in the variable scope that was active
during the first call to the function. Subsequent function calls will refer to
the same set of variables.
Definitions of functions in a graph are frozen as soon as the graph is used to
create a session. However, new functions and new calls to existing functions
may be added to the graph, with the new functions themselves becoming
immediately frozen.
Example, but also see the [How To on functions](link_needed).
```python
# Defining the function.
@tf.Defun(tf.float32, tf.float32)
def MyFunc(x, y):
return x + y, x - y
# Building the graph.
a = tf.constant([1.0])
b = tf.constant([2.0])
c, d = MyFunc(a, b, name='mycall')
```
"""
def __init__(self, *input_types, **kwargs):
"""Create a `Defun` decorator.
Args:
*input_types: A list of `tf.DType`
**kwargs: Optional keyword arguments, including
func_name - (optional). A python string, the name to use to
declare this `Function` in the graph.
grad_func - (optional). A function implementing the gradient
of the function-to-register. This is must be a
`_DefinedFunction` object. The gradient
function must satisfy the criterion defined in
function.proto:GradientDef.
python_grad_func - (optional). A function implementing the
gradient of the function python-side. This function must
take the current op and the gradients w.r.t. its outputs,
and return the gradients w.r.t. the inputs. That is it must
implement the interface expected by `tf.RegisterGradient`).
This will be called by tf.gradients to add the gradient ops
to the graph. At most one of grad_func and python_grad_func
can be specified.
out_names = (optional). A list of strings, one per output
tensor.
shape_func - (optional). A function taking the op and returning a list
of static shapes to set for the function's outputs.
"""
self._input_types = input_types
self._func_name = kwargs.pop("func_name", None)
self._grad_func = kwargs.pop("grad_func", None)
self._python_grad_func = kwargs.pop("python_grad_func", None)
self._out_names = kwargs.pop("out_names", None)
self._extra_kwargs = kwargs
def __call__(self, func):
# Various sanity checks on the callable func.
if not callable(func):
raise ValueError("function %s must be callable" % func)
# Func should not use kwargs and defaults.
argspec = tf_inspect.getargspec(func)
if argspec.keywords or argspec.defaults:
raise ValueError(
"function with argument defaults or keywords arguments are not"
" supported. {} has defaults {} and keywords {}.".format(
func, argspec.defaults, argspec.keywords))
# Computes how many arguments 'func' has.
min_args = len(argspec.args)
max_args = min_args
if argspec.varargs:
max_args = 1000000
argnames = argspec.args
if tf_inspect.ismethod(func):
# 1st argument is the "class" type.
min_args -= 1
argnames = argnames[1:]
if self._input_types:
# If Defun is given a list of types for the inputs, the number
# of input types should be compatible with 'func'.
num = len(self._input_types)
if num < min_args or num > max_args:
raise ValueError(
"The function has fewer arguments than the number of specified "
"input types.")
return _DefinedFunction(
func,
argnames,
self._input_types,
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
# 'func' expects no arguments and input types is an empty list.
if min_args == 0 and max_args == 0:
return _DefinedFunction(
func, [], [],
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
# Input types are unknown. It's an overloaded function and hence
# its definition needs to be deferred until it's called.
return _OverloadedFunction(
func,
argnames,
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
class _DefinedFunctionDeleter(object):
"""Unregister function from eager context."""
__slots__ = ["name"]
def __init__(self, name):
self.name = name
def __del__(self):
try:
context.remove_function(self.name)
except TypeError:
# Suppress some exceptions, mainly for the case when we're running on
# module deletion. Things that can go wrong include the context module
# already being unloaded, self._handle._handle_data no longer being
# valid, and so on. Printing warnings in these cases is silly
# (exceptions raised from __del__ are printed as warnings to stderr).
pass # 'NoneType' object is not callable when the handle has been
# partially unloaded.
except AttributeError:
pass # 'NoneType' object has no attribute 'eager_mode' when context has
# been unloaded. Will catch other module unloads as well.
class _DefinedFunction(object):
"""_DefinedFunction encapsulates a function definition and its properties.
Attributes:
name: The function name.
definition: The definition of this function. A FunctionDef proto.
grad_func_name: If not None, the name of this function's gradient function.
python_grad_func: A python callable implementing the gradient of
the function python-side.
"""
def __init__(self,
func,
argnames,
input_types,
func_name=None,
grad_func=None,
python_grad_func=None,
out_names=None,
shape_func=None,
capture_by_value=False,
allowlisted_stateful_ops=None,
capture_resource_var_by_value=True,
**kwargs):
"""Creates _DefinedFunction.
Args:
func: A python callable which constructs a tf function body.
argnames: A list of strings for function argument names.
input_types: The function's argument types. Can be a tuple, list of
tf data types.
func_name: The function name. Defaults to None, in which derives from
'func'.
grad_func: This function's gradient function, if not None. Defaults
to None.
python_grad_func: A python callable implementing the gradient of
the function python-side.
out_names: An optional list of strings for the function return value
names.
shape_func: An optional function mapping an op to a list of static
output shapes.
capture_by_value: Boolean (defaults to False). If True, captured values
will be copied into the function body.
allowlisted_stateful_ops: A set of ops that if stateful we ignore and
copy into the function body, when `capture_by_value` is True.
capture_resource_var_by_value: Boolean (defaults to True). If False,
captured resource variable returns the handle instead of value.
**kwargs: The keyword arguments. **kwargs is passed to every call
site of this function.
Raises:
ValueError: The function definition is invalid.
"""
self._func = func
self._input_types = input_types
self._func_name = func_name
self._grad_func = grad_func
self._python_grad_func = python_grad_func
self._out_names = out_names
self._shape_func = shape_func
self._capture_by_value = capture_by_value
self._allowlisted_stateful_ops = allowlisted_stateful_ops
if self._allowlisted_stateful_ops is None:
self._allowlisted_stateful_ops = set()
self._capture_resource_var_by_value = capture_resource_var_by_value
self._extra_kwargs = kwargs
# Constructed only when C API is disabled, lazily
self._definition = None
# Constructed only when C API is enabled, lazily
self._c_func = None
self._function_deleter = None
self._sub_functions = {} # Constructed with _definition or _c_func
# pylint: disable=protected-access
device_funcs = ops.get_default_graph()._device_functions_outer_to_inner
# pylint: enable=protected-access
# Get the innermost device if possible.
self._caller_device = device_funcs[-1] if device_funcs else None
# Cached OpDef for this function. When C API is enabled, this is
# the only part of FunctionDef that we cache in Python. When C API
# is disabled the whole _definition is available and this is simply
# another reference to _definition.signature
self._op_def = None
assert isinstance(input_types, (list, tuple))
self._arg_types = input_types
self._arg_names = [argnames[i] if i < len(argnames) else ("arg%d" % i)
for i in range(len(input_types))]
@property
def name(self):
"""Function name."""
self._create_definition_if_needed()
return self._func_name
@property
def definition(self):
"""Function definition proto."""
self._create_definition_if_needed()
if self._c_func:
with c_api_util.tf_buffer() as buf:
c_api.TF_FunctionToFunctionDef(self._c_func.func, buf)
fdef = function_pb2.FunctionDef()
proto_data = c_api.TF_GetBuffer(buf)
fdef.ParseFromString(compat.as_bytes(proto_data))
with ops.init_scope():
if context.executing_eagerly():
context.add_function(self._c_func.func)
self._function_deleter = _DefinedFunctionDeleter(
fdef.signature.name)
return fdef
return self._definition
@property
def _signature(self):
self._create_definition_if_needed()
return self._op_def
def set_grad_func(self, grad_func):
"""Specifies the gradient function of this function."""
assert not self._grad_func
assert isinstance(grad_func, _DefinedFunction)
self._grad_func = grad_func
@property
def grad_func_name(self):
"""Returns the name of the gradient function."""
return self._grad_func.name if self._grad_func else None
@property
def python_grad_func(self):
"""Python gradient function callable."""
return self._python_grad_func
@property
def declared_input_types(self):
"""Returns the list of data types of explicit declared inputs."""
return self._input_types
@property
def captured_inputs(self):
"""Returns the list of implicitly captured inputs."""
self._create_definition_if_needed()
return self._extra_inputs
@property
def stateful_ops(self):
"""Returns the list of stateful ops in function definition.
Returns:
A list of (op.name, op.type) pairs.
"""
self._create_definition_if_needed()
return self._stateful_ops
def _create_definition_if_needed(self):
"""Creates the function definition if it's not created yet."""
with context.graph_mode():
self._create_definition_if_needed_impl()
def _create_definition_if_needed_impl(self):
"""This is not what you want, see _create_definition_if_needed."""
if self._definition is not None or self._c_func is not None:
return
# Copy variable collections (by reference) from the parent graph such that
# name based variable sharing (e.g. via tf.make_template) works between the
# func graph and parent graph.
variable_keys = []
variable_keys.extend(ops.GraphKeys._VARIABLE_COLLECTIONS) # pylint: disable=protected-access
variable_keys.append(vs._VARSTORE_KEY) # pylint: disable=protected-access
collections_ref = {}
parent_collections_ref = ops.get_default_graph()._collections # pylint: disable=protected-access
for key in variable_keys:
if key not in parent_collections_ref:
parent_collections_ref[key] = collections_ref[key] = []
else:
collections_ref[key] = parent_collections_ref[key]
temp_graph = func_graph_from_py_func(
self._func,
self._arg_names,
self._arg_types,
self._func_name,
self._capture_by_value,
self._caller_device,
collections_ref=collections_ref,
allowlisted_stateful_ops=self._allowlisted_stateful_ops,
capture_resource_var_by_value=self._capture_resource_var_by_value)
self._extra_inputs = temp_graph.extra_inputs
# pylint: disable=protected-access
self._sub_functions = temp_graph._functions
# pylint: enable=protected-access
# Extra kwargs are treated as attrs on the function def.
if self._func_name:
base_func_name = self._func_name
else:
base_func_name = function_utils.get_func_name(self._func)
if self._grad_func:
base_func_name += ("_%s" % self._grad_func.name)
kwargs_attr = _parse_kwargs_as_attrs(base_func_name, **self._extra_kwargs)
if not temp_graph._c_graph: # pylint: disable=protected-access
# Build the FunctionDef
self._definition = graph_to_function_def.graph_to_function_def(
temp_graph,
temp_graph.get_operations(),
temp_graph.inputs,
temp_graph.outputs,
out_names=self._out_names)
for k in kwargs_attr:
self._definition.attr[k].CopyFrom(kwargs_attr[k])
# Hash the definition and its dependencies.
self._hash_str = self._create_hash_str(
self._definition.signature.input_arg,
self._definition.signature.output_arg, self._definition.node_def)
# Finally, we decide the function name to use. If not specified,
# make up something which is almost certainly unique (but deterministic).
if not self._func_name:
self._func_name = "_".join([base_func_name, self._hash_str])
self._definition.signature.name = self._func_name
if self._func.__doc__:
self._definition.signature.description = self._func.__doc__
self._op_def = self._definition.signature
else: # C API is enabled
output_names = ([compat.as_bytes(x) for x in self._out_names]
if self._out_names else [])
description = self._func.__doc__ or None
# pylint: disable=protected-access
c_func = c_api.TF_GraphToFunction_wrapper(
temp_graph._c_graph,
base_func_name,
self._func_name is None, # append_hash_to_fn_name
None, # opers
[t._as_tf_output() for t in temp_graph.inputs],
[t._as_tf_output() for t in temp_graph.outputs],
output_names,
[], # control_outputs
[], # control_output_names
None, # opts
description)
self._c_func = c_api_util.ScopedTFFunction(c_func)
# pylint: enable=protected-access
self._set_c_attrs(kwargs_attr)
# Set cached fields: _op_def and _func_name (if not already set)
self._op_def = self.definition.signature
if self._func_name:
assert self._func_name == self._op_def.name
else:
self._func_name = compat.as_str(self._op_def.name)
self._stateful_ops = [(op.name, op.type)
for op in temp_graph.get_operations()
if op._is_stateful] # pylint: disable=protected-access
def _set_c_attrs(self, attrs):
"""Sets `attrs` as attributes of self._c_func.
Requires that self._c_func is not None.
Args:
attrs: a dictionary from attribute name to attribute proto value
"""
for name, attr_value in attrs.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
c_api.TF_FunctionSetAttrValueProto(self._c_func.func, compat.as_str(name),
serialized)
def _create_hash_str(self, input_arg, output_arg, node_def):
"""Creates an 8-character string unique to this input.
Args:
input_arg: the input_arg field of an OpDef
(e.g. self._definition.signature.input_arg)
output_arg: the output_arg field of an OpDef
(e.g. self._definition.signature.output_arg)
node_def: the node_def field of a FunctionDef
(e.g. self._definition.node_def)
Returns:
The unique string for this input
"""
hasher = hashlib.sha1()
def update_num(n):
hasher.update(compat.as_bytes("%x" % n))
def update_str(s):
update_num(len(s))
hasher.update(compat.as_bytes(s))
def update_strs(slist):
update_num(len(slist))
for s in slist:
update_str(s)
for adef in input_arg:
update_str(adef.SerializeToString())
for adef in output_arg:
update_str(adef.SerializeToString())
for n in sorted(node_def, key=lambda n: n.name):
update_str(n.name)
update_str(n.op)
update_strs(n.input)
update_num(len(n.attr))
# NOTE: protobuf map serialization does not guarantee ordering.
for k in sorted(n.attr):
update_str(k)
update_str(n.attr[k].SerializeToString())
return hasher.hexdigest()[:8]
def add_to_graph(self, g):
"""Adds this function into the graph g."""
self._create_definition_if_needed()
# Adds this function into 'g'.
# pylint: disable=protected-access
if context.executing_eagerly():
context.context().add_function_def(self.definition)
else:
g._add_function(self)
# pylint: enable=protected-access
# Ensures related sub-routines are defined in 'g', too.
for f in self._sub_functions.values():
f.add_to_graph(g)
# Adds its gradient function, too.
if self._grad_func:
self._grad_func.add_to_graph(g)
def __call__(self, *args, **kwargs):
self.add_to_graph(ops.get_default_graph())
args = [ops.convert_to_tensor(_) for _ in args] + self._extra_inputs
ret, op = _call(self._signature, *args, **kwargs)
# Set a hidden attr in 'op' so that gradients_impl can refer back
# to this _DefinedFunction instance to access python_grad_func.
assert isinstance(op, ops.Operation)
setattr(op, "__defun", self)
if self._shape_func is not None:
shapes = self._shape_func(op)
if len(shapes) != len(op.outputs):
raise ValueError("shape_func produced %d shapes for %d outputs" %
(len(shapes), len(op.outputs)))
for (t, shape) in zip(op.outputs, shapes):
t.set_shape(shape)
return ret
class _OverloadedFunction(object):
"""_OverloadedFunction encapsulates an overloaded function.
_OverloadedFunction maintains a mapping from input types to
instantiated _DefinedFunction in self._overload.
"""
def __init__(self,
func,
argnames,
func_name=None,
grad_func=None,
python_grad_func=None,
out_names=None,
**kwargs):
"""Creates _DefinedFunction.
Args:
func: A python callable which constructs a tf function body.
argnames: A list of strings for function argument names.
func_name: The function name. Defaults to None, in which derives from
'func'.
grad_func: This function's gradient function, if not None. Defaults
to None.
python_grad_func: A python callable implementing the gradient of
the function python-side.
out_names: A list of strings for the function return value names.
**kwargs: The keyword arguments. **kwargs is passed to every call
site of this function.
Raises:
ValueError: The function definition is invalid.
"""
self._func = func
self._argnames = argnames
self._func_name = func_name
assert grad_func is None or isinstance(grad_func, _OverloadedFunction)
self._grad_func = grad_func
self._python_grad_func = python_grad_func
self._out_names = out_names
self._extra_kwargs = kwargs
self._overload = {}
def instantiate(self, input_types):
"""Instantiate this function given input argument types.
Args:
input_types: A list of data types for the inputs.
Returns:
_DefinedFunction for the given input types.
"""
# Stringify the type list.
key = _type_list_to_str(input_types)
defined = self._overload.get(key)
if not defined:
# If not defined yet, define the function given the input types.
name = self._func_name
if name is not None:
name = "_".join([name, key])
defined = _DefinedFunction(
self._func,
self._argnames,
input_types,
name,
None,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
_ = defined.name # Fully instantiate the function definition.
if self._grad_func:
# If _grad_func is given, it is another
# _OverloadedFunction. We need to instantiate it with the
# right input types.
output_types = [
dtypes.DType(_.type) for _ in defined._signature.output_arg # pylint: disable=protected-access
]
# pylint: disable=protected-access
defined._grad_func = self._grad_func.instantiate(input_types +
output_types)
# pylint: enable=protected-access
self._overload[key] = defined
return defined
def __call__(self, *args, **kwargs):
input_types = []
args = list(args)
for (i, x) in enumerate(args):
x = ops.convert_to_tensor(x)
if not isinstance(x, ops.Tensor):
raise ValueError("Expect a Tensor but get ", x)
input_types.append(x.dtype)
args[i] = x
return self.instantiate(input_types)(*args, **kwargs)
class _FuncGraph(ops.Graph):
"""A helper for constructing a function.
_FuncGraph overrides ops.Graph's create_op() so that we can keep
track of all inputs into every op created inside the function. If
any input is from other graphs, we keep track of it in self.capture
and substitute the input with a place holder.
Each captured input's corresponding place holder is converted into a
function argument and the caller passes in the captured tensor.
"""
def __init__(self, name, capture_by_value, allowlisted_stateful_ops,
capture_resource_var_by_value, *args, **kwargs):
super(_FuncGraph, self).__init__(*args, **kwargs)
self._capture_by_value = capture_by_value
self._allowlisted_stateful_ops = allowlisted_stateful_ops
self._capture_resource_var_by_value = capture_resource_var_by_value
self._building_function = True
self._outer_graph = ops.get_default_graph()
self._vscope = vs.get_variable_scope()
self._old_custom_getter = self._vscope.custom_getter
# The name of the function.
self.name = name
# Placeholder tensors representing the inputs to this function. The tensors
# are in this _FuncGraph.
self.inputs = []
# Tensors that will be returned this function. The tensors are in this
# _FuncGraph.
self.outputs = []
# Maps external tensor -> internal tensor (e.g. input placeholder).
self._captured = {}
# The external tensors that have been captured as inputs and must be passed
# to this function (empty if capturing by value, otherwise these are the
# keys of _captured).
self.extra_inputs = []
# Input placeholders that been added for captured values (empty if capturing
# by value).
self.extra_args = []
# Captured variables.
# TODO(skyewm): is this needed?
self.extra_vars = []
# pylint: disable=g-doc-return-or-yield
@property
def outer_graph(self):
"""The graph active when this _FuncGraph was created."""
return self._outer_graph
@tf_contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Overridden from `tf.Graph` to update both the init_scope container
and the present inner container. This is necessary to make sure setting
containers applies correctly both to created variables and to stateful
ops.
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
# pylint: disable=protected-access
with ops.init_scope():
original_init_container = ops.get_default_graph()._container
try:
self._container = container_name
with ops.init_scope():
ops.get_default_graph()._container = container_name
yield self._container
finally:
self._container = original_container
with ops.init_scope():
ops.get_default_graph()._container = original_init_container
# pylint: enable=protected-access
# pylint: enable=g-doc-return-or-yield
def getvar(
self,
getter,
name,
shape=None,
dtype=None,
initializer=None,
reuse=None,
trainable=True,
collections=None, # pylint: disable=redefined-outer-name
use_resource=None,
**kwargs):
"""A custom variable getter."""
# Here, we switch the default graph to the outer graph and ask the
# variable scope in which the function is defined to give us the
# variable. The variable is stashed in extra_vars and returned to
# the caller.
#
# We capture these variables so that the variable definition is
# hoisted upward to the outer most graph.
with self._outer_graph.as_default():
# pylint: disable=protected-access
var = self._vscope.get_variable(
vs._get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
reuse=reuse,
trainable=trainable,
collections=collections,
use_resource=use_resource)
self.extra_vars.append(var)
if (isinstance(var, resource_variable_ops.BaseResourceVariable) and
self._capture_resource_var_by_value):
# For resource-based variables read the variable outside the function
# and pass in the value. This ensures that the function is pure and
# differentiable. TODO(apassos) this may have performance problems if
# the function will only do embedding lookups on the variable.
return var.value()
return var
def _create_op_internal(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
for i, x in enumerate(inputs):
if isinstance(x, ops.EagerTensor) or x.graph is not self:
inputs[i] = self.capture(x)
return super(_FuncGraph, self)._create_op_internal(
op_type,
inputs,
dtypes=dtypes,
input_types=input_types,
name=name,
attrs=attrs,
op_def=op_def,
compute_device=compute_device)
def capture(self, tensor, name=None):
"""Adds the given tensor to this graph and returns the captured tensor."""
if tensor.ref() in self._captured:
# Captured already.
return self._captured[tensor.ref()]
elif self._capture_by_value:
return self._add_tensor_and_parents(tensor)
else:
return self._capture_tensor_as_extra_input(tensor, name)
@property
def captures(self):
"""Pairs of tensors and captured tensor."""
return [(k.deref(), v) for k, v in self._captured.items()]
def _capture_tensor_as_extra_input(self, tensor, name=None):
# Substitute with a placeholder.
self.extra_inputs.append(tensor)
# Hoist the new input placeholder out of any control flow context
# we're currently in.
with ops.control_dependencies(None):
ph = array_ops.placeholder(
tensor.dtype, shape=tensor.get_shape(), name=name)
# pylint: disable=protected-access
if isinstance(tensor, ops.EagerTensor):
handle_data = tensor._handle_data
if handle_data:
handle_data = handle_data.SerializeToString()
else:
handle_data = c_api.GetHandleShapeAndType(tensor.graph._c_graph,
tensor._as_tf_output())
if handle_data:
c_api.SetHandleShapeAndType(ph.graph._c_graph, ph._as_tf_output(),
compat.as_bytes(handle_data))
# pylint: enable=protected-access
self.inputs.append(ph)
self._captured[tensor.ref()] = ph
self.extra_args.append(ph)
if _is_guaranteed_const(tensor):
with ops.control_dependencies(None):
return array_ops.guarantee_const(ph)
else:
return ph
def _add_tensor_and_parents(self, tensor):
op = self._add_op_and_parents(tensor.op)
return op.outputs[tensor.value_index]
def _add_op_and_parents(self, op):
# pylint: disable=protected-access
op_def = graph_to_function_def._get_op_def(op)
if op._is_stateful and op not in self._allowlisted_stateful_ops:
raise ValueError("Cannot capture a stateful node (name:%s, type:%s) "
"by value." % (op.name, op.type))
elif op.type in ("Placeholder", "PlaceholderV2"):
raise ValueError("Cannot capture a placeholder (name:%s, type:%s) "
"by value." % (op.name, op.type))
# pylint: enable=protected-access
captured_inputs = [self._add_tensor_and_parents(x) for x in op.inputs]
captured_op = self._create_op_internal(
op.type,
captured_inputs, [o.dtype for o in op.outputs],
name=op.name,
attrs=op.node_def.attr,
op_def=op_def)
for t, captured_t in zip(op.outputs, captured_op.outputs):
self._captured[t.ref()] = captured_t
return captured_op
def func_graph_from_py_func(func,
arg_names,
arg_types,
name=None,
capture_by_value=False,
device=None,
colocation_stack=None,
container=None,
collections_ref=None,
arg_shapes=None,
allowlisted_stateful_ops=None,
capture_resource_var_by_value=True):
"""Returns a _FuncGraph generated from `func`.
Args:
func: A Python callable which constructs a TF function body. The arguments
must correspond to `arg_types`. Returns a value or list/tuple of values.
No returned value can be None.
arg_names: A sequence of strings for the function argument names.
arg_types: A sequence of the function's argument types.
name: The function name. If None, the name is derived from `func`.
capture_by_value: boolean. If True, captured values will be copied into the
function body.
device: device name or function.
colocation_stack: A colocation stack (list) the _FuncGraph should use.
container: A container name the _FuncGraph should start with.
collections_ref: A reference to a collections dict the _FuncGraph should
use internally.
arg_shapes: A sequence of the function's argument shapes.
allowlisted_stateful_ops: A set of ops that if stateful we ignore and
re-create.
capture_resource_var_by_value: Boolean (defaults to True). If False,
captured resource variable returns the handle instead of value.
Returns:
A _FuncGraph.
Raises:
ValueError: if func returns None.
"""
if not name:
name = function_utils.get_func_name(func)
func_graph = _FuncGraph(name, capture_by_value, allowlisted_stateful_ops,
capture_resource_var_by_value)
with func_graph.as_default(), ops.device(device):
# pylint: disable=protected-access
if collections_ref is not None:
func_graph._collections = collections_ref
if container is not None:
func_graph._container = container
if colocation_stack is not None:
func_graph._colocation_stack = colocation_stack
# pylint: enable=protected-access
if arg_shapes is None:
arg_shapes = [None] * len(arg_types)
# Create placeholders for the function arguments.
for (argname, argtype, argshape) in zip(arg_names, arg_types, arg_shapes):
argholder = array_ops.placeholder(argtype, shape=argshape, name=argname)
func_graph.inputs.append(argholder)
# Call func and gather the output tensors.
with vs.variable_scope("", custom_getter=func_graph.getvar):
outputs = func(*func_graph.inputs)
# There is no way of distinguishing between a function not returning
# anything and a function returning None in Python.
# We need to allow the former and ideally want to forbid the latter as
# it is most likely user error.
# TODO(iga): Consider adding a @NoOutput decorator on top of @Defun to
# allow users to explicitly mark the function as not returning anything.
# For now, we allow a single None return and interpret it as a function
# with no output.
if outputs is None:
outputs = []
else:
# If func only returned one value, make it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
if any(_ is None for _ in outputs):
raise ValueError("Function %s can not return None." % name)
# Ensures each output is a Tensor in the function graph.
outputs = [ops.convert_to_tensor(t) for t in outputs]
outputs = [func_graph.capture(t) if t.graph is not func_graph else t
for t in outputs]
func_graph.outputs = outputs
return func_graph
def _is_guaranteed_const(tensor):
"""Determines whether `tensor` is guaranteed to be a constant.
A tensor is guaranteed to be a constant if either it was produced by
a `GuaranteeConst` op or if every input used to produce it is itself
guaranteed to be a constant.
Args:
tensor: The tensor for which to determine const-ness.
Returns:
True if `tensor` is guaranteed to be a constant, False otherwise.
"""
if isinstance(tensor, ops.EagerTensor):
return False
class Work(object):
def __init__(self, op, leaving):
self.op = op
self.leaving = leaving
is_guaranteed_const = lambda op: op.node_def.op == "GuaranteeConst"
constants = set([])
def all_inputs_const(op):
# If all inputs of an op are guaranteed constants, then we can infer that
# the op produces a constant as well.
return op.inputs and all(inp.op in constants for inp in op.inputs)
visited = set([])
stack = [Work(tensor.op, leaving=False)]
while stack:
work = stack.pop()
if work.leaving:
if all_inputs_const(work.op):
constants.add(work.op)
continue
visited.add(work.op)
if is_guaranteed_const(work.op):
constants.add(work.op)
continue
# This op will be revisited after all its inputs are checked for const-ness.
stack.append(Work(work.op, leaving=True))
for inp in work.op.inputs:
if inp.op not in visited:
stack.append(Work(inp.op, leaving=False))
return tensor.op in constants
def _call(sig, *inputs, **kwargs):
"""Adds a node calling a function.
This adds a `call` op to the default graph that calls the function
of signature `sig`, passing the tensors in `inputs` as arguments.
It returns the outputs of the call, which are one or more tensors.
`sig` is the OpDef signature, typically obtained from a `_DefinedFunction` object.
You can pass an optional keyword parameter `name=string` to name the
added operation.
You can pass an optional keyword parameter `noinline=True|False` to
instruct the runtime not to inline the function body into the call
site.
Args:
sig: OpDefArg. The signature of the function.
*inputs: arguments to the function.
**kwargs: Optional keyword arguments. Can only contain 'name' or
'noinline'.
Returns:
A 2-element tuple. First element: a Tensor if the function returns a single
value; a list of Tensors if the function returns multiple value; the
Operation if the function returns no values. Second element: the Operation.
Raises:
ValueError: if the arguments are invalid.
"""
if len(inputs) != len(sig.input_arg):
raise ValueError("Expected number of arguments: %d, received: %d" % (len(
sig.input_arg), len(inputs)))
name = kwargs.pop("name", None)
g = ops.get_default_graph()
func_name = sig.name
if name is None:
name = func_name
attrs = _parse_kwargs_as_attrs(func_name, **kwargs)
output_types = [dtypes.DType(x.type) for x in sig.output_arg]
op = g._create_op_internal( # pylint: disable=protected-access
func_name, list(inputs), output_types, name=name, attrs=attrs, op_def=sig)
if op.outputs:
if len(op.outputs) == 1:
ret = op.outputs[0]
else:
ret = tuple(op.outputs)
else:
ret = op
return ret, op
def _from_definition(fdef, grad_func=None):
"""Creates a _DefinedFunction initialized from a FunctionDef proto.
Args:
fdef: a FunctionDef
grad_func: a _DefinedFunction or None
Returns:
A _DefinedFunction representing fdef
"""
# TODO(iga): This method does major surgery on _DefinedFunction.
# Make it a named constructor using @classmethod of _DefinedFunction.
# The Python callable is only needed to create a FunctionDef. Since we have
# the FunctionDef here, we don't need to set _DefinedFunction._func (nor do we
# have access to such a callable here).
func = None
argnames = [arg.name for arg in fdef.signature.input_arg]
input_types = tuple(
dtypes.as_dtype(arg.type) for arg in fdef.signature.input_arg)
func_name = fdef.signature.name
# Note: FunctionDefs do not include python gradient functions, so if the
# original _DefinedFunction included one it will not be reflected here.
python_grad_func = None
out_names = [arg.name for arg in fdef.signature.output_arg]
result = _DefinedFunction(func, argnames, input_types, func_name, grad_func,
python_grad_func, out_names)
# pylint: disable=protected-access
serialized = fdef.SerializeToString()
c_func = c_api.TF_FunctionImportFunctionDef(serialized)
result._c_func = c_api_util.ScopedTFFunction(c_func)
result._extra_inputs = []
result._op_def = fdef.signature
# pylint: enable=protected-access
return result
def from_library(lib):
"""Creates _DefinedFunctions initialized from a FunctionDefLibrary proto.
This method handles assigning the correct gradient functions to each
function.
Args:
lib: a FunctionDefLibrary
Returns:
A list of _DefinedFunctions
Raises:
ValueError: `lib` is invalid
"""
if not lib.function and not lib.gradient:
return []
# function name -> FunctionDef proto
funcs = {fdef.signature.name: fdef for fdef in lib.function}
# Validate that all references function names have function defs
for g in lib.gradient:
if g.function_name not in funcs:
raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" %
(g.function_name, str(lib)))
if g.gradient_func not in funcs:
raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" %
(g.gradient_func, str(lib)))
# function name -> gradient function name
func_to_grad = collections.defaultdict(lambda: None)
# gradient function name -> names of functions having that grad function
grad_to_funcs = collections.defaultdict(list)
for gdef in lib.gradient:
func_to_grad[gdef.function_name] = gdef.gradient_func
grad_to_funcs[gdef.gradient_func].append(gdef.function_name)
# Start with functions without gradients
ready = [
fdef for fdef in lib.function if func_to_grad[fdef.signature.name] is None
]
if not ready:
raise ValueError(
"FunctionDefLibrary contains cyclic gradient functions!\n" + str(lib))
# function name -> _DefinedFunction
initialized = {}
while ready:
fdef = ready.pop()
name = fdef.signature.name
grad = initialized.get(func_to_grad[name])
if func_to_grad[name]:
assert grad
defined_func = _from_definition(fdef, grad_func=grad)
initialized[name] = defined_func
ready.extend(funcs[f] for f in grad_to_funcs[name])
return initialized.values()
def _get_experimental_kwarg_as_attr(attr_name, value):
"""Creates an AttrValue for a python object."""
if isinstance(value, bool):
return attr_value_pb2.AttrValue(b=value)
elif isinstance(value, int):
return attr_value_pb2.AttrValue(i=value)
elif isinstance(value, float):
return attr_value_pb2.AttrValue(f=value)
elif isinstance(value, str):
return attr_value_pb2.AttrValue(s=compat.as_bytes(value))
else:
raise ValueError("Unsupported attribute type for %s with type %s" %
(attr_name, type(value)))
def _get_kwarg_as_str_attr(attr_name, value):
"""Creates an AttrValue for a python object."""
if isinstance(value, str):
return attr_value_pb2.AttrValue(s=compat.as_bytes(value))
else:
raise ValueError("Unsupported attribute type for %s with type %s" %
(attr_name, type(value)))
def _parse_kwargs_as_attrs(func_name, **kwargs):
"""Parses **kwargs into a node's attributes."""
attrs = {}
noinline = kwargs.pop("noinline", None)
if noinline is not None:
attrs["_noinline"] = attr_value_pb2.AttrValue(b=bool(noinline))
# For compatibility with previous behavior, Defun does not perform shape
# inference through its function call operations.
attrs["_disable_call_shape_inference"] = attr_value_pb2.AttrValue(b=True)
compiled = kwargs.pop("compiled", None)
separate_compiled_gradients = kwargs.pop("separate_compiled_gradients", None)
if compiled is not None:
attrs["_XlaCompile"] = attr_value_pb2.AttrValue(b=bool(compiled))
attrs["_XlaSeparateCompiledGradients"] = attr_value_pb2.AttrValue(
b=bool(separate_compiled_gradients))
# Forward _XlaScope from enclosing context (if set), otherwise create new.
# pylint: disable=protected-access
if "_XlaScope" in ops.get_default_graph()._attr_scope_map:
attrs["_XlaScope"] = ops.get_default_graph()._attr_scope_map["_XlaScope"]
else:
attrs["_XlaScope"] = attr_value_pb2.AttrValue(
s=("function_%s" % func_name).encode())
# pylint: enable=protected-access
kwargs_keys = list(kwargs.keys())
for key in kwargs_keys:
if key.startswith("experimental_"):
attrs[key] = _get_experimental_kwarg_as_attr(key, kwargs[key])
del kwargs[key]
# Support for https://github.com/tensorflow/community/pull/113/files.
elif key == "_implements" or key == "_reference":
attrs[key] = _get_kwarg_as_str_attr(key, kwargs[key])
del kwargs[key]
if kwargs:
raise ValueError("Unknown keyword arguments: %s" % kwargs.keys())
return attrs
def get_extra_vars():
"""Returns the captured variables by the function.
Returns:
If the default graph is being used to define a function, the
returned list of variables are those created inside the function
body so far. Otherwise, returns an empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_vars
else:
return []
def get_extra_inputs():
"""Returns the captured input tensors by the function.
Returns:
If the default graph is being used to define a function, the
returned list of tensors are those accessed inside the function body
but defined outside the function body so far. Otherwise, returns an
empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_inputs
else:
return []
def get_extra_args():
"""Returns the corresponding function arguments for the captured inputs.
Returns:
If the default graph is being used to define a function, the
returned list of place holders are those used inside the function
body corresponding those returned by get_extra_inputs(). Otherwise,
returns an empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_args
else:
return []
def _type_list_to_str(types):
if any(_ not in _DTYPE_TO_STR for _ in types):
raise ValueError("Unsupported dtypes: %s" % types)
return "".join(_DTYPE_TO_STR[_] for _ in types)
# NOTE: The list needs to be extended when more data types are added.
_DTYPE_TO_STR = {
dtypes.float16: "f16",
dtypes.float32: "f32",
dtypes.float64: "f64",
dtypes.int32: "i32",
dtypes.uint8: "i8",
dtypes.uint16: "u16",
dtypes.uint32: "u32",
dtypes.uint64: "u64",
dtypes.int16: "i16",
dtypes.int8: "i8",
dtypes.string: "s",
dtypes.complex64: "c64",
dtypes.complex128: "c128",
dtypes.int64: "i64",
dtypes.bool: "b",
dtypes.qint8: "qi8",
dtypes.quint8: "qu8",
dtypes.qint16: "qi16",
dtypes.quint16: "qu16",
dtypes.qint32: "qi32",
dtypes.bfloat16: "b16"
}
def function_def_from_tf_function(c_func):
"""Converts a SWIG-wrapped TF_Function* to a FunctionDef proto."""
with c_api_util.tf_buffer() as buf:
c_api.TF_FunctionToFunctionDef(c_func, buf)
data = c_api.TF_GetBuffer(buf)
fdef = function_pb2.FunctionDef()
fdef.ParseFromString(compat.as_bytes(data))
return fdef
| apache-2.0 | -2,418,835,701,187,961,000 | 34.760532 | 107 | 0.657387 | false |
ccnmtl/lettuce | lettuce/plugins/jsonreport_output.py | 5 | 7304 | from datetime import datetime
import json
from lettuce import world
from lettuce.terrain import after, before
def enable(filename=None):
filename = filename or "lettucetests.json"
@before.all
def before_all():
"""
Set `world._started` to `datetime.now()` to track total duration.
"""
world._started = datetime.now()
@after.all
def generate_json_output(total):
"""
This callback is called after all the features are
ran.
"""
world._stopped = datetime.now()
total_dict = total_result_to_dict(total)
with open(filename, "w") as handle:
json.dump(total_dict, handle)
@before.each_feature
@before.each_scenario
@before.each_step
def before_each_element(*args):
"""
Set `step._started`, `scenario._started` or `feature._started` to `datetime.now()`
to track step/scenario/feature duration.
"""
element = args[0]
element._started = datetime.now()
@after.each_feature
@after.each_scenario
@after.each_step
def after_each_element(*args):
"""
Set `step._stopped`, `scenario._stopped` or `feature._stopped` to `datetime.now()`
to track step/scenario/feature duration.
"""
element = args[0]
element._stopped = datetime.now()
def total_result_to_dict(total):
"""
Transform a `TotalResult` to a json-serializable Python dictionary.
:param total: a `TotalResult` instance
:return: a Python dictionary
"""
return {
"meta": extract_meta(total),
"duration": _get_duration(world),
"features": [
extract_feature_data(feature_result)
for feature_result in total.feature_results
]
}
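# A rough sketch (not from the original source) of the JSON shape this
# produces; the values are invented:
#
#   {
#     "meta": {"features": {...}, "scenarios": {...}, "steps": {...},
#              "is_success": true},
#     "duration": 12,
#     "features": [{"name": "...", "duration": 3, "meta": {...},
#                   "scenarios": [...], "background": null}]
#   }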
def extract_feature_data(feature_result):
"""
Extract data from a `FeatureResult` instance.
:param feature_result: a `FeatureResult` instance
:return: a Python dictionary
"""
scenarios = []
meta = {
"steps": {
"total": 0,
"success": 0,
"failures": 0,
"skipped": 0,
"undefined": 0,
},
"scenarios": {
"total": 0,
"success": 0,
"failures": 0,
"skipped": 0,
"undefined": 0,
}
}
for scenario_result in feature_result.scenario_results:
scenario_data = extract_scenario_data(scenario_result)
scenarios.append(scenario_data)
# scenarios
success = (
not scenario_data["meta"]["failures"] and
not scenario_data["meta"]["skipped"] and
not scenario_data["meta"]["undefined"]
)
meta["scenarios"]["total"] += 1 if scenario_data["meta"]["total"] else 0
meta["scenarios"]["success"] += 1 if success else 0
meta["scenarios"]["failures"] += 1 if scenario_data["meta"]["failures"] else 0
meta["scenarios"]["skipped"] += 1 if scenario_data["meta"]["skipped"] else 0
meta["scenarios"]["undefined"] += 1 if scenario_data["meta"]["undefined"] else 0
# steps
meta["steps"]["total"] += scenario_data["meta"]["total"]
meta["steps"]["success"] += scenario_data["meta"]["success"]
meta["steps"]["failures"] += scenario_data["meta"]["failures"]
meta["steps"]["skipped"] += scenario_data["meta"]["skipped"]
meta["steps"]["undefined"] += scenario_data["meta"]["undefined"]
return {
"name": feature_result.feature.name,
"duration": _get_duration(feature_result.feature),
"meta": meta,
"scenarios": scenarios,
"background": extract_background_data(feature_result.feature.background)
}
def extract_background_data(background):
"""
Extract data from a `Background` instance.
:param background: a `Background` instance, possibly None
:return: a Python dictionary
"""
if not background:
return None
step_data = [extract_step_data(step) for step in background.steps]
return {
"meta": {
"total": len(background.steps),
"success": sum([s["meta"]["success"] for s in step_data]),
"failures": sum([s["meta"]["failed"] for s in step_data]),
"skipped": sum([s["meta"]["skipped"] for s in step_data]),
"undefined": sum([s["meta"]["undefined"] for s in step_data]),
},
"steps": step_data
}
def extract_scenario_data(scenario_result):
"""
Extract data from a `ScenarioResult` instance.
:param scenario_result: a `ScenarioResult` instance
:return: a Python dictionary
"""
return {
"name": scenario_result.scenario.name,
"duration": _get_duration(scenario_result.scenario),
"outline": scenario_result.outline,
"meta": {
"total": scenario_result.total_steps,
"success": len(scenario_result.steps_passed),
"failures": len(scenario_result.steps_failed),
"skipped": len(scenario_result.steps_skipped),
"undefined": len(scenario_result.steps_undefined),
},
"steps": [extract_step_data(step) for step in scenario_result.all_steps]
}
def extract_step_data(step):
"""
Extract data from a `Step` instance.
:param step: a `Step` instance
:return a Python dictionary
"""
step_data = {
"name": step.sentence,
"duration": _get_duration(step),
"meta": {
"success": bool(step.passed),
"failed": bool(step.failed),
"skipped": not step.passed and not step.failed and step.has_definition,
"undefined": not step.has_definition,
},
"failure": {}
}
if step.why:
step_data["failure"] = {
"exception": repr(step.why.exception),
"traceback": step.why.traceback
}
return step_data
def extract_meta(total):
"""
Extract metadata from the `TotalResult`.
:param total: a `TotalResult` instance
:return: a Python dictionary
"""
return {
"features": {
"total": total.features_ran,
"success": total.features_passed,
"failures": total.features_ran - total.features_passed,
},
"scenarios": {
"total": total.scenarios_ran,
"success": total.scenarios_passed,
"failures": total.scenarios_ran - total.scenarios_passed,
},
"steps": {
"total": total.steps,
"success": total.steps_passed,
"failures": total.steps_failed,
"skipped": total.steps_skipped,
"undefined": total.steps_undefined,
},
"is_success": total.is_success,
}
def _get_duration(element):
"""
Return the duration of an element.
:param element: either a step or a scenario or a feature
"""
return (element._stopped - element._started).seconds if hasattr(element, '_started') else None
| gpl-3.0 | -5,063,138,215,552,236,000 | 31.035088 | 98 | 0.551068 | false |
tumi8/INSALATA | src/insalata/planning/MetricFFParser.py | 1 | 1064 | from insalata.planning.PlanParserBase import PlanParserBase
class MetricFFParser(PlanParserBase):
"""
Get an ordered execution plan of functions based on the plan file of Metric-FF.
Keyword arguments:
objects -- A dictionary with all objects associated with the name used for them in the plan.
    functionDict -- A dictionary mapping each available function's all-lowercase name to the function itself.
planFile -- Path to the file containing the plan.
Returns:
    A linearly ordered list of function pointers from the setup module, each associated with its respective parameter.
"""
def parsePlan(self, objects, functionDict, planFile):
lines = [line.rstrip('\n') for line in open(planFile)]
#split each line, function name first, object second
plan = []
for l in lines:
#add tuple (function, parameter)
plan.append((functionDict[l.split()[0].lower()], objects[l.split()[1].lower()]))
            #ONLY WORKS FOR A SINGLE PARAMETER
        return plan | apache-2.0 | -6,602,416,881,586,127,000 | 43.375 | 118 | 0.660714 | false |
La0/mozilla-relengapi | src/pulselistener/pulselistener/monitoring.py | 1 | 7418 | # -*- coding: utf-8 -*-
import asyncio
from datetime import datetime
from datetime import timedelta
import structlog
from taskcluster.utils import slugId
from taskcluster.utils import stringDate
from pulselistener import taskcluster
logger = structlog.get_logger(__name__)
GROUP_MD = '''
## {}
{:.2f}% of all tasks ({}/{})
'''
TASK_MD = '* [{0}](https://tools.taskcluster.net/task-inspector/#{0})'
TASKCLUSTER_NAMESPACE = 'project.releng.services.tasks.{task_id}'
class Monitoring(object):
'''
A simple monitoring tool sending emails through TC
every X seconds
'''
def __init__(self, period):
assert isinstance(period, int)
assert period > 0
self.period = period
self.stats = {}
self.emails = []
# Setup monitoring queue
self.tasks = asyncio.Queue()
def setup(self):
'''
Setup using taskcluster configuration
'''
# TC services
assert taskcluster.options is not None, 'Not authenticated'
self.notify = taskcluster.get_service('notify')
self.queue = taskcluster.get_service('queue')
self.index = taskcluster.get_service('index')
# Load emails from secret
self.emails = taskcluster.secrets['ADMINS']
async def add_task(self, group_id, hook_id, task_id):
'''
Add a task to watch in async queue
'''
await self.tasks.put((group_id, hook_id, task_id))
def next_report(self):
'''
Calc report times
'''
report_date = datetime.utcnow()
while True:
report_date += timedelta(seconds=self.period)
yield report_date
async def run(self):
'''
Watch task status by using an async queue
to communicate with other processes
A report is sent periodically about failed tasks
'''
for report_date in self.next_report():
while datetime.utcnow() < report_date:
# Monitor next task in queue
await self.check_task()
# Sleep a bit before trying a new task
await asyncio.sleep(1)
# Send report when timeout is reached
self.send_report()
async def check_task(self):
'''
Check next task status in queue
'''
assert self.queue is not None
# Read tasks in queue
group_id, hook_id, task_id = await self.tasks.get()
# Get its status
try:
status = self.queue.status(task_id)
except Exception as e:
logger.warn('Taskcluster queue status failure for {} : {}'.format(task_id, e))
return
task_status = status['status']['state']
if task_status in ('failed', 'completed', 'exception'):
# Retry tasks in exception
if task_status == 'exception':
await self.retry_task(group_id, hook_id, task_id)
# Lookup the failed details
if task_status == 'failed' and self.is_restartable(task_id):
logger.info('Failed task is restartable', task_id=task_id)
await self.retry_task(group_id, hook_id, task_id)
# Add to report
if hook_id not in self.stats:
self.stats[hook_id] = {'failed': [], 'completed': [], 'exception': []}
self.stats[hook_id][task_status].append(task_id)
logger.info('Got a task status', id=task_id, status=task_status)
else:
# Push back into queue so it get checked later on
await self.tasks.put((group_id, hook_id, task_id))
def is_restartable(self, task_id):
'''
A task is restartable if its indexed state using task id
has a monitoring_restart field set to True
'''
# Load the indexed data
task_path = TASKCLUSTER_NAMESPACE.format(task_id=task_id)
try:
index = self.index.findTask(task_path)
except Exception as e:
logger.info('Task not found in index', task=task_id, error=str(e))
return False
# Restart when monitoring_restart is set
return index['data'].get('monitoring_restart') is True
async def retry_task(self, group_id, hook_id, task_id):
'''
Retry a Taskcluster task by:
- fetching its definition
- updating its dates & retry count
- creating a new task
Do NOT use rerunTask as it's deprecated AND not recommended
https://docs.taskcluster.net/docs/reference/platform/taskcluster-queue/references/api#rerunTask
'''
assert self.queue is not None
# Fetch task definition
definition = self.queue.task(task_id)
# Update timestamps
date_format = '%Y-%m-%dT%H:%M:%S.%f%z'
now = datetime.utcnow()
created = datetime.strptime(definition['created'], date_format)
deadline = datetime.strptime(definition['deadline'], date_format)
expires = datetime.strptime(definition['expires'], date_format)
definition['created'] = stringDate(now)
definition['deadline'] = stringDate(now + (deadline - created))
definition['expires'] = stringDate(now + (expires - created))
# Decrement retries count
definition['retries'] -= 1
if definition['retries'] < 0:
logger.warn('Will not retry task, no more retries left', task_id=task_id, group_id=group_id, hook_id=hook_id)
return
# Trigger a new task with the updated definition
new_task_id = slugId()
logger.info('Retry task', old_task=task_id, new_task=new_task_id)
self.queue.createTask(new_task_id, definition)
# Monitor new task
await self.add_task(group_id, hook_id, new_task_id)
return new_task_id
def send_report(self):
'''
Build a report using current stats and send it through
Taskcluster Notify
'''
assert self.notify is not None
if not self.stats:
return
contents = []
# Build markdown
for hook_id, tasks_per_status in sorted(self.stats.items()):
total = sum([len(tasks) for tasks in tasks_per_status.values()])
if len(tasks_per_status['completed']) == total:
continue
content = '# {} tasks for the last period\n'.format(hook_id)
for status, tasks in sorted(tasks_per_status.items()):
nb_tasks = len(tasks)
content += GROUP_MD.format(
status,
100.0 * nb_tasks / total,
nb_tasks,
total,
)
content += '\n'.join([
TASK_MD.format(task)
for task in tasks
])
contents.append(content)
if len(contents):
# Send to admins
logger.info('Sending email to admins')
for email in self.emails:
self.notify.email({
'address': email,
'subject': 'Pulse listener tasks',
'content': '\n\n'.join(contents),
'template': 'fullscreen',
})
# Reset stats
self.stats = {}
# Shared monitoring manager
task_monitoring = Monitoring(7 * 3600)
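# Illustrative usage sketch (not part of the original module); the group,
# hook and task ids below are placeholders:
#
#   async def demo():
#       task_monitoring.setup()
#       await task_monitoring.add_task('group-id', 'hook-id', 'task-id')
#       await task_monitoring.run()
#
#   asyncio.get_event_loop().run_until_complete(demo())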
| mpl-2.0 | 6,226,449,121,037,669,000 | 31.535088 | 121 | 0.565921 | false |
PietroPasotti/MACH | utils/preprocess.py | 1 | 7335 |
import bs4 as _bs
import nltk as _nltk
from os.path import isfile as _isfile
_default_entity_threshold = 0.5
def html_filter(path):
"""
	If path points to an existing file, opens it, strips the HTML and returns the text.
	Otherwise treats path itself as an HTML string, strips the HTML and returns the text.
"""
rawtext = ''
if _isfile(path):
with open(path,'rb') as f:
soup = _bs.BeautifulSoup(f)
rawtext += soup.get_text()
else:
soup = _bs.BeautifulSoup(path)
rawtext += soup.get_text()
	# sometimes souping collapses dot-space structures: from
# "... something. Another day...", we get "... something.Another day..."
# which is bad for tokenizers.
splitted = rawtext.split('.') # splits at the dots
dotted = '. '.join(splitted) # add back dot ** + space **
uniform = dotted.replace(' ',' ').strip() # clean some extra whitespace
return uniform
def tokenize(path):
punct = """.,;:'"(){}[]\/!?^*#|-_=&%$ \t\n"""
rawtext = html_filter(path) # we tear off html straight away
tokens = []
sent_tokenizer = _nltk.tokenize.PunktSentenceTokenizer()
word_tokenizer = _nltk.tokenize.PunktWordTokenizer()
senttokens = sent_tokenizer.tokenize(rawtext)
for sent in senttokens:
twords = word_tokenizer.tokenize(sent) # list of words: a sentence
twords = [word.strip(punct) for word in twords if word not in punct]
# we erase meaningless bits
		# manually correct strange Dutch constructs that trick tokenizers,
# or other markup constructs that produce weird results:
pos = 0
for word in twords:
# zo'n --> zo, 'n
if word == 'zo' and twords[pos+1] == "'n":
del twords[pos+1]
twords[pos] = "zo'n"
# vooren --> voor- en
elif word[-1] == '-':
twords[pos] = word[:-1] + twords[pos+1] # we glue back the two words
del twords[pos+1] # we erase the next one
else:
pass
pos += 1
noempty = [word for word in twords if word]
tokens.append(noempty)
# now tokens should consist of a list of sentences, which are lists of words.
return tokens
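# Illustrative example (not from the original source) of the structure
# tokenize() returns: a list of sentences, each a list of cleaned words.
#
#   tokenize("Hello world. Second sentence.")
#   # -> [['Hello', 'world'], ['Second', 'sentence']]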
def entity(string,struct):
"""
	Outputs a likelihood score (a float between 0 and 1) indicating how
	likely the given string is to represent an entity.
"""
	# 1. NOUNS and "attributes" often seem to be entities; the `valuables`
	# set below stores such categories
pos = struct.locate(string)[0]
nice = struct.trees[pos[0]]['nice']
try:
categs = nice[string] if string in nice else nice[string.lower()]
except Exception:
# some words are just skipped by the parsing engine.
# so we might not find them at all. Ignore them!
return 0
valuables = set(('noun','attribute'))
value1 = len(valuables.intersection(set(categs)))/ len(valuables)
# 2. capitalized words, unless they are at the very start of a sentence,
# are often entities
value2 = 1 if string[0].isupper() and pos[0] != 0 else 0
	# 3. nothing further for now.
values = (value1, value2)
final = sum(values) / len(values)
return final
def entities(structure):
"""calls entity on every word in the structure"""
for sent,words in structure.content.items():
for wordid,wordict in words.items():
wordict['entity'] = entity(wordict['word'],structure)
return
class Structure(object):
def __init__(self,path):
"""
Should be initialized on a path to a file; which contains the full
text to be analysed."""
self.content = self.parse(path) # raw-ish content
self.trees = self.parse_trees() # trees! we like trees!
entities(self) # tries to guess which words denote entities
return
def __str__(self):
raw = self.raw()
return "<Structure('{}'), {} sentences>".format(raw[:15] + '...',len(self.raw_sentences()))
def parse(self,content):
if not isinstance(content,str):
raise Exception('path-to-file needed')
tokens = tokenize(content)
struct = {}
sentpos = 0
for sentence in tokens:
struct_sent = {}
wordpos = 0
for word in sentence:
struct_word = {'word' : word, 'tags' : set()}
struct_sent[wordpos] = struct_word
wordpos += 1
struct[sentpos] = struct_sent
sentpos += 1
		# now a text consisting of two sentences, the first with 4 words
		# and the second with two, should look like this:
# {0:
# {0:{ 'word':'Today', 'tags': set() } like this...
# ...
# }
return struct
def parse_trees(self):
"""
Dupira api.
"""
raw = self.raw()
raw_list = [ sent + '.' for sent in raw.split('.') if sent ]
from mach.utils.treeparse import dupira_parse
parsed = { raw_list.index(sent) : dupira_parse(sent) for sent in raw_list }
return parsed
def update(self,info):
"""
Requires info to be a dictionary from 2-tuples of integers to
sets of tags. Each tuple stands for a sentence and a word in
the sentence; that word's tags get updated by the value.
"""
for index, tags in info.items():
self.content[index[0]][index[1]]['tags'].update(tags)
def raw(self,enc=False):
"""
Returns the raw, untagged text.
"""
fulltext = ''
for sent in self.content.values():
for word in sent.values():
if fulltext:
fulltext += " "
fulltext += word['word'] # each word is a {'tags': set(), 'word' : str()} object
fulltext += '.'
if enc:
return fulltext.strip().encode(enc)
return fulltext.strip()
def raw_sentences(self,enc=False):
"""
Returns the raw, untagged text.
"""
if enc:
return [sent.strip().encode(enc) + '.' for sent in self.raw().split('.') if sent]
return [sent.strip() + '.' for sent in self.raw().split('.') if sent]
def raw_textform(self,enc=False):
"""
Returns a hierarchical structure [[[word,word,word] [word...] []]
"""
t = []
for sent in self.content.values():
s = []
for word in sent.values():
s += [word['word']]
t += [s]
if enc:
			return [[word.encode(enc) for word in sent] for sent in t]
return t
def locate(self,string):
"""
Returns [(sent,word),[...]] where string was found in the structure.
All parts of the string are expected to be found in sequence.
"""
words = [word.strip() for word in string.split(' ')]
first = words[0]
matches = []
sents = [sent.lower().strip('.') for sent in self.raw_sentences()] # .lower!
matches_sents = [sents.index(sent) for sent in sents if string.lower() in sent] # .lower!
# this should return the indexes of the sentences where
# **the full string** was matched.
for idx in matches_sents:
sent = [word.strip().lower() for word in sents[idx].split(' ')]
for word in words:
word = word.lower() # warning: lowercase match!
if word in sent:
matches.append((idx, sent.index(word))) # (index of sentence, index of word)
if not matches:
# nothing found yet: we have a problem.
# we could search for individual words, instead of matching
# the whole string at the beginning. But that's boring.
# Better raise an error.
raise ValueError('String {} not found.'.format(string))
return matches
def walk_entities(self,thresh = _default_entity_threshold,enc=False):
"""
Yields all words likely (thresh) to be entities, together with
their position.
"""
for sid,sent in self.content.items():
for wid,wordcont in sent.items():
if wordcont['entity'] >= thresh:
					if enc:
						yield ((sid,wid), wordcont['word'].encode(enc))
					else:
						yield ((sid,wid), wordcont['word'])
| agpl-3.0 | 8,110,119,708,750,118,000 | 24.206186 | 93 | 0.63122 | false |
osspeak/osspeak | osspeak/settings.py | 1 | 2271 | import os
import logging
import json
import clargs
import sys
OSSPEAK_DIRECTORY = os.path.join(os.path.expanduser('~'), '.osspeak')
EXECUTABLE_DIRECTORY = os.path.split(os.path.abspath(sys.argv[0]))[0]
OSSPEAK_CONFIG_PATH = 'settings.json' if os.path.exists('settings.json') else os.path.join(OSSPEAK_DIRECTORY, 'settings.json')
DEFAULT_CONFIG = {
'interface': 'cli',
'network': 'local',
'server_address': '127.0.0.1:8888',
"type_delay": .05,
'command_directory': os.path.join(OSSPEAK_DIRECTORY, 'commands'),
'external_directory': os.path.join(OSSPEAK_DIRECTORY, 'external'),
'cache': os.path.join(OSSPEAK_DIRECTORY, '.cache.json'),
'macros': os.path.join(OSSPEAK_DIRECTORY, 'macros.json'),
'print_logging_level': logging.INFO,
'file_logging_level': logging.DEBUG,
'gui_port': 3922,
'perform_actions': True,
'engine': {
'recognitionConfidence': .9
}
}
settings = DEFAULT_CONFIG.copy()
def try_load_json_file(path, default=dict):
try:
with open(path) as f:
return json.load(f)
except (FileNotFoundError, json.decoder.JSONDecodeError):
return default() if callable(default) else default
def set_settings(_settings):
global settings
settings = _settings
def save_settings(settings):
if not os.path.isdir(OSSPEAK_DIRECTORY):
os.makedirs(OSSPEAK_DIRECTORY)
with open(OSSPEAK_CONFIG_PATH, 'w') as f:
json.dump(settings, f, indent=4)
def load_user_settings():
user_settings = DEFAULT_CONFIG.copy()
user_settings.update(try_load_json_file(os.path.join(OSSPEAK_DIRECTORY, 'settings.json')))
user_settings.update(try_load_json_file(os.path.join(EXECUTABLE_DIRECTORY, 'settings.json')))
user_settings.update(try_load_json_file(os.path.join('..', 'settings.json')))
args = clargs.get_args()
if args is not None:
user_settings.update(args)
return user_settings
def get_server_address():
address = settings['server_address'].rsplit(':', 1)
return address[0], int(address[1])
def parse_server_address(address):
if isinstance(address, str):
return address
if isinstance(address, dict) and 'host' in address and 'port' in address:
return f'{address["host"]}:{address["port"]}'
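# Illustrative examples (not part of the original module):
#   parse_server_address('127.0.0.1:8888')                    -> '127.0.0.1:8888'
#   parse_server_address({'host': '127.0.0.1', 'port': 8888}) -> '127.0.0.1:8888'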
| mit | -6,921,688,107,281,642,000 | 32.397059 | 126 | 0.673712 | false |
boada/HETDEXCluster | legacy/analysis/plot_fullKnowledge.py | 2 | 2805 | import pylab as pyl
import h5py as hdf
from addHaloInfo import find_indices_single
from scipy import stats
def scatterDensity(ax, xdat, ydat, extent, bins=[50,50], thresh=3):
hh, locx, locy = pyl.histogram2d(xdat, ydat, range=extent, bins=bins)
posx = pyl.digitize(xdat, locx)
posy = pyl.digitize(ydat, locy)
# finds the bins which contain points. posx = 0 for points outside "range"
ind = (posx > 0) & (posx <= bins[0]) & (posy > 0) & (posy <= bins[1])
# values of histogram with points in the bins.
hhsub = hh[posx[ind] - 1, posy[ind] - 1]
xdat1 = xdat[ind][hhsub < thresh] # low density points
ydat1 = ydat[ind][hhsub < thresh]
hh[hh < thresh] = 0 # fill the areas with low density by NaNs
ax.scatter(xdat1, ydat1, s=20, c='0.8')
ax.imshow(pyl.log10(hh.T), cmap='gray_r',
extent=pyl.array(extent).flatten(),
interpolation='nearest')
fig, ax = pyl.subplots(1, 2, squeeze=True)
f = hdf.File('out1204878_complete.hdf5', 'r')
dset = f[f.keys()[0]]
data = dset.value
# now we need to make a mask for the data
mask = (data['M200']/0.72 >= 1e13) & (data['Z'] < 0.5)
# we'll use the mask to make all the changes and then consolidate back.
dataMasked = data[mask]
hids = pyl.unique(dataMasked['HALOID'])
halos = find_indices_single(dataMasked['HALOID'], hids)
xdat = dataMasked['VRMS'][halos]/pyl.sqrt(3)
ydat = dataMasked['LOSVD'][halos]
# filter out NaNs
xdat = xdat[~pyl.isnan(ydat)]
ydat = ydat[~pyl.isnan(ydat)]
# LOSVD comparison
scatterDensity(ax[0], xdat, ydat, extent=[[xdat.min(), xdat.max()],
[ydat.min(), ydat.max()]])
# add 1-1 line, and best fit line
ax[0].plot([99,800],[99, 800], lw=2, c='#a60628', label='1:1')
slope, intercept, r_value, p_value, std_err = stats.linregress(xdat,ydat)
x = pyl.linspace(0,800)
line = slope*x + intercept
ax[0].plot(x, line, lw=2, c='#188487', label='Best Fit')
# adjust
ax[0].set_xlim(100,800)
ax[0].set_ylim(0,800)
# labels
ax[0].set_xlabel('$LOSVD_{True}$ ($km s^{-1})$')
ax[0].set_ylabel('$LOSVD_{Rec}$ ($km s^{-1})$')
# now the mass
xdat = dataMasked['M200'][halos]
ydat = dataMasked['MASS'][halos]
xdat = pyl.log10(xdat)
ydat = pyl.log10(ydat)
xdat = xdat[~pyl.isnan(ydat)]
ydat = ydat[~pyl.isnan(ydat)]
scatterDensity(ax[1], xdat, ydat, extent=[[xdat.min(), xdat.max()],
[ydat.min(), ydat.max()]])
# add 1-1 line, and best fit line
ax[1].plot([12.5,15],[12.5, 15], lw=2, c='#a60628', label='1:1')
slope, intercept, r_value, p_value, std_err = stats.linregress(xdat,ydat)
x = pyl.linspace(12.5,15)
line = slope*x + intercept
ax[1].plot(x, line, lw=2, c='#188487', label='Best Fit')
ax[1].set_xlim(12.75, 14.5)
ax[1].set_ylim(9,15)
ax[1].set_xlabel('Log $M_{200\!,True}$ ($M_{\odot})$')
ax[1].set_ylabel('Log $M_{200\!,Rec}$ ($M_{\odot})$')
pyl.show()
| mit | -6,135,495,927,693,497,000 | 28.526316 | 78 | 0.636007 | false |
dhalleine/tensorflow | tensorflow/contrib/learn/python/learn/estimators/autoencoder.py | 7 | 5322 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deep Autoencoder estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.estimators.base import TensorFlowBaseTransformer
from tensorflow.python.ops import nn
class TensorFlowDNNAutoencoder(TensorFlowBaseTransformer):
"""TensorFlow Autoencoder Regressor model."""
def __init__(self, hidden_units, n_classes=0, batch_size=32,
steps=200, optimizer="Adagrad", learning_rate=0.1,
clip_gradients=5.0, activation=nn.relu, add_noise=None,
continue_training=False, config=None,
verbose=1, dropout=None):
"""Initializes a TensorFlowDNNAutoencoder instance.
Args:
hidden_units: List of hidden units per layer.
batch_size: Mini batch size.
activation: activation function used to map inner latent layer onto
reconstruction layer.
add_noise: a function that adds noise to tensor_in,
e.g. def add_noise(x):
return(x + np.random.normal(0, 0.1, (len(x), len(x[0]))))
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam",
"Adagrad".
learning_rate: If this is constant float value, no decay function is used.
Instead, a customized decay function can be passed that accepts
global_step as parameter and returns a Tensor.
e.g. exponential decay function:
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step,
decay_steps=2, decay_rate=0.001)
continue_training: when continue_training is True, once initialized
model will be continuely trained on every call of fit.
config: RunConfig object that controls the configurations of the session,
e.g. num_cores, gpu_memory_fraction, etc.
verbose: Controls the verbosity, possible values:
0: the algorithm and debug information is muted.
1: trainer prints the progress.
2: log device placement is printed.
dropout: When not None, the probability we will drop out a given
coordinate.
"""
self.hidden_units = hidden_units
self.dropout = dropout
self.activation = activation
self.add_noise = add_noise
super(TensorFlowDNNAutoencoder, self).__init__(
model_fn=self._model_fn,
n_classes=n_classes,
batch_size=batch_size, steps=steps, optimizer=optimizer,
learning_rate=learning_rate, clip_gradients=clip_gradients,
continue_training=continue_training,
config=config, verbose=verbose)
def _model_fn(self, x, y):
encoder, decoder, autoencoder_estimator = models.get_autoencoder_model(
self.hidden_units,
models.linear_regression,
activation=self.activation,
add_noise=self.add_noise,
dropout=self.dropout)(x)
self.encoder = encoder
self.decoder = decoder
return autoencoder_estimator
def generate(self, hidden=None):
"""Generate new data using trained construction layer."""
if hidden is None:
last_layer = len(self.hidden_units) - 1
bias = self.get_tensor_value(
"encoder/dnn/layer%d/Linear/Bias:0" % last_layer)
hidden = np.random.normal(size=bias.shape)
hidden = np.reshape(hidden, (1, len(hidden)))
return self._session.run(self.decoder, feed_dict={self.encoder: hidden})
@property
def weights_(self):
"""Returns weights of the autoencoder's weight layers."""
weights = []
for layer in range(len(self.hidden_units)):
weights.append(self.get_tensor_value(
"encoder/dnn/layer%d/Linear/Matrix:0" % layer))
for layer in range(len(self.hidden_units)):
weights.append(self.get_tensor_value(
"decoder/dnn/layer%d/Linear/Matrix:0" % layer))
weights.append(self.get_tensor_value("linear_regression/weights:0"))
return weights
@property
def bias_(self):
"""Returns bias of the autoencoder's bias layers."""
biases = []
for layer in range(len(self.hidden_units)):
biases.append(self.get_tensor_value(
"encoder/dnn/layer%d/Linear/Bias:0" % layer))
for layer in range(len(self.hidden_units)):
biases.append(self.get_tensor_value(
"decoder/dnn/layer%d/Linear/Bias:0" % layer))
biases.append(self.get_tensor_value("linear_regression/bias:0"))
return biases
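# Illustrative usage sketch (not part of the original module); the data and
# hyper-parameters below are made up:
#
#   ae = TensorFlowDNNAutoencoder(hidden_units=[128, 32], steps=1000)
#   ae.fit(x_train)          # learn to reconstruct x_train
#   sample = ae.generate()   # decode a random latent vector into new data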
| apache-2.0 | -4,095,204,240,839,011,000 | 41.238095 | 91 | 0.66103 | false |
jordotech/satchmofork | satchmo/apps/satchmo_store/contact/admin.py | 13 | 1899 | from satchmo_store.contact.models import Organization, Contact, Interaction, PhoneNumber, AddressBook, ContactOrganization, ContactOrganizationRole, ContactRole, ContactInteractionType
from satchmo_utils.admin import AutocompleteAdmin
from django.contrib import admin
class Contact_Inline(admin.TabularInline):
model = Contact
extra = 1
class PhoneNumber_Inline(admin.TabularInline):
model = PhoneNumber
extra = 1
class AddressBook_Inline(admin.StackedInline):
model = AddressBook
extra = 1
class OrganizationOptions(admin.ModelAdmin):
list_filter = ['type', 'role']
list_display = ['name', 'type', 'role']
class ContactOptions(AutocompleteAdmin):
list_display = ('last_name', 'first_name', 'organization', 'role')
list_filter = ['create_date', 'role', 'organization']
ordering = ['last_name']
search_fields = ('first_name', 'last_name', 'email')
related_search_fields = {'user': ('username', 'first_name', 'last_name', 'email')}
related_string_functions = {'user': lambda u: u"%s (%s)" % (u.username, u.get_full_name())}
inlines = [PhoneNumber_Inline, AddressBook_Inline]
class InteractionOptions(admin.ModelAdmin):
list_filter = ['type', 'date_time']
class ContactOrganizationOptions(admin.ModelAdmin):
pass
class ContactRoleOptions(admin.ModelAdmin):
pass
class ContactOrganizationRoleOptions(admin.ModelAdmin):
pass
class ContactInteractionTypeOptions(admin.ModelAdmin):
pass
admin.site.register(Organization, OrganizationOptions)
admin.site.register(Contact, ContactOptions)
admin.site.register(Interaction, InteractionOptions)
admin.site.register(ContactOrganization, ContactOrganizationOptions)
admin.site.register(ContactOrganizationRole, ContactOrganizationRoleOptions)
admin.site.register(ContactRole, ContactRoleOptions)
admin.site.register(ContactInteractionType, ContactInteractionTypeOptions)
| bsd-3-clause | 8,549,085,110,135,235,000 | 35.519231 | 184 | 0.761453 | false |
reenberg/trytravis | tests/pantable/test_parse_table_list.py | 1 | 2148 | """
"""
from .context import parse_table_list
from panflute import *
def test_parse_table_list():
markdown = False
raw_table_list = [['1', '2'], ['3', '4']]
table_list_converted = parse_table_list(markdown, raw_table_list)
table_list_referenced = [TableRow(TableCell(Plain(Str('1'))), TableCell(Plain(
Str('2')))), TableRow(TableCell(Plain(Str('3'))), TableCell(Plain(Str('4'))))]
assert repr(table_list_converted) == repr(table_list_referenced)
markdown = True
raw_table_list = [['**markdown**', '~~like this~~'],
['$E=mc^2$', '`great`']]
table_list_converted = parse_table_list(markdown, raw_table_list)
table_list_referenced = [TableRow(TableCell(Para(Strong(Str('markdown')))), TableCell(Para(Strikeout(Str('like'), Space, Str(
'this'))))), TableRow(TableCell(Para(Math('E=mc^2', format='InlineMath'))), TableCell(Para(Code('great'))))]
assert repr(table_list_converted) == repr(table_list_referenced)
# test irregular table
markdown = True
raw_table_list = [['1', '', '', '', '', ''],
['2', '3', '4', '5', '6', '7']]
table_list_converted = parse_table_list(markdown, raw_table_list)
table_list_referenced = [TableRow(TableCell(Para(Str('1'))), TableCell(), TableCell(), TableCell(), TableCell(), TableCell()), TableRow(TableCell(
Para(Str('2'))), TableCell(Para(Str('3'))), TableCell(Para(Str('4'))), TableCell(Para(Str('5'))), TableCell(Para(Str('6'))), TableCell(Para(Str('7'))))]
assert repr(table_list_converted) == repr(table_list_referenced)
markdown = False
table_list_converted = parse_table_list(markdown, raw_table_list)
table_list_referenced = [TableRow(TableCell(Plain(Str('1'))), TableCell(Plain(Str(''))), TableCell(Plain(Str(''))), TableCell(Plain(Str(''))), TableCell(Plain(Str(''))), TableCell(Plain(
Str('')))), TableRow(TableCell(Plain(Str('2'))), TableCell(Plain(Str('3'))), TableCell(Plain(Str('4'))), TableCell(Plain(Str('5'))), TableCell(Plain(Str('6'))), TableCell(Plain(Str('7'))))]
assert repr(table_list_converted) == repr(table_list_referenced)
return
| gpl-3.0 | 3,231,691,761,925,771,000 | 60.371429 | 197 | 0.626164 | false |
tensorflow/models | research/lstm_object_detection/inputs/tf_sequence_example_decoder_test.py | 2 | 4820 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lstm_object_detection.tf_sequence_example_decoder."""
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import parsing_ops
from lstm_object_detection.inputs import tf_sequence_example_decoder
from object_detection.core import standard_fields as fields
class TFSequenceExampleDecoderTest(tf.test.TestCase):
"""Tests for sequence example decoder."""
def _EncodeImage(self, image_tensor, encoding_type='jpeg'):
with self.test_session():
if encoding_type == 'jpeg':
image_encoded = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
else:
raise ValueError('Invalid encoding type.')
return image_encoded
def _DecodeImage(self, image_encoded, encoding_type='jpeg'):
with self.test_session():
if encoding_type == 'jpeg':
image_decoded = tf.image.decode_jpeg(tf.constant(image_encoded)).eval()
else:
raise ValueError('Invalid encoding type.')
return image_decoded
def testDecodeJpegImageAndBoundingBox(self):
"""Test if the decoder can correctly decode the image and bounding box.
A set of random images (represented as an image tensor) is first decoded as
the groundtrue image. Meanwhile, the image tensor will be encoded and pass
through the sequence example, and then decoded as images. The groundtruth
image and the decoded image are expected to be equal. Similar tests are
also applied to labels such as bounding box.
"""
image_tensor = np.random.randint(256, size=(256, 256, 3)).astype(np.uint8)
encoded_jpeg = self._EncodeImage(image_tensor)
decoded_jpeg = self._DecodeImage(encoded_jpeg)
sequence_example = example_pb2.SequenceExample(
feature_lists=feature_pb2.FeatureLists(
feature_list={
'image/encoded':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[encoded_jpeg])),
]),
'bbox/xmin':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[0.0])),
]),
'bbox/xmax':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[1.0]))
]),
'bbox/ymin':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[0.0])),
]),
'bbox/ymax':
feature_pb2.FeatureList(feature=[
feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=[1.0]))
]),
})).SerializeToString()
example_decoder = tf_sequence_example_decoder.TFSequenceExampleDecoder()
tensor_dict = example_decoder.decode(tf.convert_to_tensor(sequence_example))
# Test tensor dict image dimension.
self.assertAllEqual(
(tensor_dict[fields.InputDataFields.image].get_shape().as_list()),
[None, None, None, 3])
with self.test_session() as sess:
tensor_dict[fields.InputDataFields.image] = tf.squeeze(
tensor_dict[fields.InputDataFields.image])
tensor_dict[fields.InputDataFields.groundtruth_boxes] = tf.squeeze(
tensor_dict[fields.InputDataFields.groundtruth_boxes])
tensor_dict = sess.run(tensor_dict)
# Test decoded image.
self.assertAllEqual(decoded_jpeg, tensor_dict[fields.InputDataFields.image])
# Test decoded bounding box.
self.assertAllEqual([0.0, 0.0, 1.0, 1.0],
tensor_dict[fields.InputDataFields.groundtruth_boxes])
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 622,302,410,532,441,600 | 41.654867 | 80 | 0.625311 | false |
stdweird/aquilon | tests/broker/test_add_campus.py | 2 | 3088 | #!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the add campus command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestAddCampus(TestBrokerCommand):
def testaddte(self):
self.dsdb_expect_add_campus("ta", "Test Comment")
command = ["add", "campus", "--campus", "ta", "--country", "us",
"--comments", "Test Comment", "--fullname", "Test Campus"]
self.noouttest(command)
self.dsdb_verify()
def testverifyaddte(self):
command = "show campus --campus ta"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Campus: ta", command)
self.matchoutput(out, "Fullname: Test Campus", command)
self.matchoutput(out, "Comments: Test Comment", command)
def testverifyaddbuproto(self):
command = "show campus --campus ta --format proto"
out = self.commandtest(command.split(" "))
locs = self.parse_location_msg(out, 1)
self.matchoutput(locs.locations[0].name, "ta", command)
self.matchoutput(locs.locations[0].location_type, "campus", command)
def testverifybuildingall(self):
command = ["show", "campus", "--all"]
out = self.commandtest(command)
self.matchoutput(out, "Campus: ta", command)
def testverifyshowcsv(self):
command = "show campus --campus ta --format=csv"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "campus,ta,country,us", command)
def testaddln(self):
self.dsdb_expect_add_campus("ln")
self.noouttest(["add_campus", "--campus", "ln", "--country", "gb",
"--fullname", "London"])
self.dsdb_verify()
def testaddny(self):
self.dsdb_expect_add_campus("ny")
self.noouttest(["add_campus", "--campus", "ny", "--country", "us",
"--fullname", "New York"])
self.dsdb_verify()
def testaddvi(self):
self.dsdb_expect_add_campus("vi")
self.noouttest(["add_campus", "--campus", "vi", "--country", "us",
"--fullname", "Virginia"])
self.dsdb_verify()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestAddCampus)
unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 | -3,567,419,043,071,977,000 | 36.204819 | 77 | 0.630505 | false |
NovaSquirrel/NovaTheSquirrel | tools/title/pb53.py | 3 | 9314 | #!/usr/bin/env python
from __future__ import division
"""
The compression format deals with 16-byte chunks considered as 8x8
pixel tiles with two bit planes. Possible formats are as follows:
* Ordinary tile: two blocks of 8 bytes
* 2-bit tile (colors 0 and 1 or 2 and 3):
a block of 8 bytes, then 8 bytes of $00 or $FF
* 2-bit tile (colors 0 and 2 or 1 and 3):
8 bytes of $00 or $FF, then a block of 8 bytes
* 2-bit tile (colors 0 and 3 or 1 and 2):
a block of 8 bytes, then the same block copied or complemented
* Solid color tile: 8 bytes of $00 or $FF then 8 bytes of $00 or $FF
* Repeat tile: Repeat previous 16 bytes
* Repeat tile from previous pattern table: Repeat 16 bytes starting
4096 bytes back
"""
def pb8_oneplane(planedata, topValue=None):
ctile = [0]
lastc = topValue
flag = 0
for c in planedata:
flag = flag << 1
c = ord(c)
if c == lastc:
flag = flag | 1
else:
ctile.append(c)
lastc = c
ctile[0] = flag
return ctile
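# Illustrative example (not from the original source): pb8_oneplane packs an
# 8-byte plane into a flag byte plus only the bytes that differ from their
# predecessor.  For the plane FF FF 00 00 00 00 00 FF it returns
# [0b01011110, 0xFF, 0x00, 0xFF]: reading the flag MSB-first, a clear bit
# means "a new literal byte follows" and a set bit means "repeat the
# previous byte".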
def pb53(chrdata):
"""Compress tile data with PB53.
Return (pb53data, seekpoints)
pb53 is the compressed data
seekpoints is an array of indices of the start of the compressed data
for each 4096-byte unit after the first
"""
from array import array
from binascii import b2a_hex
out = array('B')
seekpoints = []
for i in range(0, len(chrdata), 16):
if i > 0 and i % 4096 == 0:
seekpoints.append(len(out))
tile = chrdata[i:i + 16]
asInts = array('B', tile)
solid0 = (asInts[0] in (0x00, 0xFF)
and all(x == tile[0] for x in tile[1:8]))
solid1 = (asInts[8] in (0x00, 0xFF)
and all(x == tile[8] for x in tile[9:16]))
# Solid color tiles: $84-$87
if solid0 and solid1:
ctrlbyte = 0x84 | (0x02 & asInts[8]) | (0x01 & asInts[0])
out.append(ctrlbyte)
continue
# Duplicate previous tile: $82
if i >= 16 and chrdata[i - 16:i] == tile:
ctrlbyte = 0x82
out.append(ctrlbyte)
continue
# Duplicate tile from previous pattern table: $83
if i >= 4096 and chrdata[i - 4096:i - 4080] == tile:
ctrlbyte = 0x83
out.append(ctrlbyte)
continue
# Encode first plane
if solid0:
ctrlbyte = 0x80 | (0x01 & asInts[0])
out.append(ctrlbyte)
else:
pb = pb8_oneplane(tile[0:8])
out.extend(pb)
# Encode second plane
if solid1:
ctrlbyte = 0x80 | (0x01 & asInts[8])
out.append(ctrlbyte)
elif tile[0:8] == tile[8:16]:
# Colors 0 and 3
ctrlbyte = 0x82
out.append(ctrlbyte)
elif all(a ^ b == 0xFF for (a, b) in zip(asInts[0:8], asInts[8:16])):
# Colors 1 and 2
ctrlbyte = 0x83
out.append(ctrlbyte)
else:
pb = pb8_oneplane(tile[8:16])
out.extend(pb)
return (out.tostring(), seekpoints)
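# Illustrative round trip (not from the original source); chrdata is any raw
# CHR string whose length is a multiple of 16:
#
#   packed, seekpoints = pb53(chrdata)
#   assert unpb53(packed) == chrdata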
def unpb53plane(ctrlbyte, it):
if ctrlbyte >= 0x80:
# Solid plane
p0data = 0xFF if (ctrlbyte & 0x01) else 0x00
return ([p0data] * 8)
p0data = [it.next()]
while len(p0data) < 8:
ctrlbyte = ctrlbyte << 1
p0data.append(p0data[-1] if ctrlbyte & 0x80 else it.next())
return p0data
def unpb53(data, numTiles=None):
from array import array
out = array('B')
it = (ord(c) for c in data)
for ctrlbyte in it:
if numTiles is not None and len(out) >= numTiles * 16:
break
if ctrlbyte >= 0x84 and ctrlbyte <= 0x87:
# Solid color tiles
p0data = 0xFF if (ctrlbyte & 0x01) else 0x00
out.extend([p0data] * 8)
p1data = 0xFF if (ctrlbyte & 0x02) else 0x00
out.extend([p1data] * 8)
continue
if ctrlbyte == 0x82:
# Repeat previous tile
out.extend(out[-16:])
continue
if ctrlbyte == 0x83:
# Repeat corresponding tile from other bank
out.extend(out[-4096:-4080])
continue
# Decode each plane
out.extend(unpb53plane(ctrlbyte, it))
ctrlbyte = it.next()
if ctrlbyte in (0x82, 0x83):
# 2-color plane, colors 0/3 or 1/2
xorbyte = 0xFF if (ctrlbyte & 0x01) else 0x00
out.extend(c ^ xorbyte for c in out[-8:])
else:
out.extend(unpb53plane(ctrlbyte, it))
return out.tostring()
roms = [
'../../my_games/Concentration Room 0.02.nes',
'../../my_games/lj65 0.41.nes',
'../roms/Zooming_Secretary.nes',
'../../compomenu/roms/fhbg.nes',
'../../compomenu/roms/lan.nes',
'../../compomenu/roms/mower.nes',
'../../compomenu/roms/slappin.nes',
'../../compomenu/roms/thwaite.nes',
'../../ruder/ruder.nes',
]
def test():
import ines
for filename in roms:
rom = ines.load_ines(filename)
(data, seekpoints) = pb53(rom['chr'])
print "%s: compressed %s to %s" % (filename, len(rom['chr']), len(data))
print "seekpoints:", seekpoints
unpacked = unpb53(data)
if unpacked != rom['chr']:
            diffs = [i for (i, (a, b)) in enumerate(zip(rom['chr'], unpacked))
                     if a != b]
            print "unpacked different starting at", diffs[0]
else:
print "unpack ok"
def parse_argv(argv):
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog [options] [[-i] INFILE [[-o] OUTFILE]]")
parser.add_option("-d", "--unpack", dest="unpacking",
help="unpack instead of packing",
action="store_true", default=False)
parser.add_option("--raw", dest="withHeader",
help="don't write 2-byte length header",
action="store_false", default=True)
parser.add_option("-i", "--input", dest="infilename",
help="read input from INFILE", metavar="INFILE")
parser.add_option("-o", "--output", dest="outfilename",
help="write output to OUTFILE", metavar="OUTFILE")
(options, args) = parser.parse_args(argv[1:])
# Fill unfilled roles with positional arguments
argsreader = iter(args)
infilename = options.infilename
if infilename is None:
try:
infilename = argsreader.next()
except StopIteration:
infilename = '-'
if infilename == '-' and options.unpacking:
import sys
if sys.stdin.isatty():
raise ValueError('cannot decompress from terminal')
outfilename = options.outfilename
if outfilename is None:
try:
outfilename = argsreader.next()
except StopIteration:
outfilename = '-'
if outfilename == '-' and not options.unpacking:
import sys
if sys.stdout.isatty():
raise ValueError('cannot compress to terminal')
return (infilename, outfilename, options.unpacking, options.withHeader)
argvTestingMode = True
def main(argv=None):
import sys
if argv is None:
argv = sys.argv
if (argvTestingMode and len(argv) < 2
and sys.stdin.isatty() and sys.stdout.isatty()):
argv.extend(raw_input('args:').split())
try:
(infilename, outfilename, unpacking, withHeader) = parse_argv(argv)
except StandardError, e:
import sys
sys.stderr.write("%s: %s\n" % (argv[0], str(e)))
sys.exit(1)
# Read input file
infp = None
try:
if infilename != '-':
infp = open(infilename, 'rb')
else:
infp = sys.stdin
data = infp.read()
finally:
if infp and infilename != '-':
infp.close()
del infilename, infp
if unpacking:
# Decompress input file
if withHeader:
numTiles = ord(data[0]) * 256 + ord(data[1])
startOffset = -((-numTiles) // 256) * 2
else:
numTiles = None
startOffset = 0
        # the header's tile count bounds the unpacked length
        maxlength = numTiles * 16 if numTiles is not None else None
        outdata = unpb53(data[startOffset:], numTiles)
        if maxlength is not None:
if len(outdata) < maxlength:
raise IndexError('incomplete chunk')
if len(outdata) > maxlength:
outdata = outdata[:maxlength]
else:
# Compress input file
(outdata, seekpoints) = pb53(data)
if withHeader:
# The .pb53 header is the unpacked length in 16-byte units,
# 16-bit big endian, followed by 16-bit seek points
sz = (len(data) // 16) % 0x10000
headerwords = [sz]
headerwords.extend(seekpoints)
header = "".join("".join((chr((sz >> 8) & 0xFF), chr(sz & 0xFF)))
for sz in headerwords)
outdata = header + outdata
# Read input file
outfp = None
try:
if outfilename != '-':
outfp = open(outfilename, 'wb')
else:
outfp = sys.stdout
outfp.write(outdata)
finally:
if outfp and outfilename != '-':
outfp.close()
if __name__=='__main__':
main()
## test()
| gpl-3.0 | 8,970,250,331,609,650,000 | 30.360269 | 86 | 0.547778 | false |
ajyoon/brown | doc/module_doc.py | 1 | 5430 | import re
import doc.doc_config as doc_config
from doc.utils import (first_or_none,
line_num_at,
previous_line_ending_index_from,
everything_in_indentation_block,
parse_general_text)
from doc.attribute_doc import AttributeDoc
from doc.class_doc import ClassDoc
from doc.method_doc import MethodDoc
from doc.method_type import MethodType
class ModuleDoc:
"""A Python module as far as docs are concerned."""
docstring_re = re.compile(
r' *(\"\"\"(?P<content>.*?)\"\"\")',
flags=re.DOTALL)
class_name_re_capture = r'(?P<class>[A-Z_]\w*)'
class_re = re.compile(r'^class '
+ class_name_re_capture
+ r'(\((?P<superclasses>.*?)\))?'
+ r':\n',
flags=re.DOTALL | re.MULTILINE)
module_level_method_re = re.compile(
r'^def (?P<method>(__|[a-z])[A-Za-z_0-9]*)'
'\((?P<args>.*?)\):\n',
flags=re.DOTALL | re.MULTILINE)
module_level_attribute_re = re.compile(
r'^(?P<name>\w+) = (?P<value>.*$)\n',
flags=re.MULTILINE)
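    # Illustrative examples (not from the original source) of lines the
    # regexes above are meant to match:
    #   class_re                  -> "class FooBar(Base):"
    #   module_level_method_re    -> "def do_thing(arg1, arg2):"
    #   module_level_attribute_re -> "SOME_CONSTANT = 42"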
def __init__(self, path, name, global_index):
self.path = path
self.name = name
self.package = 'NOT YET KNOWN'
self.classes = []
self.methods = []
self.attributes = []
self.global_index = global_index
self.global_index.add(self)
self.summary = ''
self.details = ''
self.parse_module()
@property
def url(self):
return doc_config.API + '/' + self.name.replace('.', '/') + '.html'
@property
def source_url(self):
if type(self.parent).__name__ == 'MethodDoc':
path = self.parent.path
else:
path = self.parent.parent.path
return '{}/{}'.format(
doc_config.SOURCE_ROOT,
path)
@property
def unqualified_name(self):
if '.' in self.name:
return self.name.split('.')[-1]
return self.name
@property
def display_name(self):
return self.name.replace('.', '<wbr>.')
def read_file_lines(self):
file = open(self.path, 'r')
return file.readlines()
def parse_module(self):
lines = self.read_file_lines()
contents = ''.join(lines)
class_matches = list(re.finditer(ModuleDoc.class_re,
contents))
method_matches = list(re.finditer(ModuleDoc.module_level_method_re,
contents))
attribute_matches = list(re.finditer(ModuleDoc.module_level_attribute_re,
contents))
docstring_blocks = re.finditer(ModuleDoc.docstring_re, contents)
# NOTE: This completely ignores classes/methods without docstrings
for block_index, block in enumerate(docstring_blocks):
last_line_end_i = previous_line_ending_index_from(
block.start(0), contents)
docstring = block.group('content')
class_match = first_or_none(
c for c in class_matches
# Allow a blank line before class docstring
if c.end(0) - 1 == last_line_end_i
or c.end(0) == last_line_end_i)
method_match = first_or_none(
m for m in method_matches
if m.end(0) - 1 == last_line_end_i)
attribute_match = first_or_none(
a for a in attribute_matches
if a.end(0) - 1 == last_line_end_i)
if class_match:
class_body = everything_in_indentation_block(
block.end(0), contents)
self.classes.append(ClassDoc(
class_match.group('class'),
self,
class_match.group('superclasses'),
docstring,
class_body,
self.global_index,
line_num_at(class_match.start(0), contents),
line_num_at(block.end(0), contents)))
elif method_match:
self.methods.append(MethodDoc(
method_match.group('method'),
self,
method_match.group('args'),
docstring,
MethodType.normal,
self.global_index,
line_num_at(method_match.start(0), contents)))
elif attribute_match:
self.attributes.append(AttributeDoc(
attribute_match.group('name'),
self,
docstring,
False,
True,
attribute_match.group('value'),
self.global_index,
line_num_at(attribute_match.start(0), contents)))
elif block_index == 0:
if '\n\n' in docstring:
self.summary, self.details = docstring.split('\n\n', 1)
else:
self.summary = docstring
self.details = ''
else:
pass
def resolve_names_and_parse_html(self):
self.summary = parse_general_text(self.summary, self)
self.details = parse_general_text(self.details, self)
| gpl-3.0 | -4,305,341,140,923,211,000 | 36.448276 | 81 | 0.496317 | false |
craftoid/sconcho | sconcho/gui/pattern_repeat_dialog.py | 1 | 3216 | # -*- coding: utf-8 -*-
########################################################################
#
# (c) 2009-2012 Markus Dittrich
#
# This program is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License Version 3 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License Version 3 for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
#######################################################################
from functools import partial
from PyQt4.QtCore import (QDir,
Qt,
SIGNAL)
from PyQt4.QtGui import (QColorDialog,
QDialog)
from sconcho.gui.ui_pattern_repeat_box_dialog import Ui_PatternRepeatDialog
##########################################################################
#
# This widget allows customization and deletion of pattern repeats on
# the canvas
#
##########################################################################
class PatternRepeatDialog(QDialog, Ui_PatternRepeatDialog):
def __init__(self, width, color, hasLegend, parent = None):
""" Initialize the dialog. """
super(PatternRepeatDialog, self).__init__(parent)
self.setupUi(self)
self.lineWidthSpinner.setValue(width)
self.width = width
self.showInLegend = hasLegend
self.legendChecker.setCheckState(hasLegend)
self.color = color
self.set_button_color()
self.connect(self.acceptButton, SIGNAL("pressed()"),
partial(self.done, 1))
self.connect(self.colorButton, SIGNAL("pressed()"),
self.change_color)
self.connect(self.lineWidthSpinner, SIGNAL("valueChanged(int)"),
self.change_width)
self.connect(self.legendChecker, SIGNAL("stateChanged(int)"),
self.change_legend_state)
def set_button_color(self):
""" Set the color of a button via a stylesheet. """
styleSheet = "background-color: " + self.color.name() + ";"
self.colorButton.setStyleSheet(styleSheet)
def change_color(self):
""" Fire up a QColorDialog to let the user change the
color of the lines marking a pattern repeat box.
"""
color = QColorDialog.getColor(self.color, None,
"Select Line Color")
self.color = color
self.set_button_color()
def change_width(self, newWidth):
""" Adjust the line width after a user change to the
width SpinBox.
"""
self.width = newWidth
def change_legend_state(self, legendState):
""" Change the legend state of the repeat box, i.e.
if a legend entry is shown or not.
"""
self.showInLegend = legendState
| gpl-3.0 | 5,044,797,588,355,203,000 | 29.339623 | 75 | 0.574316 | false |
domob1812/huntercore | test/functional/rpc_rawtransaction.py | 1 | 21696 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtransaction RPCs.
Test the following RPCs:
- createrawtransaction
- signrawtransactionwithwallet
- sendrawtransaction
- decoderawtransaction
- getrawtransaction
"""
from collections import OrderedDict
from io import BytesIO
from test_framework.messages import CTransaction, ToHex
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class multidict(dict):
"""Dictionary that allows duplicate keys.
Constructed with a list of (key, value) tuples. When dumped by the json module,
will output invalid json with repeated keys, eg:
    >>> json.dumps(multidict([(1,2),(1,2)]))
'{"1": 2, "1": 2}'
Used to test calls to rpc methods with repeated keys in the json object."""
def __init__(self, x):
dict.__init__(self, x)
self.x = x
def items(self):
return self.x
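# Editorial note (not part of the original test): multidict is what allows the
# createrawtransaction checks below to send intentionally duplicated JSON keys
# to the node, e.g. multidict([(address, 1), (address, 1)]) for a duplicated
# output address and multidict([('data', '99'), ('data', '99')]) for two data
# outputs in a single request.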
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"], ["-addresstype=legacy"]]
def setup_network(self, split=False):
super().setup_network()
connect_nodes_bi(self.nodes, 0, 2)
def run_test(self):
self.log.info('prepare some coins for multiple *rawtransaction commands')
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
self.log.info('Test getrawtransaction on genesis block coinbase returns an error')
block = self.nodes[0].getblock(self.nodes[0].getblockhash(0))
assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot'])
self.log.info('Check parameter types and required parameters of createrawtransaction')
# Test `createrawtransaction` required parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
# Test `createrawtransaction` invalid extra parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
# Test `createrawtransaction` invalid `inputs`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{}], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
# Test `createrawtransaction` invalid `outputs`
address = self.nodes[0].getnewaddress()
address2 = self.nodes[0].getnewaddress()
assert_raises_rpc_error(-1, "JSON value is not an array as expected", self.nodes[0].createrawtransaction, [], 'foo')
self.nodes[0].createrawtransaction(inputs=[], outputs={}) # Should not throw for backwards compatibility
self.nodes[0].createrawtransaction(inputs=[], outputs=[])
assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
assert_raises_rpc_error(-5, "Invalid Namecoin address", self.nodes[0].createrawtransaction, [], {'foo': 0})
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}])
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair must contain exactly one key", self.nodes[0].createrawtransaction, [], [{'a': 1, 'b': 2}])
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair not an object as expected", self.nodes[0].createrawtransaction, [], [['key-value pair1'], ['2']])
# Test `createrawtransaction` invalid `locktime`
assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
# Test `createrawtransaction` invalid `replaceable`
assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
self.log.info('Check that createrawtransaction accepts an array and object as outputs')
tx = CTransaction()
# One output
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs={address: 99}))))
assert_equal(len(tx.vout), 1)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}]),
)
# Two outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)])))))
assert_equal(len(tx.vout), 2)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}]),
)
# Two data outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([('data', '99'), ('data', '99')])))))
assert_equal(len(tx.vout), 2)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{'data': '99'}, {'data': '99'}]),
)
# Multiple mixed outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), ('data', '99'), ('data', '99')])))))
assert_equal(len(tx.vout), 3)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {'data': '99'}, {'data': '99'}]),
)
self.log.info('sendrawtransaction with missing input')
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] #won't exists
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransactionwithwallet(rawtx)
# This will raise an exception since there are missing inputs
assert_raises_rpc_error(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])
#####################################
# getrawtransaction with block hash #
#####################################
# make a tx by sending then generate 2 blocks; block1 has the tx in it
tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
block1, block2 = self.nodes[2].generate(2)
self.sync_all()
# We should be able to get the raw transaction by providing the correct block
gottx = self.nodes[0].getrawtransaction(tx, True, block1)
assert_equal(gottx['txid'], tx)
assert_equal(gottx['in_active_chain'], True)
# We should not have the 'in_active_chain' flag when we don't provide a block
gottx = self.nodes[0].getrawtransaction(tx, True)
assert_equal(gottx['txid'], tx)
assert 'in_active_chain' not in gottx
# We should not get the tx if we provide an unrelated block
assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2)
# An invalid block hash should raise the correct errors
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, True)
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, "foobar")
assert_raises_rpc_error(-8, "parameter 3 must be of length 64", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
# Undo the blocks and check in_active_chain
self.nodes[0].invalidateblock(block1)
gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
assert_equal(gottx['in_active_chain'], False)
self.nodes[0].reconsiderblock(block1)
assert_equal(self.nodes[0].getbestblockhash(), block2)
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
# Tests for createmultisig and addmultisigaddress
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # createmultisig can only take public keys
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here.
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address']
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 BTC to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
addr3Obj = self.nodes[2].getaddressinfo(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#THIS IS AN INCOMPLETE FEATURE
        #NODE2 HAS TWO OF THREE KEYS AND THE FUNDS SHOULD BE SPENDABLE AND COUNT TOWARDS THE BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# 2of2 test for combining transactions
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx2['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(rawTx2, inputs)
self.log.debug(rawTxPartialSigned1)
assert_equal(rawTxPartialSigned1['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(rawTx2, inputs)
self.log.debug(rawTxPartialSigned2)
assert_equal(rawTxPartialSigned2['complete'], False) #node2 only has one key, can't comp. sign the tx
rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
self.log.debug(rawTxComb)
self.nodes[2].sendrawtransaction(rawTxComb)
rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# decoderawtransaction tests
# witness transaction
encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction
# non-witness transaction
encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
# getrawtransaction tests
# 1. valid parameters - only supply txid
txHash = rawTx["hash"]
assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
        # 5. valid parameters - supply txid and True for verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
# 6. invalid parameters - supply txid and string "Flase"
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, "Flase")
# 7. invalid parameters - supply txid and empty array
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, [])
# 8. invalid parameters - supply txid and empty dict
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
# 9. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
# 10. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
####################################
# TRANSACTION VERSION NUMBER TESTS #
####################################
# Test the minimum transaction version number that fits in a signed 32-bit integer.
tx = CTransaction()
tx.nVersion = -0x80000000
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], -0x80000000)
# Test the maximum transaction version number that fits in a signed 32-bit integer.
tx = CTransaction()
tx.nVersion = 0x7fffffff
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], 0x7fffffff)
if __name__ == '__main__':
RawTransactionsTest().main()
| mit | -4,876,497,170,849,068,000 | 55.795812 | 233 | 0.654775 | false |
datalogics/scons | test/Repository/SConscript.py | 2 | 3620 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test how we handle SConscript calls when using a Repository.
"""
import sys
import TestSCons
if sys.platform == 'win32':
_exe = '.exe'
else:
_exe = ''
test = TestSCons.TestSCons()
#
test.subdir('work',
['work', 'src'],
'rep1',
['rep1', 'src'],
'rep2',
['rep2', 'build'],
['rep2', 'src'],
['rep2', 'src', 'sub'])
#
workpath_rep1 = test.workpath('rep1')
workpath_rep2 = test.workpath('rep2')
#
test.write(['work', 'SConstruct'], """
Repository(r'%s')
SConscript('src/SConscript')
""" % workpath_rep1)
test.write(['rep1', 'src', 'SConscript'], """\
def cat(env, source, target):
target = str(target[0])
source = map(str, source)
f = open(target, "wb")
for src in source:
f.write(open(src, "rb").read())
f.close()
env = Environment(BUILDERS={'Cat':Builder(action=cat)})
env.Cat(target = 'foo', source = ['aaa.in', 'bbb.in', 'ccc.in'])
""")
test.write(['rep1', 'src', 'aaa.in'], "rep1/src/aaa.in\n")
test.write(['rep1', 'src', 'bbb.in'], "rep1/src/bbb.in\n")
test.write(['rep1', 'src', 'ccc.in'], "rep1/src/ccc.in\n")
# Make the rep1 non-writable,
# so we'll detect if we try to write into it accidentally.
test.writable('rep1', 0)
test.run(chdir = 'work', arguments = ".")
test.fail_test(test.read(['work', 'src', 'foo']) != """\
rep1/src/aaa.in
rep1/src/bbb.in
rep1/src/ccc.in
""")
test.up_to_date(chdir = 'work', arguments = ".")
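# Editorial note (not part of the original test): only work/SConstruct exists on
# disk under work/; the SConscript and the three .in sources are all located in
# rep1 through the Repository() call, and the test.up_to_date() call above
# confirms the repository-backed build is then considered up to date.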
#
test.write(['rep2', 'build', 'SConstruct'], """
env = Environment(REPOSITORY = r'%s')
env.Repository('$REPOSITORY')
SConscript('src/SConscript')
""" % workpath_rep2)
test.write(['rep2', 'src', 'SConscript'], """\
def cat(env, source, target):
target = str(target[0])
source = map(str, source)
f = open(target, "wb")
for src in source:
f.write(open(src, "rb").read())
f.close()
env = Environment(BUILDERS={'Cat':Builder(action=cat)})
env.Cat(target = 'foo', source = ['aaa.in', 'bbb.in', 'ccc.in'])
SConscript('sub/SConscript')
""")
test.write(['rep2', 'src', 'sub', 'SConscript'], """\
""")
test.write(['rep2', 'src', 'aaa.in'], "rep2/src/aaa.in\n")
test.write(['rep2', 'src', 'bbb.in'], "rep2/src/bbb.in\n")
test.write(['rep2', 'src', 'ccc.in'], "rep2/src/ccc.in\n")
test.run(chdir = 'rep2/build', arguments = ".")
test.fail_test(test.read(['rep2', 'build', 'src', 'foo']) != """\
rep2/src/aaa.in
rep2/src/bbb.in
rep2/src/ccc.in
""")
#
test.pass_test()
| mit | 6,580,266,810,103,364,000 | 27.503937 | 73 | 0.636188 | false |
deniseschannon/rancher-compose | tests/integration/cattletest/core/test_service.py | 1 | 5774 | from common_fixtures import * # NOQA
from os import path
import os
import pytest
import cattle
import ConfigParser
@pytest.fixture(scope='session')
def client(admin_user_client, request):
try:
return cattle.from_env(url=os.environ['RANCHER_URL'],
access_key=os.environ['RANCHER_ACCESS_KEY'],
secret_key=os.environ['RANCHER_SECRET_KEY'])
except KeyError:
pass
try:
config = ConfigParser.ConfigParser()
config.read(path.join(_base(), '../../tox.ini'))
return cattle.from_env(url=config.get('rancher', 'url'),
access_key=config.get('rancher', 'access-key'),
secret_key=config.get('rancher', 'secret-key'))
except ConfigParser.NoOptionError:
pass
return new_context(admin_user_client, request).client
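# Editorial note (not part of the original tests): the fixture above resolves a
# Rancher API client from, in order, the RANCHER_* environment variables, the
# [rancher] section of tox.ini, or a freshly created context for the admin user.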
def test_stack_create_upgrade_finish(client):
name = 'project-' + random_str()
rancher_compose = '''
.catalog:
uuid: foo
'''
template = '''
one:
image: nginx
two:
image: nginx
'''
env = client.create_environment(name=name, dockerCompose=template,
rancherCompose=rancher_compose)
env = client.wait_success(env)
assert env.state == 'active'
assert env.externalId is None
names = set()
for s in env.services():
assert s.state == 'inactive'
names.add(s.name)
assert names == {'one', 'two'}
env = client.wait_success(env.activateservices())
for s in env.services():
s = client.wait_success(s)
assert s.state == 'active'
rancher_compose = '''
.catalog:
uuid: foo2
'''
template = '''
one:
image: nginx:2
two:
image: nginx
'''
# TODO: externalId should not be in upgrade
env.upgrade(dockerCompose=template,
rancherCompose=rancher_compose,
externalId='foo2')
env = client.wait_success(env, timeout=120)
for s in env.services():
s = client.wait_success(s)
if s.name == 'one':
assert s.state == 'upgraded'
elif s.name == 'two':
assert s.state == 'active'
assert env.externalId == 'foo2'
assert env.previousExternalId == ''
env.finishupgrade()
env = client.wait_success(env)
for s in env.services():
s = client.wait_success(s)
assert s.state == 'active'
assert env.externalId == 'foo2'
assert env.previousExternalId is None
def test_stack_create_and_upgrade(client):
name = 'project-' + random_str()
rancher_compose = '''
.catalog:
uuid: foo
'''
template = '''
one:
image: nginx
two:
image: nginx
'''
env = client.create_environment(name=name, dockerCompose=template,
environment={'a': 'b', 'd': 'e'},
rancherCompose=rancher_compose)
env = client.wait_success(env)
env = client.wait_success(env.activateservices())
assert env.state == 'active'
assert env.environment == {'a': 'b', 'd': 'e'}
for s in env.services():
s = client.wait_success(s)
assert s.state == 'active'
rancher_compose = '''
.catalog:
uuid: foo2
'''
template = '''
one:
image: nginx:2
two:
image: nginx
'''
# TODO: externalId should not be in upgrade
env = env.upgrade(dockerCompose=template,
environment={'a': 'x'},
rancherCompose=rancher_compose,
externalId='foo2')
assert env.environment == {'a': 'b', 'd': 'e'}
env = client.wait_success(env, timeout=120)
assert env.state == 'upgraded'
for s in env.services():
s = client.wait_success(s)
if s.name == 'one':
assert s.state == 'upgraded'
assert env.environment == {'a': 'x', 'd': 'e'}
assert env.previousEnvironment == {'a': 'b', 'd': 'e'}
env = env.rollback()
env = client.wait_success(env, timeout=120)
assert env.state == 'active'
for s in env.services():
s = client.wait_success(s)
assert s.state == 'active'
# TODO this should really be ''
assert env.externalId == 'foo2'
assert env.environment == {'a': 'b', 'd': 'e'}
assert env.previousExternalId is None
assert env.previousEnvironment is None
def test_stack_change_scale_upgrade(client):
name = 'project-' + random_str()
template = '''
one:
image: nginx
'''
rancher_compose = '''
one:
scale: 2
'''
env = client.create_environment(name=name, dockerCompose=template,
rancherCompose=rancher_compose)
env = client.wait_success(env)
env = client.wait_success(env.activateservices())
assert env.state == 'active'
s = find_one(env.services)
assert s.launchConfig.imageUuid == 'docker:nginx'
assert s.scale == 2
template = '''
one:
image: nginx:2
'''
# Something else about the service needs to change too, like metadata
    # scale is ignored in the diff
rancher_compose = '''
one:
metadata:
foo: bar
scale: 4
'''
env.upgrade(dockerCompose=template,
rancherCompose=rancher_compose)
env = client.wait_success(env, timeout=120)
assert env.state == 'upgraded'
s = find_one(env.services)
assert s.launchConfig.imageUuid == 'docker:nginx:2'
assert s.scale == 2
def test_stack_create_circles(client):
name = 'project-' + random_str()
template = '''
one:
image: nginx
links:
- two
two:
image: nginx
links:
- one
'''
env = client.create_environment(name=name, dockerCompose=template)
env = client.wait_success(env)
for s in env.services():
find_one(s.consumedservices)
def _base():
return path.dirname(__file__)
| apache-2.0 | -1,413,051,527,604,582,100 | 24.213974 | 78 | 0.587461 | false |
HackerExperience/asynqp | src/asynqp/channel.py | 1 | 16321 | import asyncio
import re
from . import bases
from . import frames
from . import spec
from . import queue
from . import exchange
from . import message
from . import routing
from .exceptions import UndeliverableMessage
VALID_QUEUE_NAME_RE = re.compile(r'^(?!amq\.)(\w|[-.:])*$', flags=re.A)
VALID_EXCHANGE_NAME_RE = re.compile(r'^(?!amq\.)(\w|[-.:])+$', flags=re.A)
class Channel(object):
"""
Manage AMQP Channels.
A Channel is a 'virtual connection' over which messages are sent and received.
Several independent channels can be multiplexed over the same :class:`Connection`,
so peers can perform several tasks concurrently while using a single socket.
Channels are created using :meth:`Connection.open_channel() <Connection.open_channel>`.
.. attribute::id
the numerical ID of the channel
"""
def __init__(self, id, synchroniser, sender, basic_return_consumer, queue_factory, reader):
self.id = id
self.synchroniser = synchroniser
self.sender = sender
self.basic_return_consumer = basic_return_consumer
self.queue_factory = queue_factory
self.reader = reader
@asyncio.coroutine
def declare_exchange(self, name, type, *, durable=True, auto_delete=False, internal=False):
"""
Declare an :class:`Exchange` on the broker. If the exchange does not exist, it will be created.
This method is a :ref:`coroutine <coroutine>`.
:param str name: the name of the exchange.
:param str type: the type of the exchange (usually one of ``'fanout'``, ``'direct'``, ``'topic'``, or ``'headers'``)
:keyword bool durable: If true, the exchange will be re-created when the server restarts.
:keyword bool auto_delete: If true, the exchange will be deleted when the last queue is un-bound from it.
:keyword bool internal: If true, the exchange cannot be published to directly; it can only be bound to other exchanges.
:return: the new :class:`Exchange` object.
"""
if name == '':
return exchange.Exchange(self.reader, self.synchroniser, self.sender, name, 'direct', True, False, False)
if not VALID_EXCHANGE_NAME_RE.match(name):
raise ValueError("Invalid exchange name.\n"
"Valid names consist of letters, digits, hyphen, underscore, period, or colon, "
"and do not begin with 'amq.'")
self.sender.send_ExchangeDeclare(name, type, durable, auto_delete, internal)
yield from self.synchroniser.await(spec.ExchangeDeclareOK)
ex = exchange.Exchange(self.reader, self.synchroniser, self.sender, name, type, durable, auto_delete, internal)
self.reader.ready()
return ex
@asyncio.coroutine
def declare_queue(self, name='', *, durable=True, exclusive=False, auto_delete=False):
"""
Declare a queue on the broker. If the queue does not exist, it will be created.
This method is a :ref:`coroutine <coroutine>`.
:param str name: the name of the queue.
Supplying a name of '' will create a queue with a unique name of the server's choosing.
:keyword bool durable: If true, the queue will be re-created when the server restarts.
:keyword bool exclusive: If true, the queue can only be accessed by the current connection,
and will be deleted when the connection is closed.
:keyword bool auto_delete: If true, the queue will be deleted when the last consumer is cancelled.
                                   If there were never any consumers, the queue won't be deleted.
:return: The new :class:`Queue` object.
"""
q = yield from self.queue_factory.declare(name, durable, exclusive, auto_delete)
return q
@asyncio.coroutine
def close(self):
"""
Close the channel by handshaking with the server.
This method is a :ref:`coroutine <coroutine>`.
"""
self.sender.send_Close(0, 'Channel closed by application', 0, 0)
yield from self.synchroniser.await(spec.ChannelCloseOK)
# don't call self.reader.ready - stop reading frames from the q
@asyncio.coroutine
def set_qos(self, prefetch_size=0, prefetch_count=0, apply_globally=False):
"""
Specify quality of service by requesting that messages be pre-fetched
from the server. Pre-fetching means that the server will deliver messages
to the client while the client is still processing unacknowledged messages.
This method is a :ref:`coroutine <coroutine>`.
:param int prefetch_size: Specifies a prefetch window in bytes.
Messages smaller than this will be sent from the server in advance.
This value may be set to 0, which means "no specific limit".
:param int prefetch_count: Specifies a prefetch window in terms of whole messages.
:param bool apply_globally: If true, apply these QoS settings on a global level.
The meaning of this is implementation-dependent. From the
`RabbitMQ documentation <https://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.qos.global>`_:
RabbitMQ has reinterpreted this field. The original specification said:
"By default the QoS settings apply to the current channel only.
If this field is set, they are applied to the entire connection."
Instead, RabbitMQ takes global=false to mean that the QoS settings should apply
per-consumer (for new consumers on the channel; existing ones being unaffected) and
global=true to mean that the QoS settings should apply per-channel.
"""
self.sender.send_BasicQos(prefetch_size, prefetch_count, apply_globally)
yield from self.synchroniser.await(spec.BasicQosOK)
self.reader.ready()
def set_return_handler(self, handler):
"""
Set ``handler`` as the callback function for undeliverable messages
that were returned by the server.
By default, an exception is raised, which will be handled by
the event loop's exception handler (see :meth:`BaseEventLoop.set_exception_handler <asyncio.BaseEventLoop.set_exception_handler>`).
If ``handler`` is None, this default behaviour is set.
:param callable handler: A function to be called when a message is returned.
The callback will be passed the undelivered message.
"""
self.basic_return_consumer.set_callback(handler)
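# Editorial usage sketch (not part of the original module): a typical round trip
# with a Channel obtained from Connection.open_channel(). The exchange and queue
# names are illustrative, and the bind()/publish() calls on the returned Queue
# and Exchange objects are assumptions based on the wider package rather than
# definitions from this file.
@asyncio.coroutine
def _example_channel_usage(channel):
    ex = yield from channel.declare_exchange('example.exchange', 'direct')
    q = yield from channel.declare_queue('example.queue', durable=True)
    yield from channel.set_qos(prefetch_count=10)
    yield from q.bind(ex, 'example.routing.key')
    ex.publish(message.Message(b'hello'), 'example.routing.key')
    yield from channel.close()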
class ChannelFactory(object):
def __init__(self, loop, protocol, dispatcher, connection_info):
self.loop = loop
self.protocol = protocol
self.dispatcher = dispatcher
self.connection_info = connection_info
self.next_channel_id = 1
@asyncio.coroutine
def open(self):
synchroniser = routing.Synchroniser()
sender = ChannelMethodSender(self.next_channel_id, self.protocol, self.connection_info)
basic_return_consumer = BasicReturnConsumer()
consumers = queue.Consumers(self.loop)
consumers.add_consumer(basic_return_consumer)
handler = ChannelFrameHandler(synchroniser, sender)
reader, writer = routing.create_reader_and_writer(handler)
handler.message_receiver = MessageReceiver(synchroniser, sender, consumers, reader)
queue_factory = queue.QueueFactory(sender, synchroniser, reader, consumers)
channel = Channel(self.next_channel_id, synchroniser, sender, basic_return_consumer, queue_factory, reader)
self.dispatcher.add_writer(self.next_channel_id, writer)
try:
sender.send_ChannelOpen()
reader.ready()
yield from synchroniser.await(spec.ChannelOpenOK)
except:
self.dispatcher.remove_writer(self.next_channel_id)
raise
self.next_channel_id += 1
reader.ready()
return channel
class ChannelFrameHandler(bases.FrameHandler):
def handle_ChannelOpenOK(self, frame):
self.synchroniser.notify(spec.ChannelOpenOK)
def handle_QueueDeclareOK(self, frame):
self.synchroniser.notify(spec.QueueDeclareOK, frame.payload.queue)
def handle_ExchangeDeclareOK(self, frame):
self.synchroniser.notify(spec.ExchangeDeclareOK)
def handle_ExchangeDeleteOK(self, frame):
self.synchroniser.notify(spec.ExchangeDeleteOK)
def handle_QueueBindOK(self, frame):
self.synchroniser.notify(spec.QueueBindOK)
def handle_QueueUnbindOK(self, frame):
self.synchroniser.notify(spec.QueueUnbindOK)
def handle_QueuePurgeOK(self, frame):
self.synchroniser.notify(spec.QueuePurgeOK)
def handle_QueueDeleteOK(self, frame):
self.synchroniser.notify(spec.QueueDeleteOK)
def handle_BasicGetEmpty(self, frame):
self.synchroniser.notify(spec.BasicGetEmpty, False)
def handle_BasicGetOK(self, frame):
asyncio.async(self.message_receiver.receive_getOK(frame))
def handle_BasicConsumeOK(self, frame):
self.synchroniser.notify(spec.BasicConsumeOK, frame.payload.consumer_tag)
def handle_BasicCancelOK(self, frame):
self.synchroniser.notify(spec.BasicCancelOK)
def handle_BasicDeliver(self, frame):
asyncio.async(self.message_receiver.receive_deliver(frame))
def handle_ContentHeaderFrame(self, frame):
asyncio.async(self.message_receiver.receive_header(frame))
def handle_ContentBodyFrame(self, frame):
asyncio.async(self.message_receiver.receive_body(frame))
def handle_ChannelClose(self, frame):
self.sender.send_CloseOK()
def handle_ChannelCloseOK(self, frame):
self.synchroniser.notify(spec.ChannelCloseOK)
def handle_BasicQosOK(self, frame):
self.synchroniser.notify(spec.BasicQosOK)
def handle_BasicReturn(self, frame):
asyncio.async(self.message_receiver.receive_return(frame))
class MessageReceiver(object):
def __init__(self, synchroniser, sender, consumers, reader):
self.synchroniser = synchroniser
self.sender = sender
self.consumers = consumers
self.reader = reader
self.message_builder = None
@asyncio.coroutine
def receive_getOK(self, frame):
self.synchroniser.notify(spec.BasicGetOK, True)
payload = frame.payload
self.message_builder = message.MessageBuilder(
self.sender,
payload.delivery_tag,
payload.redelivered,
payload.exchange,
payload.routing_key
)
self.reader.ready()
@asyncio.coroutine
def receive_deliver(self, frame):
payload = frame.payload
self.message_builder = message.MessageBuilder(
self.sender,
payload.delivery_tag,
payload.redelivered,
payload.exchange,
payload.routing_key,
payload.consumer_tag
)
self.reader.ready()
yield from self.async_receive()
@asyncio.coroutine
def receive_return(self, frame):
payload = frame.payload
self.message_builder = message.MessageBuilder(
self.sender,
'',
'',
payload.exchange,
payload.routing_key,
BasicReturnConsumer.tag
)
self.reader.ready()
yield from self.async_receive()
@asyncio.coroutine
def async_receive(self):
yield from self.synchroniser.await(frames.ContentHeaderFrame)
tag, msg = yield from self.synchroniser.await(frames.ContentBodyFrame)
self.consumers.deliver(tag, msg)
self.reader.ready()
@asyncio.coroutine
def receive_header(self, frame):
self.synchroniser.notify(frames.ContentHeaderFrame)
self.message_builder.set_header(frame.payload)
self.reader.ready()
@asyncio.coroutine
def receive_body(self, frame):
self.message_builder.add_body_chunk(frame.payload)
if self.message_builder.done():
msg = self.message_builder.build()
tag = self.message_builder.consumer_tag
self.synchroniser.notify(frames.ContentBodyFrame, (tag, msg))
self.message_builder = None
# don't call self.reader.ready() if the message is all here -
# get() or async_receive() will call
# it when they have finished processing the completed msg
return
self.reader.ready()
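# Editorial note (not part of the original module): a delivered message arrives
# as a sequence of frames - BasicDeliver/BasicGetOK/BasicReturn first, then a
# ContentHeaderFrame, then one or more ContentBodyFrames - which is why
# async_receive() waits on the header and body notifications before handing the
# completed message to the consumers.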
# basically just a collection of aliases with some arguments hard coded for convenience
class ChannelMethodSender(bases.Sender):
def __init__(self, channel_id, protocol, connection_info):
super().__init__(channel_id, protocol)
self.connection_info = connection_info
def send_ChannelOpen(self):
self.send_method(spec.ChannelOpen(''))
def send_ExchangeDeclare(self, name, type, durable, auto_delete, internal):
self.send_method(spec.ExchangeDeclare(0, name, type, False, durable, auto_delete, internal, False, {}))
def send_ExchangeDelete(self, name, if_unused):
self.send_method(spec.ExchangeDelete(0, name, if_unused, False))
def send_QueueDeclare(self, name, durable, exclusive, auto_delete):
self.send_method(spec.QueueDeclare(0, name, False, durable, exclusive, auto_delete, False, {}))
def send_QueueBind(self, queue_name, exchange_name, routing_key):
self.send_method(spec.QueueBind(0, queue_name, exchange_name, routing_key, False, {}))
def send_QueueUnbind(self, queue_name, exchange_name, routing_key):
self.send_method(spec.QueueUnbind(0, queue_name, exchange_name, routing_key, {}))
def send_QueuePurge(self, queue_name):
self.send_method(spec.QueuePurge(0, queue_name, False))
def send_QueueDelete(self, queue_name, if_unused, if_empty):
self.send_method(spec.QueueDelete(0, queue_name, if_unused, if_empty, False))
def send_BasicPublish(self, exchange_name, routing_key, mandatory, message):
self.send_method(spec.BasicPublish(0, exchange_name, routing_key, mandatory, False))
self.send_content(message)
def send_BasicConsume(self, queue_name, no_local, no_ack, exclusive):
self.send_method(spec.BasicConsume(0, queue_name, '', no_local, no_ack, exclusive, False, {}))
def send_BasicCancel(self, consumer_tag):
self.send_method(spec.BasicCancel(consumer_tag, False))
def send_BasicGet(self, queue_name, no_ack):
self.send_method(spec.BasicGet(0, queue_name, no_ack))
def send_BasicAck(self, delivery_tag):
self.send_method(spec.BasicAck(delivery_tag, False))
def send_BasicReject(self, delivery_tag, redeliver):
self.send_method(spec.BasicReject(delivery_tag, redeliver))
def send_Close(self, status_code, msg, class_id, method_id):
self.send_method(spec.ChannelClose(status_code, msg, class_id, method_id))
def send_CloseOK(self):
self.send_method(spec.ChannelCloseOK())
def send_BasicQos(self, prefetch_size, prefetch_count, apply_globally):
self.send_method(spec.BasicQos(prefetch_size, prefetch_count, apply_globally))
def send_content(self, msg):
header_payload = message.get_header_payload(msg, spec.BasicPublish.method_type[0])
header_frame = frames.ContentHeaderFrame(self.channel_id, header_payload)
self.protocol.send_frame(header_frame)
for payload in message.get_frame_payloads(msg, self.connection_info.frame_max - 8):
frame = frames.ContentBodyFrame(self.channel_id, payload)
self.protocol.send_frame(frame)
class BasicReturnConsumer(object):
tag = -1 # a 'real' tag is a string so there will never be a clash
def __init__(self):
self.callback = self.default_behaviour
self.cancelled_future = asyncio.Future()
def set_callback(self, callback):
if callback is None:
self.callback = self.default_behaviour
return
if not callable(callback):
raise TypeError("The handler must be a callable with one argument (a message).", callback)
self.callback = callback
def default_behaviour(self, msg):
raise UndeliverableMessage(msg)
| mit | 59,513,056,076,001,260 | 38.807317 | 139 | 0.66779 | false |
mozilla/bedrock | lib/l10n_utils/management/commands/_fluent_templater.py | 6 | 1809 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from ._fluent import (
get_migration_context,
strip_whitespace,
trans_to_lang,
GETTEXT_RE,
TRANS_BLOCK_RE,
)
class Templater:
def __init__(self, cmd):
self.stdout = cmd.stdout
self.dependencies = {}
def handle(self, template):
self.dependencies.update(self.get_dependencies(template))
with template.open('r') as tfp:
template_str = tfp.read()
template_str = GETTEXT_RE.sub(self.gettext_to_fluent, template_str)
template_str = TRANS_BLOCK_RE.sub(self.trans_to_fluent, template_str)
outname = template.stem + '_ftl.html'
with template.with_name(outname).open('w') as tfp:
tfp.write(template_str)
def get_dependencies(self, template):
context = get_migration_context(template)
deps = {}
for (fluent_file, fluent_id), lang_set in context.dependencies.items():
for _, lang_string in lang_set:
deps[lang_string] = fluent_id
return deps
def gettext_to_fluent(self, m):
lang_id = strip_whitespace(m['string'])
if lang_id not in self.dependencies:
return m.group()
args = ''
if m['args']:
args = ', ' + m['args']
return f"ftl('{self.dependencies[lang_id]}'{args})"
def trans_to_fluent(self, m):
lang_id = trans_to_lang(m['string'])
if lang_id not in self.dependencies:
return m.group()
args = ''
if m['args']:
args = ', ' + m['args']
return f"{{{{ ftl('{self.dependencies[lang_id]}'{args}) }}}}"
| mpl-2.0 | -6,121,289,217,726,026,000 | 33.132075 | 79 | 0.583195 | false |
pkilambi/ceilometer | ceilometer/alarm/rpc.py | 2 | 2461 | #
# Copyright 2013 eNovance <[email protected]>
#
# Authors: Mehdi Abaakouk <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_context import context
from oslo_log import log
import six
from ceilometer.alarm.storage import models
from ceilometer.i18n import _
from ceilometer import messaging
OPTS = [
cfg.StrOpt('notifier_rpc_topic',
default='alarm_notifier',
help='The topic that ceilometer uses for alarm notifier '
'messages.'),
]
cfg.CONF.register_opts(OPTS, group='alarm')
LOG = log.getLogger(__name__)
class RPCAlarmNotifier(object):
def __init__(self):
transport = messaging.get_transport()
self.client = messaging.get_rpc_client(
transport, topic=cfg.CONF.alarm.notifier_rpc_topic,
version="1.0")
def notify(self, alarm, previous, reason, reason_data):
actions = getattr(alarm, models.Alarm.ALARM_ACTIONS_MAP[alarm.state])
if not actions:
LOG.debug(_('alarm %(alarm_id)s has no action configured '
'for state transition from %(previous)s to '
'state %(state)s, skipping the notification.') %
{'alarm_id': alarm.alarm_id,
'previous': previous,
'state': alarm.state})
return
self.client.cast(context.get_admin_context(),
'notify_alarm', data={
'actions': actions,
'alarm_id': alarm.alarm_id,
'alarm_name': alarm.name,
'severity': alarm.severity,
'previous': previous,
'current': alarm.state,
'reason': six.text_type(reason),
'reason_data': reason_data})
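# Editorial usage sketch (not part of the original module): how an evaluator
# might drive this notifier; the alarm object and the reason text are
# illustrative assumptions.
def _example_notify(alarm):
    notifier = RPCAlarmNotifier()
    notifier.notify(alarm, previous='ok',
                    reason='Transition to alarm due to threshold breach',
                    reason_data={'type': 'threshold'})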
| apache-2.0 | 1,028,771,583,186,684,700 | 36.861538 | 77 | 0.589191 | false |
ChengyuSong/xen-arm | tools/xm-test/tests/network/05_network_dom0_ping_pos.py | 42 | 1442 | #!/usr/bin/python
# Copyright (C) International Business Machines Corp., 2005
# Author: <[email protected]>
# Ping tests to dom0 interface
# - determines dom0 network
# - creates a single guest domain
# - sets up a single NIC on same subnet as dom0
# - conducts ping tests to the dom0 IP address.
# ping -c 1 -s $size $dom0_IP
# where $size = 1, 48, 64, 512, 1440, 1500, 1505,
# 4096, 4192, 32767, 65507, 65508
pingsizes = [ 1, 48, 64, 512, 1440, 1500, 1505, 4096, 4192,
32767, 65507 ]
from XmTestLib import *
rc = 0
# Test creates 1 domain, which requires 2 ips: 1 for the domains and 1 for
# aliases on dom0
if xmtest_netconf.canRunNetTest(2) == False:
SKIP("Don't have enough free configured IPs to run this test")
# Fire up a guest domain w/1 nic
domain = XmTestDomain()
domain.newDevice(XenNetDevice, "eth0")
try:
console = domain.start()
except DomainError, e:
if verbose:
print "Failed to create test domain because:"
print e.extra
FAIL(str(e))
try:
# Ping dom0
fails=""
netdev = domain.getDevice("eth0")
dom0ip = netdev.getDom0AliasIP()
for size in pingsizes:
out = console.runCmd("ping -q -c 1 -s " + str(size) + " " + dom0ip)
if out["return"]:
fails += " " + str(size)
except ConsoleError, e:
FAIL(str(e))
domain.stop()
if len(fails):
FAIL("Ping to dom0 failed for size" + fails + ".")
| gpl-2.0 | -9,186,185,786,361,203,000 | 25.703704 | 75 | 0.628294 | false |
landism/pants | src/python/pants/backend/jvm/tasks/scaladoc_gen.py | 11 | 2435 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform
from pants.backend.jvm.tasks.jvmdoc_gen import Jvmdoc, JvmdocGen
from pants.java.distribution.distribution import DistributionLocator
from pants.java.executor import SubprocessExecutor
from pants.util.memo import memoized
class ScaladocGen(JvmdocGen):
"""Generate scaladoc html for Scala source targets."""
@classmethod
@memoized
def jvmdoc(cls):
return Jvmdoc(tool_name='scaladoc', product_type='scaladoc')
@classmethod
def subsystem_dependencies(cls):
return super(ScaladocGen, cls).subsystem_dependencies() + (DistributionLocator, ScalaPlatform.scoped(cls))
@classmethod
def prepare(cls, options, round_manager):
super(ScaladocGen, cls).prepare(options, round_manager)
ScalaPlatform.prepare_tools(round_manager)
def execute(self):
def is_scala(target):
return target.has_sources('.scala')
self.generate_doc(is_scala, self.create_scaladoc_command)
def create_scaladoc_command(self, classpath, gendir, *targets):
sources = []
for target in targets:
sources.extend(target.sources_relative_to_buildroot())
# TODO(Tejal Desai): pantsbuild/pants/65: Remove java_sources attribute for ScalaLibrary
# A '.scala' owning target may not have java_sources, eg: junit_tests
if hasattr(target, 'java_sources'):
for java_target in target.java_sources:
sources.extend(java_target.sources_relative_to_buildroot())
if not sources:
return None
scala_platform = ScalaPlatform.global_instance()
tool_classpath = scala_platform.compiler_classpath(self.context.products)
args = ['-usejavacp',
'-classpath', ':'.join(classpath),
'-d', gendir]
args.extend(self.args)
args.extend(sources)
java_executor = SubprocessExecutor(DistributionLocator.cached())
runner = java_executor.runner(jvm_options=self.jvm_options,
classpath=tool_classpath,
main='scala.tools.nsc.ScalaDoc',
args=args)
return runner.command
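# Editorial note (not part of the original module): the runner assembled above is
# roughly equivalent to invoking
#   java -cp <scala compiler classpath> scala.tools.nsc.ScalaDoc \
#        -usejavacp -classpath <target classpath> -d <gendir> <sources...>
# with any extra args configured for the task inserted before the sources.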
| apache-2.0 | 4,697,154,759,604,350,000 | 35.343284 | 110 | 0.694867 | false |
sgraham/nope | tools/json_schema_compiler/cpp_type_generator.py | 3 | 11369 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from code import Code
from model import PropertyType
import cpp_util
from json_parse import OrderedDict
import schema_util
class _TypeDependency(object):
"""Contains information about a dependency a namespace has on a type: the
type's model, and whether that dependency is "hard" meaning that it cannot be
forward declared.
"""
def __init__(self, type_, hard=False):
self.type_ = type_
self.hard = hard
def GetSortKey(self):
return '%s.%s' % (self.type_.namespace.name, self.type_.name)
class CppTypeGenerator(object):
"""Manages the types of properties and provides utilities for getting the
C++ type out of a model.Property
"""
def __init__(self, model, schema_loader, default_namespace=None):
"""Creates a cpp_type_generator. The given root_namespace should be of the
format extensions::api::sub. The generator will generate code suitable for
use in the given model's namespace.
"""
self._default_namespace = default_namespace
if self._default_namespace is None:
self._default_namespace = model.namespaces.values()[0]
self._schema_loader = schema_loader
def GetEnumNoneValue(self, type_):
"""Gets the enum value in the given model.Property indicating no value has
been set.
"""
return '%s_NONE' % self.FollowRef(type_).unix_name.upper()
def GetEnumLastValue(self, type_):
"""Gets the enum value in the given model.Property indicating the last value
for the type.
"""
return '%s_LAST' % self.FollowRef(type_).unix_name.upper()
def GetEnumValue(self, type_, enum_value):
"""Gets the enum value of the given model.Property of the given type.
    e.g. VAR_STRING
"""
value = cpp_util.Classname(enum_value.name.upper())
prefix = (type_.cpp_enum_prefix_override or
self.FollowRef(type_).unix_name)
value = '%s_%s' % (prefix.upper(), value)
# To avoid collisions with built-in OS_* preprocessor definitions, we add a
    # trailing underscore to enum names that start with OS_.
if value.startswith("OS_"):
value += "_"
return value
def GetCppType(self, type_, is_ptr=False, is_in_container=False):
"""Translates a model.Property or model.Type into its C++ type.
If REF types from different namespaces are referenced, will resolve
using self._schema_loader.
Use |is_ptr| if the type is optional. This will wrap the type in a
scoped_ptr if possible (it is not possible to wrap an enum).
Use |is_in_container| if the type is appearing in a collection, e.g. a
std::vector or std::map. This will wrap it in the correct type with spacing.
"""
cpp_type = None
if type_.property_type == PropertyType.REF:
ref_type = self._FindType(type_.ref_type)
if ref_type is None:
raise KeyError('Cannot find referenced type: %s' % type_.ref_type)
cpp_type = self.GetCppType(ref_type)
elif type_.property_type == PropertyType.BOOLEAN:
cpp_type = 'bool'
elif type_.property_type == PropertyType.INTEGER:
cpp_type = 'int'
elif type_.property_type == PropertyType.INT64:
cpp_type = 'int64'
elif type_.property_type == PropertyType.DOUBLE:
cpp_type = 'double'
elif type_.property_type == PropertyType.STRING:
cpp_type = 'std::string'
elif type_.property_type in (PropertyType.ENUM,
PropertyType.OBJECT,
PropertyType.CHOICES):
if self._default_namespace is type_.namespace:
cpp_type = cpp_util.Classname(type_.name)
else:
cpp_namespace = cpp_util.GetCppNamespace(
type_.namespace.environment.namespace_pattern,
type_.namespace.unix_name)
cpp_type = '%s::%s' % (cpp_namespace,
cpp_util.Classname(type_.name))
elif type_.property_type == PropertyType.ANY:
cpp_type = 'base::Value'
elif type_.property_type == PropertyType.FUNCTION:
# Functions come into the json schema compiler as empty objects. We can
# record these as empty DictionaryValues so that we know if the function
# was passed in or not.
cpp_type = 'base::DictionaryValue'
elif type_.property_type == PropertyType.ARRAY:
item_cpp_type = self.GetCppType(type_.item_type, is_in_container=True)
cpp_type = 'std::vector<%s>' % cpp_util.PadForGenerics(item_cpp_type)
elif type_.property_type == PropertyType.BINARY:
cpp_type = 'std::vector<char>'
else:
raise NotImplementedError('Cannot get type of %s' % type_.property_type)
# HACK: optional ENUM is represented elsewhere with a _NONE value, so it
# never needs to be wrapped in pointer shenanigans.
# TODO(kalman): change this - but it's an exceedingly far-reaching change.
if not self.FollowRef(type_).property_type == PropertyType.ENUM:
if is_in_container and (is_ptr or not self.IsCopyable(type_)):
cpp_type = 'linked_ptr<%s>' % cpp_util.PadForGenerics(cpp_type)
elif is_ptr:
cpp_type = 'scoped_ptr<%s>' % cpp_util.PadForGenerics(cpp_type)
return cpp_type
def IsCopyable(self, type_):
return not (self.FollowRef(type_).property_type in (PropertyType.ANY,
PropertyType.ARRAY,
PropertyType.OBJECT,
PropertyType.CHOICES))
def GenerateForwardDeclarations(self):
"""Returns the forward declarations for self._default_namespace.
"""
c = Code()
for namespace, deps in self._NamespaceTypeDependencies().iteritems():
filtered_deps = [
dep for dep in deps
# Add more ways to forward declare things as necessary.
if (not dep.hard and
dep.type_.property_type in (PropertyType.CHOICES,
PropertyType.OBJECT))]
if not filtered_deps:
continue
cpp_namespace = cpp_util.GetCppNamespace(
namespace.environment.namespace_pattern,
namespace.unix_name)
c.Concat(cpp_util.OpenNamespace(cpp_namespace))
for dep in filtered_deps:
c.Append('struct %s;' % dep.type_.name)
c.Concat(cpp_util.CloseNamespace(cpp_namespace))
return c
def GenerateIncludes(self, include_soft=False):
"""Returns the #include lines for self._default_namespace.
"""
c = Code()
for namespace, dependencies in self._NamespaceTypeDependencies().items():
for dependency in dependencies:
if dependency.hard or include_soft:
c.Append('#include "%s/%s.h"' % (namespace.source_file_dir,
namespace.unix_name))
return c
def _FindType(self, full_name):
"""Finds the model.Type with name |qualified_name|. If it's not from
|self._default_namespace| then it needs to be qualified.
"""
namespace = self._schema_loader.ResolveType(full_name,
self._default_namespace)
if namespace is None:
raise KeyError('Cannot resolve type %s. Maybe it needs a prefix '
'if it comes from another namespace?' % full_name)
return namespace.types[schema_util.StripNamespace(full_name)]
def FollowRef(self, type_):
"""Follows $ref link of types to resolve the concrete type a ref refers to.
If the property passed in is not of type PropertyType.REF, it will be
returned unchanged.
"""
if type_.property_type != PropertyType.REF:
return type_
return self.FollowRef(self._FindType(type_.ref_type))
def _NamespaceTypeDependencies(self):
"""Returns a dict ordered by namespace name containing a mapping of
model.Namespace to every _TypeDependency for |self._default_namespace|,
sorted by the type's name.
"""
dependencies = set()
for function in self._default_namespace.functions.values():
for param in function.params:
dependencies |= self._TypeDependencies(param.type_,
hard=not param.optional)
if function.callback:
for param in function.callback.params:
dependencies |= self._TypeDependencies(param.type_,
hard=not param.optional)
for type_ in self._default_namespace.types.values():
for prop in type_.properties.values():
dependencies |= self._TypeDependencies(prop.type_,
hard=not prop.optional)
for event in self._default_namespace.events.values():
for param in event.params:
dependencies |= self._TypeDependencies(param.type_,
hard=not param.optional)
# Make sure that the dependencies are returned in alphabetical order.
dependency_namespaces = OrderedDict()
for dependency in sorted(dependencies, key=_TypeDependency.GetSortKey):
namespace = dependency.type_.namespace
if namespace is self._default_namespace:
continue
if namespace not in dependency_namespaces:
dependency_namespaces[namespace] = []
dependency_namespaces[namespace].append(dependency)
return dependency_namespaces
def _TypeDependencies(self, type_, hard=False):
"""Gets all the type dependencies of a property.
"""
deps = set()
if type_.property_type == PropertyType.REF:
deps.add(_TypeDependency(self._FindType(type_.ref_type), hard=hard))
elif type_.property_type == PropertyType.ARRAY:
# Non-copyable types are not hard because they are wrapped in linked_ptrs
# when generated. Otherwise they're typedefs, so they're hard (though we
# could generate those typedefs in every dependent namespace, but that
# seems weird).
deps = self._TypeDependencies(type_.item_type,
hard=self.IsCopyable(type_.item_type))
elif type_.property_type == PropertyType.CHOICES:
for type_ in type_.choices:
deps |= self._TypeDependencies(type_, hard=self.IsCopyable(type_))
elif type_.property_type == PropertyType.OBJECT:
for p in type_.properties.values():
deps |= self._TypeDependencies(p.type_, hard=not p.optional)
return deps
def GeneratePropertyValues(self, prop, line, nodoc=False):
"""Generates the Code to display all value-containing properties.
"""
c = Code()
if not nodoc:
c.Comment(prop.description)
if prop.value is not None:
c.Append(line % {
"type": self.GetCppType(prop.type_),
"name": prop.name,
"value": prop.value
})
else:
has_child_code = False
c.Sblock('namespace %s {' % prop.name)
for child_property in prop.type_.properties.values():
child_code = self.GeneratePropertyValues(child_property,
line,
nodoc=nodoc)
if child_code:
has_child_code = True
c.Concat(child_code)
c.Eblock('} // namespace %s' % prop.name)
if not has_child_code:
c = None
return c
| bsd-3-clause | -4,473,572,651,163,809,300 | 40.644689 | 80 | 0.632597 | false |
DeercoderCourse/cs231n | assignment2/cs231n/classifiers/convnet.py | 5 | 4193 | import numpy as np
from cs231n.layers import *
from cs231n.fast_layers import *
from cs231n.layer_utils import *
def two_layer_convnet(X, model, y=None, reg=0.0):
"""
Compute the loss and gradient for a simple two-layer ConvNet. The architecture
is conv-relu-pool-affine-softmax, where the conv layer uses stride-1 "same"
convolutions to preserve the input size; the pool layer uses non-overlapping
2x2 pooling regions. We use L2 regularization on both the convolutional layer
weights and the affine layer weights.
Inputs:
- X: Input data, of shape (N, C, H, W)
- model: Dictionary mapping parameter names to parameters. A two-layer Convnet
expects the model to have the following parameters:
- W1, b1: Weights and biases for the convolutional layer
- W2, b2: Weights and biases for the affine layer
- y: Vector of labels of shape (N,). y[i] gives the label for the point X[i].
- reg: Regularization strength.
Returns:
If y is None, then returns:
- scores: Matrix of scores, where scores[i, c] is the classification score for
the ith input and class c.
If y is not None, then returns a tuple of:
- loss: Scalar value giving the loss.
- grads: Dictionary with the same keys as model, mapping parameter names to
their gradients.
"""
# Unpack weights
W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
N, C, H, W = X.shape
# We assume that the convolution is "same", so that the data has the same
# height and width after performing the convolution. We can then use the
# size of the filter to figure out the padding.
conv_filter_height, conv_filter_width = W1.shape[2:]
assert conv_filter_height == conv_filter_width, 'Conv filter must be square'
assert conv_filter_height % 2 == 1, 'Conv filter height must be odd'
assert conv_filter_width % 2 == 1, 'Conv filter width must be odd'
conv_param = {'stride': 1, 'pad': (conv_filter_height - 1) / 2}
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
# Compute the forward pass
a1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)
scores, cache2 = affine_forward(a1, W2, b2)
if y is None:
return scores
# Compute the backward pass
data_loss, dscores = softmax_loss(scores, y)
# Compute the gradients using a backward pass
da1, dW2, db2 = affine_backward(dscores, cache2)
dX, dW1, db1 = conv_relu_pool_backward(da1, cache1)
# Add regularization
dW1 += reg * W1
dW2 += reg * W2
reg_loss = 0.5 * reg * sum(np.sum(W * W) for W in [W1, W2])
loss = data_loss + reg_loss
grads = {'W1': dW1, 'b1': db1, 'W2': dW2, 'b2': db2}
return loss, grads
def init_two_layer_convnet(weight_scale=1e-3, bias_scale=0, input_shape=(3, 32, 32),
num_classes=10, num_filters=32, filter_size=5):
"""
Initialize the weights for a two-layer ConvNet.
Inputs:
- weight_scale: Scale at which weights are initialized. Default 1e-3.
- bias_scale: Scale at which biases are initialized. Default is 0.
- input_shape: Tuple giving the input shape to the network; default is
(3, 32, 32) for CIFAR-10.
- num_classes: The number of classes for this network. Default is 10
(for CIFAR-10)
- num_filters: The number of filters to use in the convolutional layer.
- filter_size: The width and height for convolutional filters. We assume that
all convolutions are "same", so we pick padding to ensure that data has the
same height and width after convolution. This means that the filter size
must be odd.
Returns:
A dictionary mapping parameter names to numpy arrays containing:
- W1, b1: Weights and biases for the convolutional layer
- W2, b2: Weights and biases for the fully-connected layer.
"""
C, H, W = input_shape
assert filter_size % 2 == 1, 'Filter size must be odd; got %d' % filter_size
model = {}
model['W1'] = weight_scale * np.random.randn(num_filters, C, filter_size, filter_size)
model['b1'] = bias_scale * np.random.randn(num_filters)
model['W2'] = weight_scale * np.random.randn(num_filters * H * W / 4, num_classes)
model['b2'] = bias_scale * np.random.randn(num_classes)
return model
pass
| apache-2.0 | 8,090,188,160,403,811,000 | 37.46789 | 88 | 0.686859 | false |
charlescearl/VirtualMesos | third_party/boto-2.0b2/boto/fps/connection.py | 4 | 7853 | # Copyright (c) 2008 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import base64
import urllib
import xml.sax
import uuid
import boto
import boto.utils
from boto import handler
from boto.connection import AWSQueryConnection
from boto.resultset import ResultSet
from boto.exception import FPSResponseError
class FPSConnection(AWSQueryConnection):
APIVersion = '2007-01-08'
SignatureVersion = '1'
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None,
host='fps.sandbox.amazonaws.com', debug=0,
https_connection_factory=None, path="/"):
AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass, host, debug,
https_connection_factory, path)
def install_payment_instruction(self, instruction, token_type="Unrestricted", transaction_id=None):
"""
InstallPaymentInstruction
instruction: The PaymentInstruction to send, for example:
MyRole=='Caller' orSay 'Roles do not match';
token_type: Defaults to "Unrestricted"
transaction_id: Defaults to a new ID
"""
if(transaction_id == None):
transaction_id = uuid.uuid4()
params = {}
params['PaymentInstruction'] = instruction
params['TokenType'] = token_type
params['CallerReference'] = transaction_id
response = self.make_request("InstallPaymentInstruction", params)
return response
def install_caller_instruction(self, token_type="Unrestricted", transaction_id=None):
"""
Set us up as a caller
This will install a new caller_token into the FPS section.
This should really only be called to regenerate the caller token.
"""
response = self.install_payment_instruction("MyRole=='Caller';", token_type=token_type, transaction_id=transaction_id)
body = response.read()
if(response.status == 200):
rs = ResultSet()
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
caller_token = rs.TokenId
try:
boto.config.save_system_option("FPS", "caller_token", caller_token)
except(IOError):
boto.config.save_user_option("FPS", "caller_token", caller_token)
return caller_token
else:
raise FPSResponseError(response.status, response.reason, body)
def install_recipient_instruction(self, token_type="Unrestricted", transaction_id=None):
"""
Set us up as a Recipient
This will install a new caller_token into the FPS section.
This should really only be called to regenerate the recipient token.
"""
response = self.install_payment_instruction("MyRole=='Recipient';", token_type=token_type, transaction_id=transaction_id)
body = response.read()
if(response.status == 200):
rs = ResultSet()
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
recipient_token = rs.TokenId
try:
boto.config.save_system_option("FPS", "recipient_token", recipient_token)
except(IOError):
boto.config.save_user_option("FPS", "recipient_token", recipient_token)
return recipient_token
else:
raise FPSResponseError(response.status, response.reason, body)
def make_url(self, returnURL, paymentReason, pipelineName, **params):
"""
Generate the URL with the signature required for a transaction
"""
params['callerKey'] = str(self.aws_access_key_id)
params['returnURL'] = str(returnURL)
params['paymentReason'] = str(paymentReason)
params['pipelineName'] = pipelineName
if(not params.has_key('callerReference')):
params['callerReference'] = str(uuid.uuid4())
deco = [(key.lower(),i,key) for i,key in enumerate(params.keys())]
deco.sort()
keys = [key for _,_,key in deco]
url = ''
canonical = ''
for k in keys:
url += "&%s=%s" % (k, urllib.quote_plus(str(params[k])))
canonical += "%s%s" % (k, str(params[k]))
url = "/cobranded-ui/actions/start?%s" % ( url[1:])
hmac = self.hmac.copy()
hmac.update(canonical)
signature = urllib.quote_plus(base64.encodestring(hmac.digest()).strip())
return "https://authorize.payments-sandbox.amazon.com%s&awsSignature=%s" % (url, signature)
def make_payment(self, amount, sender_token, charge_fee_to="Recipient", reference=None, senderReference=None, recipientReference=None, senderDescription=None, recipientDescription=None, callerDescription=None, metadata=None, transactionDate=None):
"""
Make a payment transaction
You must specify the amount and the sender token.
"""
params = {}
params['RecipientTokenId'] = boto.config.get("FPS", "recipient_token")
params['CallerTokenId'] = boto.config.get("FPS", "caller_token")
params['SenderTokenId'] = sender_token
params['TransactionAmount.Amount'] = str(amount)
params['TransactionAmount.CurrencyCode'] = "USD"
params['ChargeFeeTo'] = charge_fee_to
if(transactionDate != None):
params['TransactionDate'] = transactionDate
if(senderReference != None):
params['SenderReference'] = senderReference
if(recipientReference != None):
params['RecipientReference'] = recipientReference
if(senderDescription != None):
params['SenderDescription'] = senderDescription
if(recipientDescription != None):
params['RecipientDescription'] = recipientDescription
if(callerDescription != None):
params['CallerDescription'] = callerDescription
if(metadata != None):
params['MetaData'] = metadata
if(transactionDate != None):
params['TransactionDate'] = transactionDate
if(reference == None):
reference = uuid.uuid4()
params['CallerReference'] = reference
response = self.make_request("Pay", params)
body = response.read()
if(response.status == 200):
rs = ResultSet()
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs
else:
raise FPSResponseError(response.status, response.reason, body)
| apache-2.0 | 2,692,012,762,430,975,000 | 42.38674 | 251 | 0.631224 | false |
crdoconnor/olympia | apps/users/views.py | 12 | 26509 | import functools
from datetime import datetime
from functools import partial
from django import http
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.forms import PasswordResetForm
from django.contrib.auth.tokens import default_token_generator
from django.db import IntegrityError
from django.db.models import Q, Sum
from django.shortcuts import (get_list_or_404, get_object_or_404, redirect,
render)
from django.template import Context, loader
from django.utils.http import is_safe_url, urlsafe_base64_decode
from django.views.decorators.cache import never_cache
import commonware.log
import waffle
from mobility.decorators import mobile_template
from session_csrf import anonymous_csrf, anonymous_csrf_exempt
from tower import ugettext as _
import amo
import users.notifications as notifications
from abuse.models import send_abuse_report
from access import acl
from access.middleware import ACLMiddleware
from addons.decorators import addon_view_factory
from addons.models import Addon, AddonUser, Category
from amo import messages
from amo.decorators import (json_view, login_required, permission_required,
post_required, write)
from amo.forms import AbuseForm
from amo.urlresolvers import get_url_prefix, reverse
from amo.utils import escape_all, log_cef, send_mail
from bandwagon.models import Collection
from browse.views import PersonasFilter
from translations.query import order_by_translation
from users.models import UserNotification
import tasks
from . import forms
from .models import UserProfile
from .signals import logged_out
from .utils import EmailResetCode, UnsubscribeCode
log = commonware.log.getLogger('z.users')
addon_view = addon_view_factory(qs=Addon.objects.valid)
THEMES_LIMIT = 20
def user_view(f):
@functools.wraps(f)
def wrapper(request, user_id, *args, **kw):
"""Provides a user object given a user ID or username."""
if user_id.isdigit():
key = 'id'
else:
key = 'username'
# If the username is `me` then show the current user's profile.
if (user_id == 'me' and request.amo_user and
request.amo_user.username):
user_id = request.amo_user.username
user = get_object_or_404(UserProfile, **{key: user_id})
return f(request, user, *args, **kw)
return wrapper
@login_required(redirect=False)
@json_view
def ajax(request):
"""Query for a user matching a given email."""
if 'q' not in request.GET:
raise http.Http404()
data = {'status': 0, 'message': ''}
email = request.GET.get('q', '').strip()
if not email:
data.update(message=_('An email address is required.'))
return data
user = UserProfile.objects.filter(email=email)
msg = _('A user with that email address does not exist.')
if user:
data.update(status=1, id=user[0].id, name=user[0].name)
else:
data['message'] = msg
return escape_all(data)
@user_view
def confirm(request, user, token):
if not user.confirmationcode:
return redirect('users.login')
if user.confirmationcode != token:
log.info(u"Account confirmation failed for user (%s)", user)
messages.error(request, _('Invalid confirmation code!'))
return redirect('users.login')
user.confirmationcode = ''
user.save()
messages.success(request, _('Successfully verified!'))
log.info(u"Account confirmed for user (%s)", user)
return redirect('users.login')
@user_view
def confirm_resend(request, user):
if not user.confirmationcode:
return redirect('users.login')
# Potential for flood here if someone requests a confirmationcode and then
# re-requests confirmations. We may need to track requests in the future.
log.info(u"Account confirm re-requested for user (%s)", user)
user.email_confirmation_code()
msg = _(u'An email has been sent to your address {0} to confirm '
u'your account. Before you can log in, you have to activate '
u'your account by clicking on the link provided in this '
u'email.').format(user.email)
messages.info(request, _('Confirmation Email Sent'), msg)
return redirect('users.login')
@login_required
def delete(request):
amouser = request.amo_user
if request.method == 'POST':
form = forms.UserDeleteForm(request.POST, request=request)
if form.is_valid():
messages.success(request, _('Profile Deleted'))
amouser.anonymize()
logout(request)
form = None
return http.HttpResponseRedirect(reverse('users.login'))
else:
form = forms.UserDeleteForm()
return render(request, 'users/delete.html',
{'form': form, 'amouser': amouser})
@login_required
def delete_photo(request):
u = request.amo_user
if request.method == 'POST':
u.picture_type = ''
u.save()
log.debug(u"User (%s) deleted photo" % u)
tasks.delete_photo.delay(u.picture_path)
messages.success(request, _('Photo Deleted'))
return http.HttpResponseRedirect(reverse('users.edit') +
'#user-profile')
return render(request, 'users/delete_photo.html', dict(user=u))
@write
@login_required
def edit(request):
# Don't use request.amo_user since it has too much caching.
amouser = UserProfile.objects.get(pk=request.user.id)
if request.method == 'POST':
# ModelForm alters the instance you pass in. We need to keep a copy
# around in case we need to use it below (to email the user)
original_email = amouser.email
form = forms.UserEditForm(request.POST, request.FILES, request=request,
instance=amouser)
if form.is_valid():
messages.success(request, _('Profile Updated'))
if amouser.email != original_email:
l = {'user': amouser,
'mail1': original_email,
'mail2': amouser.email}
log.info(u"User (%(user)s) has requested email change from "
u"(%(mail1)s) to (%(mail2)s)" % l)
messages.info(
request, _('Email Confirmation Sent'),
_(u'An email has been sent to {0} to confirm your new '
u'email address. For the change to take effect, you '
u'need to click on the link provided in this email. '
u'Until then, you can keep logging in with your '
u'current email address.').format(amouser.email))
token, hash_ = EmailResetCode.create(amouser.id, amouser.email)
url = '%s%s' % (settings.SITE_URL,
reverse('users.emailchange',
args=[amouser.id, token, hash_]))
t = loader.get_template('users/email/emailchange.ltxt')
c = {'domain': settings.DOMAIN, 'url': url}
send_mail(
_('Please confirm your email address '
'change at %s' % settings.DOMAIN),
t.render(Context(c)), None, [amouser.email],
use_blacklist=False, real_email=True)
# Reset the original email back. We aren't changing their
# address until they confirm the new one
amouser.email = original_email
form.save()
return redirect('users.edit')
else:
messages.error(
request,
_('Errors Found'),
_('There were errors in the changes you made. Please correct '
'them and resubmit.'))
else:
form = forms.UserEditForm(instance=amouser, request=request)
return render(request, 'users/edit.html',
{'form': form, 'amouser': amouser})
def tshirt_eligible(user):
MIN_PERSONA_ADU = 10000
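    # A user qualifies if they have already requested a shirt, or they own or
    # develop a reviewed extension (listed and reviewed, or unlisted with
    # signed files), or their public personas add up to at least
    # MIN_PERSONA_ADU average daily users.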
return (
user.t_shirt_requested or
AddonUser.objects.filter(
user=user,
role__in=(amo.AUTHOR_ROLE_OWNER, amo.AUTHOR_ROLE_DEV),
addon__type=amo.ADDON_EXTENSION,
addon__disabled_by_user=False)
.filter(
Q(addon__is_listed=True,
addon___current_version__files__status__in=amo.REVIEWED_STATUSES,
addon__status__in=amo.REVIEWED_STATUSES) |
Q(addon__is_listed=False,
addon__versions__files__is_signed=True))
.exists() or
Addon.objects.filter(
authors=user,
type=amo.ADDON_PERSONA,
status=amo.STATUS_PUBLIC,
disabled_by_user=False)
.aggregate(users=Sum('average_daily_users'))['users'] >=
MIN_PERSONA_ADU)
@write
@login_required
def t_shirt(request):
if not waffle.switch_is_active('t-shirt-orders'):
raise http.Http404()
user = request.user
eligible = tshirt_eligible(user)
if request.method == 'POST':
if not eligible:
messages.error(request,
_("We're sorry, but you are not eligible to "
"request a t-shirt at this time."))
return redirect('users.t-shirt')
if not user.t_shirt_requested:
user.update(t_shirt_requested=datetime.now())
return render(request, 'users/t-shirt.html',
{'eligible': eligible, 'user': user})
@write
@login_required
@permission_required('Users', 'Edit')
@user_view
def admin_edit(request, user):
if request.method == 'POST':
form = forms.AdminUserEditForm(request.POST, request.FILES,
request=request, instance=user)
if form.is_valid():
form.save()
messages.success(request, _('Profile Updated'))
return http.HttpResponseRedirect(reverse('zadmin.index'))
else:
form = forms.AdminUserEditForm(instance=user, request=request)
return render(request, 'users/edit.html', {'form': form, 'amouser': user})
@user_view
def emailchange(request, user, token, hash):
try:
_uid, newemail = EmailResetCode.parse(token, hash)
except ValueError:
return http.HttpResponse(status=400)
if _uid != user.id:
# I'm calling this a warning because invalid hashes up to this point
# could be any number of things, but this is a targeted attack from
# one user account to another
log.warning((u"[Tampering] Valid email reset code for UID (%s) "
u"attempted to change email address for user (%s)") %
(_uid, user))
return http.HttpResponse(status=400)
if UserProfile.objects.filter(email=newemail).exists():
log.warning((u"[Tampering] User (%s) tries to change his email to "
u"an existing account with the same email address (%s)") %
(user, newemail))
return http.HttpResponse(status=400)
user.email = newemail
user.save()
l = {'user': user, 'newemail': newemail}
log.info(u"User (%(user)s) confirmed new email address (%(newemail)s)" % l)
messages.success(
request, _('Your email address was changed successfully'),
_(u'From now on, please use {0} to log in.').format(newemail))
return http.HttpResponseRedirect(reverse('users.edit'))
def _clean_next_url(request):
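    # Sanitise the ?to= redirect target: unsafe (off-host) URLs fall back to
    # LOGIN_REDIRECT_URL, and a cross-domain redirect is only honoured when
    # ?domain= matches an entry in settings.VALID_LOGIN_REDIRECTS.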
gets = request.GET.copy()
url = gets.get('to', settings.LOGIN_REDIRECT_URL)
if not is_safe_url(url, host=request.get_host()):
log.info(u'Unsafe redirect to %s' % url)
url = settings.LOGIN_REDIRECT_URL
domain = gets.get('domain', None)
if domain in settings.VALID_LOGIN_REDIRECTS.keys():
url = settings.VALID_LOGIN_REDIRECTS[domain] + url
gets['to'] = url
request.GET = gets
return request
@anonymous_csrf
@mobile_template('users/{mobile/}login_modal.html')
def login_modal(request, template=None):
return _login(request, template=template)
@anonymous_csrf
@mobile_template('users/{mobile/}login.html')
def login(request, template=None):
return _login(request, template=template)
def _login(request, template=None, data=None, dont_redirect=False):
data = data or {}
# In case we need it later. See below.
get_copy = request.GET.copy()
if 'to' in request.GET:
request = _clean_next_url(request)
if request.user.is_authenticated():
return http.HttpResponseRedirect(
request.GET.get('to', settings.LOGIN_REDIRECT_URL))
limited = getattr(request, 'limited', 'recaptcha_shown' in request.POST)
user = None
login_status = None
if 'username' in request.POST:
try:
# We are doing all this before we try and validate the form.
user = UserProfile.objects.get(email=request.POST['username'])
limited = ((user.failed_login_attempts >=
settings.LOGIN_RATELIMIT_USER) or limited)
login_status = False
except UserProfile.DoesNotExist:
log_cef('Authentication Failure', 5, request,
username=request.POST['username'],
signature='AUTHFAIL',
msg='The username was invalid')
pass
partial_form = partial(forms.AuthenticationForm, use_recaptcha=limited)
r = auth.views.login(request, template_name=template,
redirect_field_name='to',
authentication_form=partial_form,
extra_context=data)
if isinstance(r, http.HttpResponseRedirect):
# Django's auth.views.login has security checks to prevent someone from
# redirecting to another domain. Since we want to allow this in
# certain cases, we have to make a new response object here to replace
# the above.
if 'domain' in request.GET:
request.GET = get_copy
request = _clean_next_url(request)
r = http.HttpResponseRedirect(request.GET['to'])
        # Successful log in according to django. Now we do our checks. I do
# the checks here instead of the form's clean() because I want to use
# the messages framework and it's not available in the request there.
if user.deleted:
logout(request)
log.warning(u'Attempt to log in with deleted account (%s)' % user)
messages.error(request, _('Wrong email address or password!'))
data.update({'form': partial_form()})
user.log_login_attempt(False)
log_cef('Authentication Failure', 5, request,
username=request.user,
signature='AUTHFAIL',
msg='Account is deactivated')
return render(request, template, data)
if user.confirmationcode:
logout(request)
log.info(u'Attempt to log in with unconfirmed account (%s)' % user)
msg1 = _(u'A link to activate your user account was sent by email '
u'to your address {0}. You have to click it before you '
u'can log in.').format(user.email)
url = "%s%s" % (settings.SITE_URL,
reverse('users.confirm.resend', args=[user.id]))
msg2 = _('If you did not receive the confirmation email, make '
'sure your email service did not mark it as "junk '
'mail" or "spam". If you need to, you can have us '
'<a href="%s">resend the confirmation message</a> '
'to your email address mentioned above.') % url
messages.error(request, _('Activation Email Sent'), msg1)
messages.info(request, _('Having Trouble?'), msg2,
title_safe=True, message_safe=True)
data.update({'form': partial_form()})
user.log_login_attempt(False)
return render(request, template, data)
rememberme = request.POST.get('rememberme', None)
if rememberme:
request.session.set_expiry(settings.SESSION_COOKIE_AGE)
log.debug(
u'User (%s) logged in successfully with "remember me" set' %
user)
login_status = True
if dont_redirect:
# We're recalling the middleware to re-initialize amo_user
ACLMiddleware().process_request(request)
r = render(request, template, data)
if login_status is not None:
user.log_login_attempt(login_status)
log_cef('Authentication Failure', 5, request,
username=request.POST['username'],
signature='AUTHFAIL',
msg='The password was incorrect')
return r
def logout(request):
user = request.user
if not user.is_anonymous():
log.debug(u"User (%s) logged out" % user)
auth.logout(request)
if 'to' in request.GET:
request = _clean_next_url(request)
next = request.GET.get('to')
if not next:
next = settings.LOGOUT_REDIRECT_URL
prefixer = get_url_prefix()
if prefixer:
next = prefixer.fix(next)
response = http.HttpResponseRedirect(next)
# Fire logged out signal.
logged_out.send(None, request=request, response=response)
return response
@user_view
def profile(request, user):
# Get user's own and favorite collections, if they allowed that.
own_coll = fav_coll = []
if user.display_collections:
own_coll = (Collection.objects.listed().filter(author=user)
.order_by('-created'))[:10]
if user.display_collections_fav:
fav_coll = (Collection.objects.listed()
.filter(following__user=user)
.order_by('-following__created'))[:10]
edit_any_user = acl.action_allowed(request, 'Users', 'Edit')
own_profile = (request.user.is_authenticated() and
request.amo_user.id == user.id)
addons = []
personas = []
limited_personas = False
if user.is_developer:
addons = user.addons.reviewed().filter(
addonuser__user=user, addonuser__listed=True)
personas = addons.filter(type=amo.ADDON_PERSONA).order_by(
'-persona__popularity')
if personas.count() > THEMES_LIMIT:
limited_personas = True
personas = personas[:THEMES_LIMIT]
addons = addons.exclude(type=amo.ADDON_PERSONA).order_by(
'-weekly_downloads')
addons = amo.utils.paginate(request, addons, 5)
reviews = amo.utils.paginate(request, user.reviews.all())
data = {'profile': user, 'own_coll': own_coll, 'reviews': reviews,
'fav_coll': fav_coll, 'edit_any_user': edit_any_user,
'addons': addons, 'own_profile': own_profile,
'personas': personas, 'limited_personas': limited_personas,
'THEMES_LIMIT': THEMES_LIMIT}
if not own_profile:
data['abuse_form'] = AbuseForm(request=request)
return render(request, 'users/profile.html', data)
@user_view
def themes(request, user, category=None):
cats = Category.objects.filter(type=amo.ADDON_PERSONA)
ctx = {
'profile': user,
'categories': order_by_translation(cats, 'name'),
'search_cat': 'themes'
}
if user.is_artist:
base = user.addons.reviewed().filter(
type=amo.ADDON_PERSONA,
addonuser__user=user, addonuser__listed=True)
if category:
qs = cats.filter(slug=category)
ctx['category'] = cat = get_list_or_404(qs)[0]
base = base.filter(categories__id=cat.id)
else:
base = Addon.objects.none()
filter_ = PersonasFilter(request, base, key='sort',
default='popular')
addons = amo.utils.paginate(request, filter_.qs, 30,
count=base.count())
ctx.update({
'addons': addons,
'filter': filter_,
'sorting': filter_.field,
'sort_opts': filter_.opts
})
return render(request, 'browse/personas/grid.html', ctx)
@anonymous_csrf
def register(request):
if request.user.is_authenticated():
messages.info(request, _('You are already logged in to an account.'))
form = None
elif request.method == 'POST':
form = forms.UserRegisterForm(request.POST)
mkt_user = UserProfile.objects.filter(email=form.data['email'],
password='')
if form.is_valid():
try:
u = form.save(commit=False)
u.set_password(form.cleaned_data['password'])
u.generate_confirmationcode()
u.lang = request.LANG
u.save()
log.info(u'Registered new account for user (%s)', u)
log_cef('New Account', 5, request, username=u.username,
signature='AUTHNOTICE',
msg='User created a new account')
u.email_confirmation_code()
msg = _('Congratulations! Your user account was '
'successfully created.')
messages.success(request, msg)
msg = _(u'An email has been sent to your address {0} to '
'confirm your account. Before you can log in, you '
'have to activate your account by clicking on the '
'link provided in this email.').format(u.email)
messages.info(request, _('Confirmation Email Sent'), msg)
except IntegrityError, e:
# I was unable to reproduce this, but I suspect it happens
# when they POST twice quickly and the slaves don't have the
# new info yet (total guess). Anyway, I'm assuming the
# first one worked properly, so this is still a success
# case to the end user so we just log it...
log.error('Failed to register new user (%s): %s' % (u, e))
return http.HttpResponseRedirect(reverse('users.login'))
elif mkt_user.exists():
f = PasswordResetForm()
f.users_cache = [mkt_user[0]]
f.save(use_https=request.is_secure(),
email_template_name='users/email/pwreset.ltxt',
request=request)
return render(request, 'users/newpw_sent.html', {})
else:
messages.error(request, _('There are errors in this form'),
_('Please correct them and resubmit.'))
else:
form = forms.UserRegisterForm()
reg_action = reverse('users.register')
return render(request, 'users/register.html',
{'form': form, 'register_action': reg_action})
@anonymous_csrf_exempt
@user_view
def report_abuse(request, user):
form = AbuseForm(request.POST or None, request=request)
if request.method == 'POST' and form.is_valid():
send_abuse_report(request, user, form.cleaned_data['text'])
messages.success(request, _('User reported.'))
else:
return render(request, 'users/report_abuse_full.html',
{'profile': user, 'abuse_form': form})
return redirect(user.get_url_path())
@post_required
@user_view
def remove_locale(request, user):
"""Remove a locale from the user's translations."""
POST = request.POST
if 'locale' in POST and POST['locale'] != settings.LANGUAGE_CODE:
user.remove_locale(POST['locale'])
return http.HttpResponse()
return http.HttpResponseBadRequest()
@never_cache
@anonymous_csrf
def password_reset_confirm(request, uidb64=None, token=None):
"""
Pulled from django contrib so that we can add user into the form
so then we can show relevant messages about the user.
"""
assert uidb64 is not None and token is not None
user = None
try:
uid_int = urlsafe_base64_decode(uidb64)
user = UserProfile.objects.get(id=uid_int)
except (ValueError, UserProfile.DoesNotExist):
pass
if user is not None and default_token_generator.check_token(user, token):
validlink = True
if request.method == 'POST':
form = forms.SetPasswordForm(user, request.POST)
if form.is_valid():
form.save()
log_cef('Password Changed', 5, request,
username=user.username,
signature='PASSWORDCHANGED',
msg='User changed password')
return redirect(reverse('django.contrib.auth.'
'views.password_reset_complete'))
else:
form = forms.SetPasswordForm(user)
else:
validlink = False
form = None
return render(request, 'users/pwreset_confirm.html',
{'form': form, 'validlink': validlink})
@never_cache
def unsubscribe(request, hash=None, token=None, perm_setting=None):
"""
Pulled from django contrib so that we can add user into the form
so then we can show relevant messages about the user.
"""
assert hash is not None and token is not None
user = None
try:
email = UnsubscribeCode.parse(token, hash)
user = UserProfile.objects.get(email=email)
except (ValueError, UserProfile.DoesNotExist):
pass
perm_settings = []
if user is not None:
unsubscribed = True
if not perm_setting:
# TODO: make this work. nothing currently links to it, though.
perm_settings = [l for l in notifications.NOTIFICATIONS
if not l.mandatory]
else:
perm_setting = notifications.NOTIFICATIONS_BY_SHORT[perm_setting]
UserNotification.update_or_create(
update={'enabled': False},
user=user, notification_id=perm_setting.id)
perm_settings = [perm_setting]
else:
unsubscribed = False
email = ''
return render(request, 'users/unsubscribe.html',
{'unsubscribed': unsubscribed, 'email': email,
'perm_settings': perm_settings})
| bsd-3-clause | 8,681,261,902,699,199,000 | 35.313699 | 79 | 0.595911 | false |
Alecto3-D/testable-greeter | bb-master/sandbox/lib/python3.5/site-packages/buildbot/db/migrate/versions/049_add_schedulers_enabled.py | 11 | 1161 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import sqlalchemy as sa
from buildbot.util import sautils
def upgrade(migrate_engine):
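    # Add an "enabled" smallint column (server default 1) to the existing
    # schedulers table.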
metadata = sa.MetaData()
metadata.bind = migrate_engine
schedulers_table = sautils.Table('schedulers', metadata, autoload=True)
enabled = sa.Column('enabled', sa.SmallInteger,
nullable=False, server_default="1")
enabled.create(schedulers_table)
| mit | -821,195,806,334,519,400 | 37.7 | 79 | 0.74677 | false |
peterbe/django-semanticui-form | semanticuiform/templatetags/semanticui.py | 1 | 3411 | from django import forms
from django.template import Context
from django.template.loader import get_template
from django import template
from semanticuiform import config
register = template.Library()
@register.filter
def semanticui(element):
markup_classes = {'label': '', 'value': '', 'single_value': ''}
return render(element, markup_classes, False)
@register.filter
def semanticui_inline(element, label_cols=''):
markup_classes = {'label': label_cols, 'value': '', 'single_value': ''}
# for cl in label_cols.split(' '):
# splitted_class = cl.split('-')
#
# try:
# value_nb_cols = int(splitted_class[-1])
# except ValueError:
# value_nb_cols = config.BOOTSTRAP_COLUMN_COUNT
#
# if value_nb_cols >= config.BOOTSTRAP_COLUMN_COUNT:
# splitted_class[-1] = config.BOOTSTRAP_COLUMN_COUNT
# else:
# offset_class = cl.split('-')
# offset_class[-1] = 'offset-' + str(value_nb_cols)
# splitted_class[-1] = str(config.BOOTSTRAP_COLUMN_COUNT - value_nb_cols)
# markup_classes['single_value'] += ' ' + '-'.join(offset_class)
# markup_classes['single_value'] += ' ' + '-'.join(splitted_class)
#
# markup_classes['value'] += ' ' + '-'.join(splitted_class)
return render(element, markup_classes, True)
# @register.filter
# def add_input_classes(field):
# if not is_checkbox(field) and not is_multiple_checkbox(field) \
# and not is_radio(field) and not is_file(field):
# field_classes = field.field.widget.attrs.get('class', '')
# field_classes += ' form-control'
# field.field.widget.attrs['class'] = field_classes
def render(element, markup_classes, is_inline):
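    # Dispatch on the element type: a single BoundField renders field.html, a
    # formset (detected via its management_form attribute) renders
    # formset.html, and anything else is treated as a whole form.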
element_type = element.__class__.__name__.lower()
if element_type == 'boundfield':
# add_input_classes(element)
template = get_template("semanticui/field.html")
context = Context({
'field': element,
'classes': markup_classes,
'form': element.form,
'is_inline': is_inline,
})
else:
has_management = getattr(element, 'management_form', None)
if has_management:
# for form in element.forms:
# for field in form.visible_fields():
# add_input_classes(field)
template = get_template("semanticui/formset.html")
context = Context({
'formset': element,
'classes': markup_classes,
'is_inline': is_inline,
})
else:
# for field in element.visible_fields():
# add_input_classes(field)
template = get_template("semanticui/form.html")
context = Context({
'form': element,
'classes': markup_classes,
'is_inline': is_inline,
})
return template.render(context)
@register.filter
def is_checkbox(field):
return isinstance(field.field.widget, forms.CheckboxInput)
@register.filter
def is_multiple_checkbox(field):
return isinstance(field.field.widget, forms.CheckboxSelectMultiple)
@register.filter
def is_radio(field):
return isinstance(field.field.widget, forms.RadioSelect)
@register.filter
def is_file(field):
return isinstance(field.field.widget, forms.FileInput)
| bsd-3-clause | 5,753,463,180,729,247,000 | 31.485714 | 85 | 0.59484 | false |
Artanis/pygcui | pygcui/widgets.py | 1 | 13022 | """PygCUI widgets
Copyright (c) 2012, Erik Youngren
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of Erik Youngren.
"""
import logging
import collections
from itertools import repeat, accumulate
import pygame
import pygcurse
import util
import local
logging.basicConfig(
level=logging.NOTSET,
format='[%(levelname)s] %(message)s')
def mainloop():
"""Gather and dispatch events.
TODO: Write.
"""
running = True
while running:
pass
class Widget:
"""Base class for all pygui widgets.
"""
def __init__(self):
logging.info("new Widget {0!r}".format(self))
def render(self, surface):
pass
class Container(Widget):
"""Base class for widgets that contain other widgets.
"""
def __init__(self):
super().__init__()
def add(self, widget):
"""Adds the widget to the container, using simple defaults for multiple-child containers.
"""
logging.info('add {0!r} to {1!r}'.format(widget, self))
def remove(self, widget):
"""Removes the widget from the container.
"""
logging.info('remove {0!r} from {1!r}'.format(widget, self))
def __iter__(self):
"""Returns an iterator for all non-internal child widgets contained.
"""
pass
class Bin(Container):
"""Base class for containers with one child.
"""
def __init__(self, child=None):
super().__init__()
self.child = child
def add(self, widget):
super().add(widget)
self.child = widget
def remove(self, widget):
super().remove(widget)
if self.child is widget:
self.child = None
def render(self, surface):
super().render(surface)
if self.child is not None:
self.child.render(surface)
def __iter__(self):
if self.child is not None:
return iter([self.child])
else:
return iter([])
class Button(Bin):
"""A push button that emits a signal when activated.
"""
def __init__(self, label=None):
super().__init__(label)
self.alignx = local.ALIGN_CENTER
self.aligny = local.ALIGN_MIDDLE
self.fgcolor = 'black'
self.bgcolor = 'gray'
def clicked(self):
        # NotImplementedError is the exception type; bare NotImplemented is a
        # value and cannot meaningfully be raised.
        raise NotImplementedError
def render(self, surface):
super().render(surface)
class ToggleButton(Button):
"""A button that retains its state.
"""
pass
class CheckButton(ToggleButton):
"""A toggle button styled as a checkbox and label.
"""
pass
class RadioButton(CheckButton):
"""A toggle button that is mutually exclusive with other radio buttons in
it's group.
"""
pass
class ComboBox(Bin):
"""Choose from a list of items.
TODO: Figure out how this will work. See Menu.
"""
pass
class Expander(Bin):
"""A widget that can hide it's child.
"""
pass
class Frame(Bin):
"""A bin with a decorative frame and optional label.
"""
def __init__(self, label=None):
self.label = label
super().__init__()
self.fgcolor = 'black'
self.bgcolor = 'gray'
def render(self, surface):
surface.fill(' ', bgcolor='gray')
# render children
if self.child is not None:
child_width, child_height = surface.width-2, surface.height-2
if child_width > 0 and child_height > 0:
child_surface = pygcurse.PygcurseSurface(
surface.width-2, surface.height-2)
super().render(child_surface)
child_surface.paste(None, surface,
(1, 1, surface.right-1, surface.bottom-1))
# render label
if self.label is not None:
lbl_width, lbl_height = self.label.size
if lbl_width > surface.width - 2:
lbl_width = surface.width - 2
elif lbl_width+2 < surface.width:
lbl_width = lbl_width + 2
if lbl_width > 0 and lbl_height > 0:
lbl_surface = pygcurse.PygcurseSurface(lbl_width, lbl_height)
self.label.render(lbl_surface)
lbl_surface.paste(None, surface,
(surface.centerx - lbl_surface.width // 2, 0,
lbl_surface.width, lbl_surface.height))
def __repr__(self):
return "<pygcui.Frame {0!r}>".format(str(self.label))
class Item(Bin):
"""Base class for MenuItem.
Note: Probably will be removed.
"""
pass
class MenuItem(Item):
"""Widget used as an item in menus.
"""
pass
class CheckMenuItem(MenuItem):
"""A toggle-able menu item styled as a checkbox and label.
"""
pass
class RadioMenuItem(CheckMenuItem):
"""A check menu item that is mutually exclusive with other radio menu items
    in its group.
"""
pass
class SeparatorMenuItem(MenuItem):
"""A widget that places a horizontal line in a menu.
"""
pass
class ScrolledWindow(Bin):
"""Add scrollbars to the child widget.
"""
pass
class Viewport(Bin):
"""Displays a portion of a larger widget.
Can be used to allow scrolling of the larger widget. The larger widget
    will render at its preferred size, and the Viewport will choose which
part is copied to the allocated surface.
"""
pass
class Window(Bin):
"""A top-level widget that contains one child.
Creates a pygcurse.PygcurseWindow.
"""
def __init__(self, width=80, height=25, caption=None, fullscreen=False):
self._caption = caption
self._width = width
self._height = height
super().__init__()
self._windowsurface = pygcurse.PygcurseWindow(
self._width, self._height, self._caption,
fullscreen=fullscreen)
self._windowsurface.autoupdate = False
def render(self, surface=None):
logging.debug("render {0!r}".format(self))
super().render(self._windowsurface)
self._windowsurface.update()
def __repr__(self):
return "<pygcui.Window {0!r} ({1}, {2})>".format(
self._caption, self._width, self._height)
class Box(Container):
"""A class containing multiple widgets, organized into vertical or
horizontal stacks.
TODO: Respect homogeneity (currently forces homogeneous).
"""
def __init__(self, vertical=True):
super().__init__()
self.homogenous = True
self.vertical = vertical
self.children = collections.OrderedDict()
def pack_start(self, child, expand=True, fill=True):
logging.info(
"packing {0!r} into {1!r} (start, expand={2}, fill={3})".format(
child, self, expand, fill))
if not expand:
logging.warning("IGNORED: expand=False")
if not expand and not fill:
logging.warning("IGNORED: fill=False")
# FIXME: This is WRONG. See docs.
self.children[child] = util.Packing(
expand, fill, local.PACK_START)
def pack_end(self, child, expand=True, fill=True):
logging.info(
"packing {0!r} into {1!r} (end, expand={2}, fill={3})".format(
child, self, expand, fill))
if not expand:
logging.warning("IGNORED: expand=False")
if not expand and not fill:
logging.warning("IGNORED: fill=False")
# FIXME: This is WRONG. See docs.
self.children[child] = util.Packing(
expand, fill, local.PACK_END)
self.children.move_to_end(child, last=False)
def _child_allocations(self, width, height):
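        # Split the box's height (vertical) or width (horizontal) evenly among
        # the children, giving any leftover cells to the first few, and return
        # one (x, y, width, height) region per child in packing order.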
if len(self.children) < 1:
return []
if not self.homogenous:
logging.warning("IGNORED: homogeneous=False")
if self.vertical:
size = height
else:
size = width
extra = size // len(self.children)
n_extra = size % len(self.children)
logging.debug((
"distributing {0} of {1} available space to each of {2} children. "
"{3} left.").format(extra, size, len(self.children), n_extra))
sizes = [extra for n in self.children]
logging.debug("distributing extra space to children.")
for i, s in enumerate(sizes[:]):
if n_extra < 1:
break
sizes[i] = sizes[i] + 1
n_extra = n_extra - 1
logging.debug("calculating regions")
if self.vertical:
sizes = tuple(zip(
repeat(0, len(sizes)),
(s-sizes[i] for i, s in enumerate(accumulate(sizes))),
repeat(width, len(sizes)),
sizes))
else:
sizes = tuple(zip(
(s-sizes[i] for i, s in enumerate(accumulate(sizes))),
repeat(0, len(sizes)),
sizes,
repeat(height, len(sizes))))
logging.debug("final allocations {0!r}".format(sizes))
return sizes
def add(self, widget):
self.pack_start(widget)
def remove(self, widget):
try:
del self.children[widget]
except KeyError:
raise ValueError((
"Non-related widget. "
"Widget must be a child of container."))
def render(self, surface):
super().render(surface)
sizes = self._child_allocations(*surface.size)
for child, region in zip(self.children, sizes):
child_surface = pygcurse.PygcurseSurface(
region[2], region[3])
child.render(child_surface)
child_surface.paste(None, surface, region)
def __iter__(self):
return iter(self.children)
class MenuShell(Container):
"""A base class for menus.
"""
pass
class Menu(MenuShell):
"""A drop down menu.
TODO: Figure out how this will work. See combobox.
"""
pass
class MenuBar(MenuShell):
"""Displays MenuItems horizontally.
"""
pass
class Table(Container):
"""Arranges widgets in a grid
"""
pass
class TextView(Container):
"""Displays text.
"""
pass
class DrawingArea(Widget):
"""Widget for custom user-interface elements.
"""
pass
class Entry(Widget):
"""A single-line text entry field.
"""
pass
class Label(Widget):
"""Displays a single line of non-editable text.
"""
def __init__(self, text=None):
self.text = text
self.fgcolor = 'gray'
self.bgcolor = 'black'
self.alignx = local.ALIGN_CENTER
self.aligny = local.ALIGN_MIDDLE
@property
def size(self):
return (len(self.text), 1)
def render(self, surface):
"""Render the label to the given surface.
"""
super().render(surface)
w, h = self.size
x = util.calculate_align_x(self.alignx, w, surface.width)
y = util.calculate_align_y(self.aligny, h, surface.height)
logging.debug('render label {0!r} in ({1}, {2}) at ({3}, {4})'.format(self, surface.width, surface.height, x, y))
surface.putchars(self.text, x=x, y=y, bgcolor=self.bgcolor, fgcolor=self.fgcolor)
def __str__(self):
return self.text
def __repr__(self):
return "<pygcui.Label {0!r}>".format(self.text)
class Range(Widget):
"""Base class for widgets that allow the user to set a value in a
range.
"""
pass
| bsd-2-clause | -3,674,857,383,054,125,600 | 24.992016 | 121 | 0.593764 | false |
croeder/ccp_nlp | doc/src/main/python/doi.py | 1 | 1550 | #!/Library/Frameworks/Python.framework/Versions/3.3/bin/python
import urllib.request
import psycopg2
import sys
import time
### NOTE: the conversion service tends to start failing after a few minutes
### of continuous requests, hence the per-batch DELAY below.
# hits the PubMed Central ID converter to map PMIDs to DOIs
PMID="http://www.pubmedcentral.nih.gov/utils/idconv/v1.0/"
OPTIONS="&versions=no&format=csv"
# hits postgres for pmid ids
DBNAME="medline"
USER="postgres"
PASSWORD="P0stgr3s!"
HOST="140.226.123.80"
batchSize=100
DELAY=10
def convertPmidBatch(IDS):
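    # Query the PMC ID converter with a comma-separated batch of PMIDs and
    # print a "pmid,doi" line for every record that actually has a DOI.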
h = urllib.request.urlopen(PMID + "?ids=" + IDS + OPTIONS)
response = h.read()
responseStr = str(response, encoding='utf8')
# "PMID","PMCID","DOI","Version","MID","IsCurrent","IsLive","ReleaseDate","Msg"
for line in responseStr.splitlines()[1:] :
(pmid, pmcid, doi) =line.split(",")[0:3]
if (doi.strip("\"") != ""):
print(pmid + "," + doi)
h.close()
def fetchPmidBatches(batchNum):
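    # Pull the PMIDs for one medline_batches id from Postgres and return them
    # as a list of comma-separated strings, at most batchSize ids per string.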
idStringList = []
conn = psycopg2.connect(database=DBNAME, host=HOST, user=USER, password=PASSWORD)
cursor = conn.cursor()
cursor.execute("select pmid from medline_batches where id = " + str(batchNum))
batchNumber=0
batch = cursor.fetchmany(batchSize)
while batch:
idString=""
for row in batch:
idString= idString + "," + str(row[0])
idStringList.append(idString[1:])
batch = cursor.fetchmany(batchSize)
batchNumber += 1
cursor.close()
conn.close()
return idStringList
##for batchNum in range(0,8316):
for batchNum in range(7000,7100):
for idString in fetchPmidBatches(batchNum):
sys.stderr.write(str(batchNum) + "\n")
convertPmidBatch(idString)
time.sleep(DELAY)
| bsd-3-clause | -8,782,411,980,336,529,000 | 25.724138 | 82 | 0.708387 | false |
codrut3/tensorflow | tensorflow/contrib/layers/python/layers/__init__.py | 57 | 1864 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""layers module with higher level NN primitives."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.layers.python.layers.embedding_ops import *
from tensorflow.contrib.layers.python.layers.encoders import *
from tensorflow.contrib.layers.python.layers.feature_column import *
from tensorflow.contrib.layers.python.layers.feature_column_ops import *
from tensorflow.contrib.layers.python.layers.initializers import *
from tensorflow.contrib.layers.python.layers.layers import *
from tensorflow.contrib.layers.python.layers.normalization import *
from tensorflow.contrib.layers.python.layers.optimizers import *
from tensorflow.contrib.layers.python.layers.regularizers import *
from tensorflow.contrib.layers.python.layers.rev_block_lib import *
from tensorflow.contrib.layers.python.layers.summaries import *
from tensorflow.contrib.layers.python.layers.target_column import *
from tensorflow.contrib.layers.python.ops.bucketization_op import *
from tensorflow.contrib.layers.python.ops.sparse_feature_cross_op import *
# pylint: enable=wildcard-import
| apache-2.0 | 8,452,668,939,315,521,000 | 50.777778 | 80 | 0.769313 | false |
spodkowinski/cassandra-dtest | offline_tools_test.py | 3 | 21854 | import json
import os
import random
import re
import subprocess
from ccmlib import common
from ccmlib.node import ToolError
from dtest import Tester, debug, create_ks
from tools.decorators import since
class TestOfflineTools(Tester):
# In 2.0, we will get this error log message due to jamm not being
# in the classpath
ignore_log_patterns = ["Unable to initialize MemoryMeter"]
def sstablelevelreset_test(self):
"""
Insert data and call sstablelevelreset on a series of
tables. Confirm level is reset to 0 using its output.
        Test a variety of possible errors and ensure the response is reasonable.
@since 2.1.5
@jira_ticket CASSANDRA-7614
"""
cluster = self.cluster
cluster.populate(1).start(wait_for_binary_proto=True)
node1 = cluster.nodelist()[0]
# test by trying to run on nonexistent keyspace
cluster.stop(gently=False)
try:
node1.run_sstablelevelreset("keyspace1", "standard1")
except ToolError as e:
self.assertIn("ColumnFamily not found: keyspace1/standard1", e.message)
# this should return exit code 1
self.assertEqual(e.exit_status, 1, "Expected sstablelevelreset to have a return code of 1, but instead return code was {}".format(e.exit_status))
# now test by generating keyspace but not flushing sstables
cluster.start(wait_for_binary_proto=True)
node1.stress(['write', 'n=100', 'no-warmup', '-schema', 'replication(factor=1)',
'-rate', 'threads=8'])
cluster.stop(gently=False)
output, error, rc = node1.run_sstablelevelreset("keyspace1", "standard1")
self._check_stderr_error(error)
self.assertIn("Found no sstables, did you give the correct keyspace", output)
self.assertEqual(rc, 0, msg=str(rc))
# test by writing small amount of data and flushing (all sstables should be level 0)
cluster.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node1)
session.execute("ALTER TABLE keyspace1.standard1 with compaction={'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb':1};")
node1.stress(['write', 'n=1K', 'no-warmup', '-schema', 'replication(factor=1)',
'-rate', 'threads=8'])
node1.flush()
cluster.stop(gently=False)
output, error, rc = node1.run_sstablelevelreset("keyspace1", "standard1")
self._check_stderr_error(error)
self.assertIn("since it is already on level 0", output)
self.assertEqual(rc, 0, msg=str(rc))
# test by loading large amount data so we have multiple levels and checking all levels are 0 at end
cluster.start(wait_for_binary_proto=True)
node1.stress(['write', 'n=50K', 'no-warmup', '-schema', 'replication(factor=1)',
'-rate', 'threads=8'])
cluster.flush()
self.wait_for_compactions(node1)
cluster.stop()
initial_levels = self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"]))
_, error, rc = node1.run_sstablelevelreset("keyspace1", "standard1")
final_levels = self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"]))
self._check_stderr_error(error)
self.assertEqual(rc, 0, msg=str(rc))
debug(initial_levels)
debug(final_levels)
        # let's make sure there was at least an L1 sstable before resetting levels
self.assertTrue(max(initial_levels) > 0)
# let's check all sstables are on L0 after sstablelevelreset
self.assertTrue(max(final_levels) == 0)
def get_levels(self, data):
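        # sstablemetadata output arrives as (stdout, stderr, rc); collect every
        # "SSTable Level: N" value found in stdout.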
(out, err, rc) = data
return map(int, re.findall("SSTable Level: ([0-9])", out))
def wait_for_compactions(self, node):
pattern = re.compile("pending tasks: 0")
while True:
output, err, _ = node.nodetool("compactionstats")
if pattern.search(output):
break
def sstableofflinerelevel_test(self):
"""
Generate sstables of varying levels.
        Reset sstables to L0 with sstablelevelreset.
        Run sstableofflinerelevel and ensure tables are promoted correctly.
        Also test a variety of bad inputs, including a nonexistent keyspace and missing sstables.
        @since 2.1.5
        @jira_ticket CASSANDRA-8031
"""
cluster = self.cluster
cluster.set_configuration_options(values={'compaction_throughput_mb_per_sec': 0})
cluster.populate(1).start(wait_for_binary_proto=True)
node1 = cluster.nodelist()[0]
# NOTE - As of now this does not return when it encounters Exception and causes test to hang, temporarily commented out
# test by trying to run on nonexistent keyspace
# cluster.stop(gently=False)
# output, error, rc = node1.run_sstableofflinerelevel("keyspace1", "standard1", output=True)
# self.assertTrue("java.lang.IllegalArgumentException: Unknown keyspace/columnFamily keyspace1.standard1" in error)
# # this should return exit code 1
# self.assertEqual(rc, 1, msg=str(rc))
# cluster.start()
# now test by generating keyspace but not flushing sstables
node1.stress(['write', 'n=1', 'no-warmup',
'-schema', 'replication(factor=1)',
'-col', 'n=FIXED(10)', 'SIZE=FIXED(1024)',
'-rate', 'threads=8'])
cluster.stop(gently=False)
try:
output, error, _ = node1.run_sstableofflinerelevel("keyspace1", "standard1")
except ToolError as e:
self.assertIn("No sstables to relevel for keyspace1.standard1", e.stdout)
self.assertEqual(e.exit_status, 1, msg=str(e.exit_status))
# test by flushing (sstable should be level 0)
cluster.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node1)
debug("Altering compaction strategy to LCS")
session.execute("ALTER TABLE keyspace1.standard1 with compaction={'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb':1};")
node1.stress(['write', 'n=1K', 'no-warmup',
'-schema', 'replication(factor=1)',
'-col', 'n=FIXED(10)', 'SIZE=FIXED(1024)',
'-rate', 'threads=8'])
node1.flush()
cluster.stop()
output, _, rc = node1.run_sstableofflinerelevel("keyspace1", "standard1")
self.assertIn("L0=1", output)
self.assertEqual(rc, 0, msg=str(rc))
cluster.start(wait_for_binary_proto=True)
        # test by loading a large amount of data so we have multiple sstables
# must write enough to create more than just L1 sstables
keys = 8 * cluster.data_dir_count
node1.stress(['write', 'n={0}K'.format(keys), 'no-warmup',
'-schema', 'replication(factor=1)',
'-col', 'n=FIXED(10)', 'SIZE=FIXED(1200)',
'-rate', 'threads=8'])
node1.flush()
debug("Waiting for compactions to finish")
self.wait_for_compactions(node1)
debug("Stopping node")
cluster.stop()
debug("Done stopping node")
# Let's reset all sstables to L0
debug("Getting initial levels")
initial_levels = list(self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"])))
self.assertNotEqual([], initial_levels)
debug('initial_levels:')
debug(initial_levels)
debug("Running sstablelevelreset")
node1.run_sstablelevelreset("keyspace1", "standard1")
debug("Getting final levels")
final_levels = list(self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"])))
self.assertNotEqual([], final_levels)
debug('final levels:')
debug(final_levels)
        # let's make sure there were at least 3 levels (L0, L1 and L2)
self.assertGreater(max(initial_levels), 1)
# let's check all sstables are on L0 after sstablelevelreset
self.assertEqual(max(final_levels), 0)
# time to relevel sstables
debug("Getting initial levels")
initial_levels = self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"]))
debug("Running sstableofflinerelevel")
output, error, _ = node1.run_sstableofflinerelevel("keyspace1", "standard1")
debug("Getting final levels")
final_levels = self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"]))
debug(output)
debug(error)
debug(initial_levels)
debug(final_levels)
# let's check sstables were promoted after releveling
self.assertGreater(max(final_levels), 1)
@since('2.2')
def sstableverify_test(self):
"""
Generate sstables and test offline verification works correctly
Test on bad input: nonexistent keyspace and sstables
Test on potential situations: deleted sstables, corrupted sstables
"""
cluster = self.cluster
cluster.populate(3).start(wait_for_binary_proto=True)
node1, node2, node3 = cluster.nodelist()
# test on nonexistent keyspace
try:
(out, err, rc) = node1.run_sstableverify("keyspace1", "standard1")
except ToolError as e:
self.assertIn("Unknown keyspace/table keyspace1.standard1", e.message)
self.assertEqual(e.exit_status, 1, msg=str(e.exit_status))
# test on nonexistent sstables:
node1.stress(['write', 'n=100', 'no-warmup', '-schema', 'replication(factor=3)',
'-rate', 'threads=8'])
(out, err, rc) = node1.run_sstableverify("keyspace1", "standard1")
self.assertEqual(rc, 0, msg=str(rc))
# Generate multiple sstables and test works properly in the simple case
node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)',
'-rate', 'threads=8'])
node1.flush()
node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)',
'-rate', 'threads=8'])
node1.flush()
cluster.stop()
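        # run sstableverify offline against the flushed sstables and expect a clean exit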
(out, error, rc) = node1.run_sstableverify("keyspace1", "standard1")
self.assertEqual(rc, 0, msg=str(rc))
# STDOUT of the sstableverify command consists of multiple lines which may contain
# Java-normalized paths. To later compare these with Python-normalized paths, we
# map over each line of out and replace Java-normalized paths with Python equivalents.
outlines = map(lambda line: re.sub("(?<=path=').*(?=')",
lambda match: os.path.normcase(match.group(0)),
line),
out.splitlines())
# check output is correct for each sstable
sstables = self._get_final_sstables(node1, "keyspace1", "standard1")
for sstable in sstables:
verified = False
hashcomputed = False
for line in outlines:
if sstable in line:
if "Verifying BigTableReader" in line:
verified = True
elif "Checking computed hash of BigTableReader" in line:
hashcomputed = True
else:
debug(line)
debug(verified)
debug(hashcomputed)
debug(sstable)
self.assertTrue(verified and hashcomputed)
        # now try intentionally corrupting an sstable to see if the computed hash differs and the corruption is reported
        sstable1 = sstables[1]
        with open(sstable1, 'rb') as f:  # sstables are binary files; read/write them in binary mode
            sstabledata = bytearray(f.read())
        with open(sstable1, 'wb') as out:
            position = random.randrange(0, len(sstabledata))
            sstabledata[position] = (sstabledata[position] + 1) % 256
            out.write(sstabledata)
# use verbose to get some coverage on it
try:
(out, error, rc) = node1.run_sstableverify("keyspace1", "standard1", options=['-v'])
except ToolError as e:
# Process sstableverify output to normalize paths in string to Python casing as above
error = re.sub("(?<=Corrupted: ).*", lambda match: os.path.normcase(match.group(0)), e.message)
self.assertIn("Corrupted: " + sstable1, error)
self.assertEqual(e.exit_status, 1, msg=str(e.exit_status))
def sstableexpiredblockers_test(self):
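        """
        Write one live partition plus two tombstone-only sstables with gc_grace_seconds=0,
        then check that sstableexpiredblockers reports the sstable blocking the expired
        ones from being dropped.
        """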
cluster = self.cluster
cluster.populate(1).start(wait_for_binary_proto=True)
[node1] = cluster.nodelist()
session = self.patient_cql_connection(node1)
create_ks(session, 'ks', 1)
session.execute("create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds=0")
# create a blocker:
session.execute("insert into ks.cf (key, val) values (1,1)")
node1.flush()
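        # create two sstables containing only tombstones; with gc_grace_seconds=0 they expire immediately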
session.execute("delete from ks.cf where key = 2")
node1.flush()
session.execute("delete from ks.cf where key = 3")
node1.flush()
out, error, _ = node1.run_sstableexpiredblockers(keyspace="ks", column_family="cf")
self.assertIn("blocks 2 expired sstables from getting dropped", out)
    # 4.0 removes back compatibility with pre-3.0 versions, so testing upgradesstables for
    # paths from those versions to 4.0 is invalid (and can only fail). There isn't currently
    # any difference between the 3.0 and 4.0 sstable formats, but when the version is
    # bumped for 4.0, remove the max_version & add a case for testing a 3.0 -> 4.0 upgrade
@since('2.2', max_version='3.X')
def sstableupgrade_test(self):
"""
Test that sstableupgrade functions properly offline on a same-version Cassandra sstable, a
stdout message of "Found 0 sstables that need upgrading." should be returned.
"""
# Set up original node version to test for upgrade
cluster = self.cluster
testversion = cluster.version()
original_install_dir = cluster.get_install_dir()
debug('Original install dir: {}'.format(original_install_dir))
# Set up last major version to upgrade from, assuming 2.1 branch is the oldest tested version
if testversion < '2.2':
# Upgrading from 2.0->2.1 fails due to the jamm 0.2.5->0.3.0 jar update.
# ** This will happen again next time jamm version is upgraded.
# CCM doesn't handle this upgrade correctly and results in an error when flushing 2.1:
# Error opening zip file or JAR manifest missing : /home/mshuler/git/cassandra/lib/jamm-0.2.5.jar
# The 2.1 installed jamm version is 0.3.0, but bin/cassandra.in.sh used by nodetool still has 0.2.5
# (when this is fixed in CCM issue #463, install version='github:apache/cassandra-2.0' as below)
self.skipTest('Skipping 2.1 test due to jamm.jar version upgrade problem in CCM node configuration.')
elif testversion < '3.0':
debug('Test version: {} - installing github:apache/cassandra-2.1'.format(testversion))
cluster.set_install_dir(version='github:apache/cassandra-2.1')
# As of 3.5, sstable format 'ma' from 3.0 is still the latest - install 2.2 to upgrade from
elif testversion < '4.0':
debug('Test version: {} - installing github:apache/cassandra-2.2'.format(testversion))
cluster.set_install_dir(version='github:apache/cassandra-2.2')
# From 4.0, one can only upgrade from 3.0
else:
debug('Test version: {} - installing github:apache/cassandra-3.0'.format(testversion))
cluster.set_install_dir(version='github:apache/cassandra-3.0')
# Start up last major version, write out an sstable to upgrade, and stop node
cluster.populate(1).start(wait_for_binary_proto=True)
[node1] = cluster.nodelist()
# Check that node1 is actually what we expect
debug('Downgraded install dir: {}'.format(node1.get_install_dir()))
session = self.patient_cql_connection(node1)
create_ks(session, 'ks', 1)
session.execute('create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds=0')
session.execute('insert into ks.cf (key, val) values (1,1)')
node1.flush()
cluster.stop()
debug('Beginning ks.cf sstable: {}'.format(node1.get_sstables(keyspace='ks', column_family='cf')))
# Upgrade Cassandra to original testversion and run sstableupgrade
cluster.set_install_dir(original_install_dir)
# Check that node1 is actually upgraded
debug('Upgraded to original install dir: {}'.format(node1.get_install_dir()))
# Perform a node start/stop so system tables get internally updated, otherwise we may get "Unknown keyspace/table ks.cf"
cluster.start(wait_for_binary_proto=True)
node1.flush()
cluster.stop()
        # A bit hacky, but we can only upgrade to 4.0 from 3.0, and both currently use the
        # same major sstable format, so there is no upgrading to do.
        # So on 4.0, we only test that sstableupgrade detects there is no
        # upgrade needed. We'll remove that test if 4.0 introduces a major sstable
        # format change before its release.
if testversion < '4.0':
(out, error, rc) = node1.run_sstableupgrade(keyspace='ks', column_family='cf')
debug(out)
debug(error)
debug('Upgraded ks.cf sstable: {}'.format(node1.get_sstables(keyspace='ks', column_family='cf')))
self.assertIn('Found 1 sstables that need upgrading.', out)
# Check that sstableupgrade finds no upgrade needed on current version.
(out, error, rc) = node1.run_sstableupgrade(keyspace='ks', column_family='cf')
debug(out)
debug(error)
self.assertIn('Found 0 sstables that need upgrading.', out)
@since('3.0')
def sstabledump_test(self):
"""
Test that sstabledump functions properly offline to output the contents of a table.
"""
cluster = self.cluster
# disable JBOD conf since the test expects exactly one SSTable to be written.
cluster.set_datadir_count(1)
cluster.populate(1).start(wait_for_binary_proto=True)
[node1] = cluster.nodelist()
session = self.patient_cql_connection(node1)
create_ks(session, 'ks', 1)
session.execute('create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds=0')
session.execute('insert into ks.cf (key, val) values (1,1)')
# delete a partition and then insert a row to test CASSANDRA-13177
session.execute('DELETE FROM ks.cf WHERE key = 2')
session.execute('INSERT INTO ks.cf (key, val) VALUES (2, 2)')
node1.flush()
cluster.stop()
[(out, error, rc)] = node1.run_sstabledump(keyspace='ks', column_families=['cf'])
debug(out)
debug(error)
# Load the json output and check that it contains the inserted key=1
s = json.loads(out)
debug(s)
self.assertEqual(len(s), 2)
# order the rows so that we have key=1 first, then key=2
row0, row1 = s
(row0, row1) = (row0, row1) if row0['partition']['key'] == ['1'] else (row1, row0)
self.assertEqual(row0['partition']['key'], ['1'])
self.assertEqual(row1['partition']['key'], ['2'])
self.assertIsNotNone(row1['partition'].get('deletion_info'))
self.assertIsNotNone(row1.get('rows'))
# Check that we only get the key back using the enumerate option
[(out, error, rc)] = node1.run_sstabledump(keyspace='ks', column_families=['cf'], enumerate_keys=True)
debug(out)
debug(error)
s = json.loads(out)
debug(s)
self.assertEqual(len(s), 2)
dumped_keys = set(row[0] for row in s)
self.assertEqual(set(['1', '2']), dumped_keys)
def _check_stderr_error(self, error):
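        """Assert that every stderr line matches one of the known benign warnings."""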
acceptable = ["Max sstable size of", "Consider adding more capacity", "JNA link failure", "Class JavaLaunchHelper is implemented in both"]
if len(error) > 0:
for line in error.splitlines():
self.assertTrue(any([msg in line for msg in acceptable]),
'Found line \n\n"{line}"\n\n in error\n\n{error}'.format(line=line, error=error))
def _get_final_sstables(self, node, ks, table):
"""
Return the node final sstable data files, excluding the temporary tables.
If sstableutil exists (>= 3.0) then we rely on this tool since the table
file names no longer contain tmp in their names (CASSANDRA-7066).
"""
# Get all sstable data files
allsstables = map(os.path.normcase, node.get_sstables(ks, table))
# Remove any temporary files
tool_bin = node.get_tool('sstableutil')
if os.path.isfile(tool_bin):
args = [tool_bin, '--type', 'tmp', ks, table]
env = common.make_cassandra_env(node.get_install_cassandra_root(), node.get_node_cassandra_root())
p = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
tmpsstables = map(os.path.normcase, stdout.splitlines())
ret = list(set(allsstables) - set(tmpsstables))
else:
ret = [sstable for sstable in allsstables if "tmp" not in sstable[50:]]
return ret