import logging
import cffi
import cle
from sortedcontainers import SortedDict
from ..analysis import Analysis
_l = logging.getLogger('angr.analyses.cfg.cfb')
class CFBlanketView(object):
"""
A view into the control-flow blanket.
"""
def __init__(self, cfb):
self._cfb = cfb
def __getitem__(self, item):
if isinstance(item, slice):
addr = item.start
start_addr = self._cfb.floor_addr(addr)
addr_ = start_addr
while True:
obj = self._cfb[addr_]
yield obj
                addr_ += obj.size
# Find gaps
# TODO: finish it
raise NotImplementedError()
#
# An address can be mapped to one of the following types of object
# - Block
# - MemoryData
# - Unknown
#
class Unknown(object):
def __init__(self, addr, size, bytes_=None, object_=None, segment=None, section=None):
self.addr = addr
self.size = size
# Optional
self.bytes = bytes_
self.object = object_
self.segment = segment
self.section = section
if size == 0:
raise Exception("You cannot create an unknown region of size 0.")
def __repr__(self):
s = "<Unknown %#x-%#x>" % (self.addr, self.addr + self.size)
return s
class CFBlanket(Analysis):
"""
A Control-Flow Blanket is a representation for storing all instructions, data entries, and bytes of a full program.
"""
def __init__(self, cfg=None):
self._blanket = SortedDict()
self._ffi = cffi.FFI()
if cfg is not None:
self._from_cfg(cfg)
else:
_l.debug("CFG is not specified. Initialize CFBlanket from the knowledge base.")
for func in self.kb.functions.values():
self.add_function(func)
def floor_addr(self, addr):
try:
return next(self._blanket.irange(maximum=addr, reverse=True))
except StopIteration:
raise KeyError(addr)
def floor_item(self, addr):
key = self.floor_addr(addr)
return key, self._blanket[key]
def floor_items(self, addr=None):
if addr is None:
start_addr = None
else:
try:
start_addr = next(self._blanket.irange(maximum=addr, reverse=True))
except StopIteration:
start_addr = addr
for key in self._blanket.irange(minimum=start_addr):
yield key, self._blanket[key]
def ceiling_addr(self, addr):
try:
return next(self._blanket.irange(minimum=addr, reverse=False))
except StopIteration:
raise KeyError(addr)
def ceiling_item(self, addr):
key = self.ceiling_addr(addr)
return key, self._blanket[key]
def __getitem__(self, addr):
return self._blanket[addr]
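    # Illustrative sketch (not part of the original code): with objects added at
    # 0x1000 and 0x2000, the accessors above behave roughly like this:
    #
    #   cfb.floor_addr(0x1800)    # -> 0x1000 (closest mapped address <= 0x1800)
    #   cfb.ceiling_addr(0x1800)  # -> 0x2000 (closest mapped address >= 0x1800)
    #   cfb.floor_item(0x1800)    # -> (0x1000, <object mapped at 0x1000>)
    #   cfb.floor_addr(0x800)     # raises KeyError (nothing mapped at or below)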
def add_obj(self, addr, obj):
"""
Adds an object `obj` to the blanket at the specified address `addr`
"""
self._blanket[addr] = obj
def add_function(self, func):
"""
Add a function `func` and all blocks of this function to the blanket.
"""
for block in func.blocks:
self.add_obj(block.addr, block)
def dbg_repr(self):
"""
The debugging representation of this CFBlanket.
:return: The debugging representation of this CFBlanket.
:rtype: str
"""
output = [ ]
for obj in self.project.loader.all_objects:
for section in obj.sections:
if section.memsize == 0:
continue
min_addr, max_addr = section.min_addr, section.max_addr
output.append("### Object %s" % repr(section))
output.append("### Range %#x-%#x" % (min_addr, max_addr))
pos = min_addr
while pos < max_addr:
try:
addr, thing = self.floor_item(pos)
output.append("%#x: %s" % (addr, repr(thing)))
if thing.size == 0: pos += 1
else: pos += thing.size
except KeyError:
pos += 1
output.append("")
return "\n".join(output)
def _from_cfg(self, cfg):
"""
Initialize CFBlanket from a CFG instance.
:param cfg: A CFG instance.
:return: None
"""
        # Let's add all functions first
for func in cfg.kb.functions.values():
self.add_function(func)
self._mark_unknowns()
def _mark_unknowns(self):
"""
Mark all unmapped regions.
:return: None
"""
for obj in self.project.loader.all_objects:
if isinstance(obj, cle.ELF):
# sections?
if obj.sections:
for section in obj.sections:
if not section.memsize or not section.vaddr:
continue
min_addr, max_addr = section.min_addr, section.max_addr
self._mark_unknowns_core(min_addr, max_addr, obj=obj, section=section)
elif obj.segments:
for segment in obj.segments:
if not segment.memsize:
continue
min_addr, max_addr = segment.min_addr, segment.max_addr
self._mark_unknowns_core(min_addr, max_addr, obj=obj, segment=segment)
else:
# is it empty?
_l.warning("Empty ELF object %s.", repr(obj))
elif isinstance(obj, cle.PE):
if obj.sections:
for section in obj.sections:
if not section.memsize:
continue
min_addr, max_addr = section.min_addr, section.max_addr
self._mark_unknowns_core(min_addr, max_addr, obj=obj, section=section)
else:
# is it empty?
_l.warning("Empty PE object %s.", repr(obj))
else:
min_addr, max_addr = obj.min_addr, obj.max_addr
self._mark_unknowns_core(min_addr, max_addr, obj=obj)
def _mark_unknowns_core(self, min_addr, max_addr, obj=None, segment=None, section=None):
try:
addr = self.floor_addr(min_addr)
if addr < min_addr:
raise KeyError
except KeyError:
# there is no other lower address
try:
next_addr = self.ceiling_addr(min_addr)
if next_addr >= max_addr:
raise KeyError
except KeyError:
next_addr = max_addr
size = next_addr - min_addr
if obj is None or isinstance(obj, cle.ExternObject):
bytes_ = None
else:
try:
_l.debug("Loading bytes from object %s, section %s, segmeng %s, addresss %#x.",
obj, section, segment, min_addr)
bytes_ptr, _ = self.project.loader.memory.read_bytes_c(min_addr)
bytes_ = self._ffi.unpack(self._ffi.cast('char*', bytes_ptr), size) # type: str
except KeyError:
# The address does not exist
bytes_ = None
self.add_obj(min_addr,
Unknown(min_addr, size, bytes_=bytes_, object_=obj, segment=segment, section=section)
)
addr = min_addr
while addr < max_addr:
last_addr, last_item = self.floor_item(addr)
if last_addr < min_addr:
# impossible
raise Exception('Impossible')
if last_item.size == 0:
# Make sure everything has a non-zero size
last_item_size = 1
else:
last_item_size = last_item.size
end_addr = last_addr + last_item_size
if end_addr < max_addr:
try:
next_addr = self.ceiling_addr(end_addr)
except KeyError:
next_addr = max_addr
if next_addr > end_addr:
# there is a gap
size = next_addr - end_addr
if obj is None or isinstance(obj, cle.ExternObject):
bytes_ = None
else:
try:
_l.debug("Loading bytes from object %s, section %s, segmeng %s, addresss %#x.",
obj, section, segment, next_addr)
bytes_ptr, _ = self.project.loader.memory.read_bytes_c(next_addr)
bytes_ = self._ffi.unpack(self._ffi.cast('char*', bytes_ptr), size) # type: str
except KeyError:
# The address does not exist
bytes_ = None
self.add_obj(end_addr,
Unknown(end_addr, size, bytes_=bytes_, object_=obj, segment=segment, section=section)
)
addr = next_addr
else:
addr = max_addr
from angr.analyses import AnalysesHub
AnalysesHub.register_default('CFB', CFBlanket)
AnalysesHub.register_default('CFBlanket', CFBlanket)
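# Illustrative usage sketch (an assumption, not part of the original module): with
# an angr Project `proj`, the analysis registered above could be invoked roughly as
#
#   cfg = proj.analyses.CFGFast()
#   cfb = proj.analyses.CFB(cfg)            # or proj.analyses.CFBlanket(cfg)
#   addr, obj = cfb.floor_item(proj.entry)  # object covering the entry point
#   print(cfb.dbg_repr())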
import numpy as np
import tensorflow as tf
from cnn_methods import *
from cnn_class import CNN
import cnn_eval
import time, resource
import os, sys
# math and random are used by flex() and boolean_percent() below
import math
import random
#batching methods
#randomly adds zero padding to some examples to increase minibatch variety
def flex(input_list, params):
    for idx, example in enumerate(input_list):
        if example.shape[0] + params['FLEX'] <= params['MAX_LENGTH']:
            #~30% chance of padding the left side
            if boolean_percent(15):
                example = insert_padding(example, params['FLEX'], True)
            elif boolean_percent(15):
                example = insert_padding(example, int(math.ceil(params['FLEX']/2.0)), True)
            #~30% chance of padding the right
            if boolean_percent(15):
                example = insert_padding(example, params['FLEX'], False)
            elif boolean_percent(15):
                example = insert_padding(example, int(math.ceil(params['FLEX']/2.0)), False)
            #write the (possibly padded) example back so the change is not lost
            input_list[idx] = example
    return input_list
#inserts zero padding for flex--note this is on a np array which has already been processed for feeding into tf
def insert_padding(example, tokens_to_pad, left):
if left:
example = np.concatenate((np.zeros((tokens_to_pad)), example))
else:
example = np.concatenate((example, np.zeros((tokens_to_pad))))
return example
#takes tokenized list_of_examples and pads all to the maximum length
def pad_all(list_of_examples, params):
max_length = get_max_length(list_of_examples)
for i in range(len(list_of_examples)):
list_of_examples[i] = pad_one(list_of_examples[i], max_length, params)
return list_of_examples
#pads all sentences to same length
def pad_one(list_of_word_vecs, max_length, params):
left = (max_length - len(list_of_word_vecs)) / 2
right = left
if (max_length - len(list_of_word_vecs)) % 2 != 0:
right += 1
return np.asarray(([0] * left) + list_of_word_vecs.tolist() + ([0] * right))
#returns a boolean true in percent of cases
def boolean_percent(percent):
return random.randrange(100) < percent
#shuffle two numpy arrays in unison
def shuffle_in_unison(a, b):
rng_state = np.random.get_state()
np.random.shuffle(a)
np.random.set_state(rng_state)
np.random.shuffle(b)
return a, b
# sorts examples in input_x by length
def sort_examples_by_length(x, y):
lengths = []
for i in range(len(x)):
lengths.append(len(x[i]))
new_lengths = []
new_x = []
new_y = []
for i in range(len(lengths)):
for j in range(len(new_lengths)):
if lengths[i] < new_lengths[j]:
new_lengths.insert(j, lengths[i])
new_x.insert(j, x[i])
new_y.insert(j, y[i])
break
else:
new_lengths.append(lengths[i])
new_x.append(x[i])
new_y.append(y[i])
return new_x, new_y
def scramble_batches(params, x, y):
extras = len(x) % params['BATCH_SIZE']
x, y = shuffle_in_unison(x, y)
duplicates_x = []
duplicates_y = []
for i in range(extras):
duplicates_x.append(x[i])
duplicates_y.append(y[i])
x.extend(duplicates_x)
y.extend(duplicates_y)
if params['FLEX'] > 0:
x = flex(x, params)
x, y = sort_examples_by_length(x, y)
batches_x, batches_y = [], []
while len(y) >= params['BATCH_SIZE']:
batches_x.append(pad_all(x[:params['BATCH_SIZE']], params))
batches_y.append(np.asarray(y[:params['BATCH_SIZE']]))
x = x[params['BATCH_SIZE']:]
y = y[params['BATCH_SIZE']:]
return batches_x, batches_y
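# Illustrative sketch (an assumption, not part of the original code):
# scramble_batches shuffles the examples, optionally applies flex padding, sorts
# by length so each minibatch holds similarly-sized examples, and pads every
# minibatch to a common length, e.g. with hypothetical params:
#
#   params = {'BATCH_SIZE': 50, 'FLEX': 0, 'MAX_LENGTH': 60}
#   batches_x, batches_y = scramble_batches(params, train_X, train_Y)
#   # batches_x[0] holds BATCH_SIZE examples padded to the same length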
def separate_train_and_val(train_X, train_Y):
shuffle_in_unison(train_X, train_Y)
val_split = len(train_X)/10
return train_X[val_split:], train_Y[val_split:], train_X[:val_split], train_Y[:val_split]
#remove any existing old chkpt files, ignore nonexistent ones
#DEBUGGING: remove timelog from here
def remove_chkpt_files(epoch, model_dir):
for past_epoch in range(epoch):
file_path = model_dir + 'temp_cnn_eval_epoch%i' %(past_epoch)
if os.path.isfile(file_path) and os.path.isfile(file_path + '.meta'):
os.remove(file_path)
os.remove(file_path + '.meta')
def epoch_write_statements(timelog, init_time, epoch):
timelog.write('\n\n')
timelog.write('epoch %i start time %g\n' %(epoch, time.clock()))
timelog.write('total time spent since epoch 1: %i\n' %((time.time() - init_time)))
timelog.write('avg time per epoch: %g\n' %((time.time() - init_time)/ (epoch + 1)))
timelog.write('CPU usage: %g\n'
%(resource.getrusage(resource.RUSAGE_SELF).ru_utime +
resource.getrusage(resource.RUSAGE_SELF).ru_stime))
timelog.write('memory usage: %g' %(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
#debug method--writes all files in model_dir to file timelog
def print_debug_paths(model_dir, timelog):
for dirname, dirnames, filenames in os.walk(model_dir):
# print path to all subdirectories first.
for subdirname in dirnames:
timelog.write(os.path.join(dirname, subdirname))
# print path to all filenames.
for filename in filenames:
timelog.write(os.path.join(dirname, filename))
#dependency of print_clips method
def l2_loss_float(W):
return tf.sqrt(tf.scalar_mul(tf.convert_to_tensor(2.0), tf.nn.l2_loss(W)))
#debug method--prints the l2 norm of weights for purpose of checking l2 clipping
def print_clips(cnn, sess, params):
check_weights = tf.reduce_sum(cnn.weights[0]).eval(session=sess)
check_biases = tf.reduce_sum(cnn.biases[0]).eval(session=sess)
check_Wfc = tf.reduce_sum(cnn.W_fc).eval(session=sess)
check_bfc = tf.reduce_sum(cnn.b_fc).eval(session=sess)
cnn.clip_vars(params)
weights_2 = tf.reduce_sum(cnn.weights[0]).eval(session=sess)
biases_2 = tf.reduce_sum(cnn.biases[0]).eval(session=sess)
Wfc_2 = tf.reduce_sum(cnn.W_fc).eval(session=sess)
bfc_2 = tf.reduce_sum(cnn.b_fc).eval(session=sess)
if np.array_equal(check_weights, weights_2):
print 'clipped'
elif np.array_equal(check_biases, biases_2):
print 'clipped'
elif np.array_equal(check_Wfc, Wfc_2):
print 'clipped'
elif np.array_equal(check_bfc, bfc_2):
print 'clipped'
else:
print 'no clip. means:'
print l2_loss_float(cnn.weights[0]).eval(session=sess)
print l2_loss_float(cnn.biases[0]).eval(session=sess)
print l2_loss_float(cnn.W_fc).eval(session=sess)
print l2_loss_float(cnn.b_fc).eval(session=sess)
return cnn
def clip_tensors(j, length, cnn, sess, params):
if j == (length - 2):
cnn = print_clips(cnn, sess, params)
else:
cnn.clip_vars(params)
return cnn
def initial_prints(timelog, saver, sess, model_dir, val_X, val_Y, word_vec_array, params):
timelog.write('\n\n\nNew Model:')
init_time = time.time()
timelog.write(str(init_time))
path = saver.save(sess, model_dir + 'temp_cnn_eval_epoch%i' %0)
best_dev_loss = cnn_eval.float_entropy(path, val_X, val_Y, word_vec_array, params)
timelog.write( '\ndebug dev loss %g' %best_dev_loss)
timelog.write('\n%g'%time.clock())
return best_dev_loss, init_time
def set_up_model(params, word_vec_array):
cnn = CNN(params, word_vec_array)
train_step = cnn.train_op # optimizer.minimize(loss)
with cnn.graph.as_default():
saver = tf.train.Saver(tf.all_variables(), max_to_keep=None)
return cnn, cnn.loss, train_step, saver
def epoch_train(train_X, train_Y, word_vec_array, params, cnn, train_step, timelog):
batches_x, batches_y = scramble_batches(params, train_X, train_Y)
total_train_step_time = 0
total_clip_time = 0
for j in range(len(batches_x)):
feed_dict = {cnn.input_x: batches_x[j], cnn.input_y: batches_y[j],
cnn.dropout: params['TRAIN_DROPOUT'],
cnn.word_embeddings_new: np.zeros([0, word_vec_array.shape[1]])}
start_train_step_time = time.time()
train_step.run(feed_dict=feed_dict, session=cnn.sess)
total_train_step_time = total_train_step_time + (time.time() - start_train_step_time)
#apply l2 clipping to weights and biases
#DEBUGGING
if params['REGULARIZER'] == 'l2_clip' and False:
start_clip_time = time.time()
cnn.clip_vars(params)
total_clip_time = total_clip_time + (time.time() - start_clip_time)
timelog.write('\nthe amount of time the training step takes: %g' %(total_train_step_time))
timelog.write('\nthe amount of time clipping_vars takes: %g' %(total_clip_time))
return cnn
def print_if_file_doesnt_exist(timelog, file_path, cur_loc):
    if not os.path.isfile(file_path):
        timelog.write("found that %s does not exist (at %s)" % (file_path, cur_loc))
def train(params, input_X, input_Y, word_vec_array, model_dir):
train_X, train_Y, val_X, val_Y = separate_train_and_val(input_X, input_Y)
path_final, word_embeddings = None, None
with open(model_dir + 'train_log', 'a') as timelog:
# with tf.Graph().as_default(), tf.Session() as sess:
cnn, loss, train_step, saver = set_up_model(params, word_vec_array)
best_dev_loss, init_time = initial_prints(timelog, saver, cnn.sess, model_dir, val_X,
val_Y, word_vec_array, params)
for epoch in range(params['EPOCHS']):
cnn = epoch_train(train_X, train_Y, word_vec_array, params, cnn, train_step, timelog)
epoch_write_statements(timelog, init_time, epoch)
start_save_time = time.time()
path = saver.save(cnn.sess, model_dir + 'temp_cnn_eval_epoch%i' %(epoch))
total_save_time = time.time() - start_save_time
timelog.write("\nthe amount of time it takes to save the model at epoch " +
str(epoch) + ": %g" %(total_save_time))
float_entropy_init_time = time.time()
dev_loss = cnn_eval.float_entropy(path, val_X, val_Y, word_vec_array, params)
float_entropy_time = time.time() - float_entropy_init_time
timelog.write('\ndev cross entropy, best cross entropy: %g, %g (it took %g '
%(dev_loss, best_dev_loss, float_entropy_time) + 'seconds to compute)')
timelog.flush()
start_save_best_time = time.time()
if dev_loss < best_dev_loss:
timelog.write('\nnew best model, saving model in ' + (model_dir + 'cnn_final%i' %epoch))
best_dev_loss = dev_loss
path_final = saver.save(cnn.sess, model_dir + 'cnn_final%i' %epoch)
word_embeddings = cnn.word_embeddings.eval(session=cnn.sess)
            #early stop if dev loss rises significantly above the best so far
elif dev_loss > best_dev_loss + .02:
break
total_save_best_time = time.time() - start_save_best_time
timelog.write('\nhow long saving the best model at the end takes: %g' %(total_save_best_time))
remove_chkpt_files(epoch, model_dir)
if (path_final is None or word_embeddings is None):
timelog.write('failure to train, returning current state')
path_final = saver.save(cnn.sess, model_dir + 'cnn_final%i' %epoch)
word_embeddings = cnn.word_embeddings.eval(session=cnn.sess)
return path_final, word_embeddings
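# Illustrative usage sketch (an assumption; the params keys follow the conventions
# used above, the values are hypothetical):
#
#   params = {'BATCH_SIZE': 50, 'FLEX': 2, 'MAX_LENGTH': 60, 'EPOCHS': 10,
#             'TRAIN_DROPOUT': 0.5, 'REGULARIZER': 'l2_clip'}
#   path_final, embeddings = train(params, input_X, input_Y, word_vec_array, model_dir)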
############################################################
# FlatCAM: 2D Post-processing for Manufacturing #
# http://flatcam.org #
# Author: Juan Pablo Caram (c) #
# Date: 12/18/2015 #
# MIT Licence #
# #
# SVG Features supported: #
# * Groups #
# * Rectangles (w/ rounded corners) #
# * Circles #
# * Ellipses #
# * Polygons #
# * Polylines #
# * Lines #
# * Paths #
# * All transformations #
# #
# Reference: www.w3.org/TR/SVG/Overview.html #
############################################################
import xml.etree.ElementTree as ET
import re
import itertools
from svg.path import Path, Line, Arc, CubicBezier, QuadraticBezier, parse_path
from shapely.geometry import LinearRing, LineString, Point
from shapely.affinity import translate, rotate, scale, skew, affine_transform
import numpy as np
import logging
log = logging.getLogger('base2')
def svgparselength(lengthstr):
"""
Parse an SVG length string into a float and a units
string, if any.
:param lengthstr: SVG length string.
:return: Number and units pair.
:rtype: tuple(float, str|None)
"""
integer_re_str = r'[+-]?[0-9]+'
number_re_str = r'(?:[+-]?[0-9]*\.[0-9]+(?:[Ee]' + integer_re_str + ')?' + r')|' + \
r'(?:' + integer_re_str + r'(?:[Ee]' + integer_re_str + r')?)'
length_re_str = r'(' + number_re_str + r')(em|ex|px|in|cm|mm|pt|pc|%)?'
match = re.search(length_re_str, lengthstr)
if match:
return float(match.group(1)), match.group(2)
raise Exception('Cannot parse SVG length: %s' % lengthstr)
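# Illustrative usage (not part of the original module): svgparselength splits an
# SVG length attribute into its numeric value and optional unit, e.g.
#
#   svgparselength('2.5cm')   # -> (2.5, 'cm')
#   svgparselength('100')     # -> (100.0, None)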
def path2shapely(path, res=1.0):
"""
    Converts an svg.path.Path into a Shapely
    LinearRing or LineString.
    :rtype: LinearRing or LineString
:param path: svg.path.Path instance
:param res: Resolution (minimum step along path)
:return: Shapely geometry object
"""
points = []
for component in path:
# Line
if isinstance(component, Line):
start = component.start
x, y = start.real, start.imag
if len(points) == 0 or points[-1] != (x, y):
points.append((x, y))
end = component.end
points.append((end.real, end.imag))
continue
# Arc, CubicBezier or QuadraticBezier
if isinstance(component, Arc) or \
isinstance(component, CubicBezier) or \
isinstance(component, QuadraticBezier):
            # How many points to use in the discrete representation.
length = component.length(res / 10.0)
steps = int(length / res + 0.5)
# solve error when step is below 1,
# it may cause other problems, but LineString needs at least two points
if steps == 0:
steps = 1
frac = 1.0 / steps
# print length, steps, frac
for i in range(steps):
point = component.point(i * frac)
x, y = point.real, point.imag
if len(points) == 0 or points[-1] != (x, y):
points.append((x, y))
end = component.point(1.0)
points.append((end.real, end.imag))
continue
log.warning("I don't know what this is:", component)
continue
if path.closed:
return LinearRing(points)
else:
return LineString(points)
def svgrect2shapely(rect, n_points=32):
"""
Converts an SVG rect into Shapely geometry.
:param rect: Rect Element
:type rect: xml.etree.ElementTree.Element
:return: shapely.geometry.polygon.LinearRing
"""
w = svgparselength(rect.get('width'))[0]
h = svgparselength(rect.get('height'))[0]
x_obj = rect.get('x')
if x_obj is not None:
x = svgparselength(x_obj)[0]
else:
x = 0
y_obj = rect.get('y')
if y_obj is not None:
y = svgparselength(y_obj)[0]
else:
y = 0
rxstr = rect.get('rx')
rystr = rect.get('ry')
if rxstr is None and rystr is None: # Sharp corners
pts = [
(x, y), (x + w, y), (x + w, y + h), (x, y + h), (x, y)
]
else: # Rounded corners
rx = 0.0 if rxstr is None else svgparselength(rxstr)[0]
ry = 0.0 if rystr is None else svgparselength(rystr)[0]
n_points = int(n_points / 4 + 0.5)
t = np.arange(n_points, dtype=float) / n_points / 4
x_ = (x + w - rx) + rx * np.cos(2 * np.pi * (t + 0.75))
y_ = (y + ry) + ry * np.sin(2 * np.pi * (t + 0.75))
lower_right = [(x_[i], y_[i]) for i in range(n_points)]
x_ = (x + w - rx) + rx * np.cos(2 * np.pi * t)
y_ = (y + h - ry) + ry * np.sin(2 * np.pi * t)
upper_right = [(x_[i], y_[i]) for i in range(n_points)]
x_ = (x + rx) + rx * np.cos(2 * np.pi * (t + 0.25))
y_ = (y + h - ry) + ry * np.sin(2 * np.pi * (t + 0.25))
upper_left = [(x_[i], y_[i]) for i in range(n_points)]
x_ = (x + rx) + rx * np.cos(2 * np.pi * (t + 0.5))
y_ = (y + ry) + ry * np.sin(2 * np.pi * (t + 0.5))
lower_left = [(x_[i], y_[i]) for i in range(n_points)]
pts = [(x + rx, y), (x - rx + w, y)] + \
lower_right + \
[(x + w, y + ry), (x + w, y + h - ry)] + \
upper_right + \
[(x + w - rx, y + h), (x + rx, y + h)] + \
upper_left + \
[(x, y + h - ry), (x, y + ry)] + \
lower_left
return LinearRing(pts)
def svgcircle2shapely(circle):
"""
Converts an SVG circle into Shapely geometry.
:param circle: Circle Element
:type circle: xml.etree.ElementTree.Element
:return: Shapely representation of the circle.
    :rtype: shapely.geometry.polygon.Polygon
"""
# cx = float(circle.get('cx'))
# cy = float(circle.get('cy'))
# r = float(circle.get('r'))
cx = svgparselength(circle.get('cx'))[0] # TODO: No units support yet
cy = svgparselength(circle.get('cy'))[0] # TODO: No units support yet
r = svgparselength(circle.get('r'))[0] # TODO: No units support yet
# TODO: No resolution specified.
return Point(cx, cy).buffer(r)
def svgellipse2shapely(ellipse, n_points=64):
"""
Converts an SVG ellipse into Shapely geometry
:param ellipse: Ellipse Element
:type ellipse: xml.etree.ElementTree.Element
:param n_points: Number of discrete points in output.
:return: Shapely representation of the ellipse.
:rtype: shapely.geometry.polygon.LinearRing
"""
cx = svgparselength(ellipse.get('cx'))[0] # TODO: No units support yet
cy = svgparselength(ellipse.get('cy'))[0] # TODO: No units support yet
rx = svgparselength(ellipse.get('rx'))[0] # TODO: No units support yet
ry = svgparselength(ellipse.get('ry'))[0] # TODO: No units support yet
t = np.arange(n_points, dtype=float) / n_points
x = cx + rx * np.cos(2 * np.pi * t)
y = cy + ry * np.sin(2 * np.pi * t)
pts = [(x[i], y[i]) for i in range(n_points)]
return LinearRing(pts)
def svgline2shapely(line):
"""
:param line: Line element
:type line: xml.etree.ElementTree.Element
    :return: Shapely representation of the line.
    :rtype: shapely.geometry.LineString
"""
x1 = svgparselength(line.get('x1'))[0]
y1 = svgparselength(line.get('y1'))[0]
x2 = svgparselength(line.get('x2'))[0]
y2 = svgparselength(line.get('y2'))[0]
return LineString([(x1, y1), (x2, y2)])
def svgpolyline2shapely(polyline):
ptliststr = polyline.get('points')
points = parse_svg_point_list(ptliststr)
return LineString(points)
def svgpolygon2shapely(polygon):
ptliststr = polygon.get('points')
points = parse_svg_point_list(ptliststr)
return LinearRing(points)
def getsvggeo(node):
"""
Extracts and flattens all geometry from an SVG node
into a list of Shapely geometry.
:param node: xml.etree.ElementTree.Element
:return: List of Shapely geometry
:rtype: list
"""
kind = re.search('(?:\{.*\})?(.*)$', node.tag).group(1)
geo = []
# Recurse
if len(node) > 0:
for child in node:
subgeo = getsvggeo(child)
if subgeo is not None:
geo += subgeo
# Parse
elif kind == 'path':
log.debug("***PATH***")
P = parse_path(node.get('d'))
P = path2shapely(P)
geo = [P]
elif kind == 'rect':
log.debug("***RECT***")
R = svgrect2shapely(node)
geo = [R]
elif kind == 'circle':
log.debug("***CIRCLE***")
C = svgcircle2shapely(node)
geo = [C]
elif kind == 'ellipse':
log.debug("***ELLIPSE***")
E = svgellipse2shapely(node)
geo = [E]
elif kind == 'polygon':
log.debug("***POLYGON***")
poly = svgpolygon2shapely(node)
geo = [poly]
elif kind == 'line':
log.debug("***LINE***")
line = svgline2shapely(node)
geo = [line]
elif kind == 'polyline':
log.debug("***POLYLINE***")
pline = svgpolyline2shapely(node)
geo = [pline]
else:
log.warning("Unknown kind: " + kind)
geo = None
# ignore transformation for unknown kind
if geo is not None:
# Transformations
if 'transform' in node.attrib:
trstr = node.get('transform')
trlist = parse_svg_transform(trstr)
#log.debug(trlist)
# Transformations are applied in reverse order
for tr in trlist[::-1]:
if tr[0] == 'translate':
geo = [translate(geoi, tr[1], tr[2]) for geoi in geo]
elif tr[0] == 'scale':
                    geo = [scale(geoi, tr[1], tr[2], origin=(0, 0))
                           for geoi in geo]
elif tr[0] == 'rotate':
geo = [rotate(geoi, tr[1], origin=(tr[2], tr[3]))
for geoi in geo]
elif tr[0] == 'skew':
geo = [skew(geoi, tr[1], tr[2], origin=(0, 0))
for geoi in geo]
elif tr[0] == 'matrix':
geo = [affine_transform(geoi, tr[1:]) for geoi in geo]
else:
                    raise Exception('Unknown transformation: %s' % tr)
return geo
def parse_svg_point_list(ptliststr):
"""
Returns a list of coordinate pairs extracted from the "points"
attribute in SVG polygons and polylines.
:param ptliststr: "points" attribute string in polygon or polyline.
:return: List of tuples with coordinates.
"""
pairs = []
last = None
pos = 0
i = 0
for match in re.finditer(r'(\s*,\s*)|(\s+)', ptliststr.strip(' ')):
val = float(ptliststr[pos:match.start()])
if i % 2 == 1:
pairs.append((last, val))
else:
last = val
pos = match.end()
i += 1
# Check for last element
val = float(ptliststr[pos:])
if i % 2 == 1:
pairs.append((last, val))
else:
log.warning("Incomplete coordinates.")
return pairs
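# Illustrative usage (not part of the original module): the "points" attribute of
# a polygon/polyline is split into coordinate pairs, e.g.
#
#   parse_svg_point_list("10,20 30,40 50,60")
#   # -> [(10.0, 20.0), (30.0, 40.0), (50.0, 60.0)]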
def parse_svg_transform(trstr):
"""
Parses an SVG transform string into a list
of transform names and their parameters.
Possible transformations are:
* Translate: translate(<tx> [<ty>]), which specifies
a translation by tx and ty. If <ty> is not provided,
it is assumed to be zero. Result is
['translate', tx, ty]
* Scale: scale(<sx> [<sy>]), which specifies a scale operation
by sx and sy. If <sy> is not provided, it is assumed to be
equal to <sx>. Result is: ['scale', sx, sy]
* Rotate: rotate(<rotate-angle> [<cx> <cy>]), which specifies
a rotation by <rotate-angle> degrees about a given point.
If optional parameters <cx> and <cy> are not supplied,
the rotate is about the origin of the current user coordinate
system. Result is: ['rotate', rotate-angle, cx, cy]
* Skew: skewX(<skew-angle>), which specifies a skew
transformation along the x-axis. skewY(<skew-angle>), which
specifies a skew transformation along the y-axis.
Result is ['skew', angle-x, angle-y]
* Matrix: matrix(<a> <b> <c> <d> <e> <f>), which specifies a
transformation in the form of a transformation matrix of six
values. matrix(a,b,c,d,e,f) is equivalent to applying the
transformation matrix [a b c d e f]. Result is
['matrix', a, b, c, d, e, f]
Note: All parameters to the transformations are "numbers",
i.e. no units present.
:param trstr: SVG transform string.
:type trstr: str
:return: List of transforms.
:rtype: list
"""
trlist = []
assert isinstance(trstr, str)
trstr = trstr.strip(' ')
integer_re_str = r'[+-]?[0-9]+'
number_re_str = r'(?:[+-]?[0-9]*\.[0-9]+(?:[Ee]' + integer_re_str + ')?' + r')|' + \
r'(?:' + integer_re_str + r'(?:[Ee]' + integer_re_str + r')?)'
# num_re_str = r'[\+\-]?[0-9\.e]+' # TODO: Negative exponents missing
comma_or_space_re_str = r'(?:(?:\s+)|(?:\s*,\s*))'
translate_re_str = r'translate\s*\(\s*(' + \
number_re_str + r')(?:' + \
comma_or_space_re_str + \
r'(' + number_re_str + r'))?\s*\)'
scale_re_str = r'scale\s*\(\s*(' + \
number_re_str + r')' + \
r'(?:' + comma_or_space_re_str + \
r'(' + number_re_str + r'))?\s*\)'
skew_re_str = r'skew([XY])\s*\(\s*(' + \
number_re_str + r')\s*\)'
rotate_re_str = r'rotate\s*\(\s*(' + \
number_re_str + r')' + \
r'(?:' + comma_or_space_re_str + \
r'(' + number_re_str + r')' + \
comma_or_space_re_str + \
r'(' + number_re_str + r'))?\s*\)'
matrix_re_str = r'matrix\s*\(\s*' + \
r'(' + number_re_str + r')' + comma_or_space_re_str + \
r'(' + number_re_str + r')' + comma_or_space_re_str + \
r'(' + number_re_str + r')' + comma_or_space_re_str + \
r'(' + number_re_str + r')' + comma_or_space_re_str + \
r'(' + number_re_str + r')' + comma_or_space_re_str + \
r'(' + number_re_str + r')\s*\)'
while len(trstr) > 0:
match = re.search(r'^' + translate_re_str, trstr)
if match:
            trlist.append([
                'translate',
                float(match.group(1)),
                float(match.group(2)) if match.group(2) else 0.0
            ])
trstr = trstr[len(match.group(0)):].strip(' ')
continue
match = re.search(r'^' + scale_re_str, trstr)
if match:
            trlist.append([
                'scale',
                float(match.group(1)),
                float(match.group(2)) if match.group(2) else float(match.group(1))
            ])
trstr = trstr[len(match.group(0)):].strip(' ')
continue
match = re.search(r'^' + skew_re_str, trstr)
if match:
trlist.append([
'skew',
float(match.group(2)) if match.group(1) == 'X' else 0.0,
float(match.group(2)) if match.group(1) == 'Y' else 0.0
])
trstr = trstr[len(match.group(0)):].strip(' ')
continue
match = re.search(r'^' + rotate_re_str, trstr)
if match:
trlist.append([
'rotate',
float(match.group(1)),
float(match.group(2)) if match.group(2) else 0.0,
float(match.group(3)) if match.group(3) else 0.0
])
trstr = trstr[len(match.group(0)):].strip(' ')
continue
match = re.search(r'^' + matrix_re_str, trstr)
if match:
trlist.append(['matrix'] + [float(x) for x in match.groups()])
trstr = trstr[len(match.group(0)):].strip(' ')
continue
raise Exception("Don't know how to parse: %s" % trstr)
return trlist
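# Illustrative usage (not part of the original module): a transform attribute is
# parsed into a list of [name, params...] entries, which getsvggeo() then applies
# in reverse order, e.g.
#
#   parse_svg_transform("translate(10,20) rotate(45)")
#   # -> [['translate', 10.0, 20.0], ['rotate', 45.0, 0.0, 0.0]]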
if __name__ == "__main__":
tree = ET.parse('tests/svg/drawing.svg')
root = tree.getroot()
ns = re.search(r'\{(.*)\}', root.tag).group(1)
print ns
for geo in getsvggeo(root):
print geo
# -*- coding: utf-8 -*-
"""
flaskbb.message.views
~~~~~~~~~~~~~~~~~~~~~
The views for the conversations and messages are located in this module.
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
import uuid
from datetime import datetime
from flask import Blueprint, redirect, request, url_for, flash, abort
from flask_login import login_required, current_user
from flask_babelplus import gettext as _
from flaskbb.extensions import db
from flaskbb.utils.settings import flaskbb_config
from flaskbb.utils.helpers import render_template, format_quote
from flaskbb.message.forms import ConversationForm, MessageForm
from flaskbb.message.models import Conversation, Message
from flaskbb.user.models import User
message = Blueprint("message", __name__)
@message.route("/")
@message.route("/inbox")
@login_required
def inbox():
page = request.args.get('page', 1, type=int)
conversations = Conversation.query.\
filter(
Conversation.user_id == current_user.id,
Conversation.draft == False,
Conversation.trash == False
).\
order_by(Conversation.id.asc()).\
paginate(page, flaskbb_config['TOPICS_PER_PAGE'], False)
message_count = Conversation.query.\
filter(Conversation.user_id == current_user.id).\
count()
return render_template("message/inbox.html", conversations=conversations,
message_count=message_count)
@message.route("/<int:conversation_id>/view", methods=["GET", "POST"])
@login_required
def view_conversation(conversation_id):
conversation = Conversation.query.filter_by(
id=conversation_id,
user_id=current_user.id
).first_or_404()
if conversation.unread:
conversation.unread = False
current_user.invalidate_cache(permissions=False)
conversation.save()
form = MessageForm()
if form.validate_on_submit():
message_count = Conversation.query.\
filter(Conversation.user_id == current_user.id).\
count()
if message_count >= flaskbb_config["MESSAGE_QUOTA"]:
flash(_("You cannot send any messages anymore because you have"
"reached your message limit."), "danger")
return redirect(url_for("message.view_conversation",
conversation_id=conversation.id))
to_user_id = None
        # If the current_user is the user who received the message
        # then we have to change the IDs a bit.
if current_user.id == conversation.to_user_id:
to_user_id = conversation.from_user_id
else:
to_user_id = conversation.to_user_id
form.save(conversation=conversation, user_id=current_user.id)
        # save the message in the receiver's conversation
old_conv = conversation
conversation = Conversation.query.\
filter(
Conversation.user_id == to_user_id,
Conversation.shared_id == conversation.shared_id
).first()
        # user deleted the conversation, start a new conversation with just
        # the received message
if conversation is None:
conversation = Conversation(
subject=old_conv.subject,
from_user_id=current_user.id,
to_user=to_user_id,
user_id=to_user_id,
shared_id=old_conv.shared_id
)
conversation.save()
form.save(conversation=conversation, user_id=current_user.id,
unread=True)
return redirect(url_for("message.view_conversation",
conversation_id=old_conv.id))
return render_template("message/conversation.html",
conversation=conversation, form=form)
@message.route("/new", methods=["POST", "GET"])
@login_required
def new_conversation():
form = ConversationForm()
to_user = request.args.get("to_user")
message_count = Conversation.query.\
filter(Conversation.user_id == current_user.id).\
count()
if message_count >= flaskbb_config["MESSAGE_QUOTA"]:
flash(_("You cannot send any messages anymore because you have"
"reached your message limit."), "danger")
return redirect(url_for("message.inbox"))
if request.method == "POST":
if "save_message" in request.form and form.validate():
to_user = User.query.filter_by(username=form.to_user.data).first()
shared_id = uuid.uuid4()
form.save(from_user=current_user.id,
to_user=to_user.id,
user_id=current_user.id,
unread=False,
as_draft=True,
shared_id=shared_id)
flash(_("Message saved."), "success")
return redirect(url_for("message.drafts"))
if "send_message" in request.form and form.validate():
to_user = User.query.filter_by(username=form.to_user.data).first()
# this is the shared id between conversations because the messages
# are saved on both ends
shared_id = uuid.uuid4()
            # Save the message in the current user's inbox
form.save(from_user=current_user.id,
to_user=to_user.id,
user_id=current_user.id,
unread=False,
shared_id=shared_id)
            # Save the message in the receiver's inbox
form.save(from_user=current_user.id,
to_user=to_user.id,
user_id=to_user.id,
unread=True,
shared_id=shared_id)
to_user.invalidate_cache(permissions=False)
flash(_("Message sent."), "success")
return redirect(url_for("message.sent"))
else:
form.to_user.data = to_user
return render_template("message/message_form.html", form=form,
title=_("Compose Message"))
@message.route("/message/<int:message_id>/raw")
@login_required
def raw_message(message_id):
message = Message.query.filter_by(id=message_id).first_or_404()
    # abort if the message was not sent by the current_user and was not one of
    # the received ones
if not (message.conversation.from_user_id == current_user.id or
message.conversation.to_user_id == current_user.id):
abort(404)
return format_quote(username=message.user.username,
content=message.message)
@message.route("/<int:conversation_id>/edit", methods=["POST", "GET"])
@login_required
def edit_conversation(conversation_id):
conversation = Conversation.query.filter_by(
id=conversation_id,
user_id=current_user.id
).first_or_404()
if not conversation.draft:
flash(_("You cannot edit a sent message."), "danger")
return redirect(url_for("message.inbox"))
form = ConversationForm()
if request.method == "POST":
if "save_message" in request.form:
to_user = User.query.filter_by(username=form.to_user.data).first()
conversation.draft = True
conversation.to_user_id = to_user.id
conversation.first_message.message = form.message.data
conversation.save()
flash(_("Message saved."), "success")
return redirect(url_for("message.drafts"))
if "send_message" in request.form and form.validate():
to_user = User.query.filter_by(username=form.to_user.data).first()
            # Save the message in the receiver's inbox
form.save(from_user=current_user.id,
to_user=to_user.id,
user_id=to_user.id,
unread=True,
shared_id=conversation.shared_id)
# Move the message from ``Drafts`` to ``Sent``.
conversation.draft = False
conversation.to_user = to_user
conversation.date_created = datetime.utcnow()
conversation.save()
flash(_("Message sent."), "success")
return redirect(url_for("message.sent"))
else:
form.to_user.data = conversation.to_user.username
form.subject.data = conversation.subject
form.message.data = conversation.first_message.message
return render_template("message/message_form.html", form=form,
title=_("Edit Message"))
@message.route("/<int:conversation_id>/move", methods=["POST"])
@login_required
def move_conversation(conversation_id):
conversation = Conversation.query.filter_by(
id=conversation_id,
user_id=current_user.id
).first_or_404()
conversation.trash = True
conversation.save()
return redirect(url_for("message.inbox"))
@message.route("/<int:conversation_id>/restore", methods=["POST"])
@login_required
def restore_conversation(conversation_id):
conversation = Conversation.query.filter_by(
id=conversation_id,
user_id=current_user.id
).first_or_404()
conversation.trash = False
conversation.save()
return redirect(url_for("message.inbox"))
@message.route("/<int:conversation_id>/delete", methods=["POST"])
@login_required
def delete_conversation(conversation_id):
conversation = Conversation.query.filter_by(
id=conversation_id,
user_id=current_user.id
).first_or_404()
conversation.delete()
return redirect(url_for("message.inbox"))
@message.route("/sent")
@login_required
def sent():
page = request.args.get('page', 1, type=int)
conversations = Conversation.query.\
filter(
Conversation.user_id == current_user.id,
Conversation.draft == False,
Conversation.trash == False,
db.not_(Conversation.to_user_id == current_user.id)
).\
paginate(page, flaskbb_config['TOPICS_PER_PAGE'], False)
message_count = Conversation.query.\
filter(Conversation.user_id == current_user.id).\
count()
return render_template("message/sent.html", conversations=conversations,
message_count=message_count)
@message.route("/draft")
@login_required
def drafts():
page = request.args.get('page', 1, type=int)
conversations = Conversation.query.\
filter(
Conversation.user_id == current_user.id,
Conversation.draft == True,
Conversation.trash == False
).\
paginate(page, flaskbb_config['TOPICS_PER_PAGE'], False)
message_count = Conversation.query.\
filter(Conversation.user_id == current_user.id).\
count()
return render_template("message/drafts.html", conversations=conversations,
message_count=message_count)
@message.route("/trash")
@login_required
def trash():
page = request.args.get('page', 1, type=int)
conversations = Conversation.query.\
filter(
Conversation.user_id == current_user.id,
Conversation.trash == True,
).\
paginate(page, flaskbb_config['TOPICS_PER_PAGE'], False)
message_count = Conversation.query.\
filter(Conversation.user_id == current_user.id).\
count()
return render_template("message/trash.html", conversations=conversations,
message_count=message_count)
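# Illustrative sketch (an assumption, not part of this module): the blueprint
# defined above is normally registered on the application with a URL prefix, e.g.
#
#   app.register_blueprint(message, url_prefix='/message')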
# External Dependencies
from __future__ import division
from numpy import isclose
from svgpathtools import Path
# Internal Dependencies
from misc4rings import isNear
class ClosedRingsOverlapError(Exception):
def __init__(self,mes):
self.mes = mes
def __str__(self):
return repr(self.mes)
def findAppropriateTstep(path, T, stepInPositiveDirection):
    # Often the overlapping part of two paths is so small that, even after it is
    # removed, pathXpathIntersections will still consider the two curves as
    # intersecting. This function finds the smallest (signed) Tstep such that
    # isNear(path(T), path(T + Tstep)) == False.
    # note: stepInPositiveDirection should be True if Tstep should be positive
    # set initial guess as max possible step distance (and set sign of Tstep)
    # T = float(T)
if stepInPositiveDirection:
Tstep = 1 - T
else:
Tstep = 0 - T
#check that what we're asking for is possible
if isNear(path.point(T + Tstep), path.point(T)):
raise Exception("An impossible Tstep was asked for.")
#Find a lower bound for Tstep by bisection
maxIts = 200 # limits Tstep to be > (1/2)**200
its = 0
while not isNear(path.point(T + Tstep), path.point(T)) and its < maxIts:
Tstep /= 2
its += 1
if its >= maxIts:
raise Exception("Max iterations reached in bisection to find "
"appropriate Tstep. This could theoretically be ok "
"if you have a curve with a huge number of "
"segments... just increase the maxIts in "
"findAppropriateTstep if you have a such a curve "
"(but I doubt that's the case - so tell Andy).")
return 2 * Tstep
def shortPart(path,T):
if isclose(T, 0) or isclose(T, 1):
return Path()
if T < 1-T: # T is closer to 0
# return cropPath(path,0,T)
return path.cropped(0, T)
else: # T is closer to 1
# return cropPath(path,T,1)
return path.cropped(T, 1)
def longPart(path, T, remove_a_little_extra=True):
if remove_a_little_extra:
if T < 1 - T: # T is closer to 0 than 1
extra = T
if isNear(path.point(T + extra), path.point(T)):
extra = findAppropriateTstep(path, T, True)
else: # T is closer to 1 than 0
extra = 1-T
if isNear(path.point(T+extra), path.point(T)):
extra = -1 * findAppropriateTstep(path, T, False)
else:
extra = 0
if T < 1 - T: #T is closer to 0 than 1
# return cropPath(path,T+extra,1)
return path.cropped(T + extra, 1)
else: #T is closer to 1 than 0
# return cropPath(path,0,T-extra)
return path.cropped(0, T - extra)
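# Illustrative sketch (an assumption, not part of the original code): shortPart and
# longPart split a path at parameter T and keep the shorter / longer piece, e.g.
#
#   short_piece = shortPart(some_path, 0.25)  # ~ some_path.cropped(0, 0.25)
#   long_piece = longPart(some_path, 0.25)    # ~ some_path.cropped(0.25 + extra, 1)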
def remove_intersections(ipath, jpath, iclosed, jclosed, iringupdated=False, jringupdated=False): #removes one intersection at a time until all are gone
new_ipath = ipath
new_jpath = jpath
#find all intersections
res = ipath.intersect(jpath, justonemode=True)
# res = pathXpathIntersections(ipath, jpath, justonemode=True)
if res:
iT, iseg, i_t = res[0]
jT, jseg, j_t = res[1]
# iT = ipath.t2T(iseg, i_t)
# jT = jpath.t2T(jseg, j_t)
else:
run_again = False
return new_ipath, new_jpath, iringupdated, jringupdated, run_again
    # Now crop the path (if one ring is closed, crop the other ring)
if iclosed and jclosed: #then crop jpath
raise ClosedRingsOverlapError("")
elif jclosed: #jring closed so crop iring
new_ipath = longPart(ipath, iT)
new_jpath = jpath
iringupdated = True
elif iclosed: #iring closed so crop jring
new_jpath = longPart(jpath, jT)
new_ipath = ipath
jringupdated = True
else: #both rings are incomplete
if iT in [0, 1]:
new_ipath = longPart(ipath, iT)
new_jpath = jpath
iringupdated = True
elif jT in [0, 1]:
new_jpath = longPart(jpath, jT)
new_ipath = ipath
jringupdated = True
elif shortPart(ipath, iT).length() < shortPart(jpath, jT).length():
new_ipath = longPart(ipath, iT)
new_jpath = jpath
iringupdated = True
else:
new_jpath = longPart(jpath, jT)
new_ipath = ipath
jringupdated = True
run_again = True # might be more intersections to remove, so run again
return new_ipath, new_jpath, iringupdated, jringupdated, run_again
def remove_intersections_from_rings(rings):
from options4rings import intersection_removal_progress_output_on
from time import time as current_time
from andysmod import n_choose_k, format_time
[r.record_wasClosed() for r in rings] # record the current closure status
#for output
num_segments_in_ring_list = sum(len(r.path) for r in rings)
num_seg_pairs2check = n_choose_k(num_segments_in_ring_list, 2)
num_seg_pairs_checked = 0
current_percent_complete = 0
start_time = current_time()
count = 0
overlappingClosedRingPairs = []
for i in range(len(rings)):
iring = rings[i]
ipath = iring.path
new_ipath = ipath
iclosed = iring.wasClosed
iringupdated = False
num_segs_in_ipath = len(ipath) # for progress output
for j in range(i+1, len(rings)):
if rings[j].maxR < rings[i].minR or rings[i].maxR < rings[j].minR:
continue
jring = rings[j]
jpath = jring.path
new_jpath = jpath
jclosed = jring.wasClosed
jringupdated = False
num_segs_in_jpath = len(jpath) #for progress output
# while loop to remove intersections between iring and jring (if any exist)
run_again = True
maxits = 20
its = 0
while run_again and its < maxits:
try:
args = (new_ipath, new_jpath, iclosed, jclosed)
res = remove_intersections(*args, iringupdated=iringupdated, jringupdated=jringupdated)
new_ipath, new_jpath, iringupdated, jringupdated, run_again = res
except ClosedRingsOverlapError:
overlappingClosedRingPairs.append((i, j))
run_again = False
pass
its += 1
            # raise Exception if the while loop terminated due to reaching the max allowed iterations
if its >= maxits:
# remove_intersections_from_rings([iring, jring])
# print(iring.xml)
# print(jring.xml)
raise Exception("Max iterations reached while removing intersections. Either the above two rings have over 20 intersections or this is a bug.")
            # Output progress
if intersection_removal_progress_output_on.b:
num_seg_pairs_checked += num_segs_in_jpath*num_segs_in_ipath
if 100 * num_seg_pairs_checked / num_seg_pairs2check > int(100 * current_percent_complete):
current_percent_complete = num_seg_pairs_checked / num_seg_pairs2check
time_elapsed = current_time() - start_time
estimated_time_remaining = (1-current_percent_complete) * time_elapsed / current_percent_complete
stuff = (int(100 * current_percent_complete),
format_time(estimated_time_remaining),
format_time(time_elapsed))
mes = ("[%s%% complete || Est. Remaining Time = %s || "
"Elapsed Time = %s]\r" % stuff)
intersection_removal_progress_output_on.dprint(mes)
# update jring if jpath was trimmed
if jringupdated:
jring.updatePath(new_jpath)
count += 1
# update iring if ipath was trimmed
if iringupdated:
iring.updatePath(new_ipath)
count += 1
return rings, count, overlappingClosedRingPairs
from eventlet.support import get_errno
from eventlet.hubs import trampoline
BUFFER_SIZE = 4096
import array
import errno
import os
import socket
from socket import socket as _original_socket
import sys
import time
import warnings
__all__ = ['GreenSocket', 'GreenPipe', 'shutdown_safe']
CONNECT_ERR = set((errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK))
CONNECT_SUCCESS = set((0, errno.EISCONN))
# Emulate _fileobject class in 3.x implementation
# Eventually this internal socket structure could be replaced with makefile calls.
try:
_fileobject = socket._fileobject
except AttributeError:
def _fileobject(sock, *args, **kwargs):
return _original_socket.makefile(sock, *args, **kwargs)
def socket_connect(descriptor, address):
"""
Attempts to connect to the address, returns the descriptor if it succeeds,
returns None if it needs to trampoline, and raises any exceptions.
"""
err = descriptor.connect_ex(address)
if err in CONNECT_ERR:
return None
if err not in CONNECT_SUCCESS:
raise socket.error(err, errno.errorcode[err])
return descriptor
def socket_accept(descriptor):
"""
Attempts to accept() on the descriptor, returns a client,address tuple
if it succeeds; returns None if it needs to trampoline, and raises
any exceptions.
"""
try:
return descriptor.accept()
except socket.error, e:
if get_errno(e) == errno.EWOULDBLOCK:
return None
raise
if sys.platform[:3]=="win":
# winsock sometimes throws ENOTCONN
SOCKET_BLOCKING = set((errno.EWOULDBLOCK,))
SOCKET_CLOSED = set((errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN))
else:
# oddly, on linux/darwin, an unconnected socket is expected to block,
# so we treat ENOTCONN the same as EWOULDBLOCK
SOCKET_BLOCKING = set((errno.EWOULDBLOCK, errno.ENOTCONN))
SOCKET_CLOSED = set((errno.ECONNRESET, errno.ESHUTDOWN, errno.EPIPE))
def set_nonblocking(fd):
"""
Sets the descriptor to be nonblocking. Works on many file-like
objects as well as sockets. Only sockets can be nonblocking on
Windows, however.
"""
try:
setblocking = fd.setblocking
except AttributeError:
# fd has no setblocking() method. It could be that this version of
# Python predates socket.setblocking(). In that case, we can still set
# the flag "by hand" on the underlying OS fileno using the fcntl
# module.
try:
import fcntl
except ImportError:
# Whoops, Windows has no fcntl module. This might not be a socket
# at all, but rather a file-like object with no setblocking()
# method. In particular, on Windows, pipes don't support
# non-blocking I/O and therefore don't have that method. Which
# means fcntl wouldn't help even if we could load it.
raise NotImplementedError("set_nonblocking() on a file object "
"with no setblocking() method "
"(Windows pipes don't support non-blocking I/O)")
# We managed to import fcntl.
fileno = fd.fileno()
flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
fcntl.fcntl(fileno, fcntl.F_SETFL, flags | os.O_NONBLOCK)
else:
# socket supports setblocking()
setblocking(0)
try:
from socket import _GLOBAL_DEFAULT_TIMEOUT
except ImportError:
_GLOBAL_DEFAULT_TIMEOUT = object()
class GreenSocket(object):
"""
Green version of socket.socket class, that is intended to be 100%
API-compatible.
"""
def __init__(self, family_or_realsock=socket.AF_INET, *args, **kwargs):
if isinstance(family_or_realsock, (int, long)):
fd = _original_socket(family_or_realsock, *args, **kwargs)
else:
fd = family_or_realsock
assert not args, args
assert not kwargs, kwargs
# import timeout from other socket, if it was there
try:
self._timeout = fd.gettimeout() or socket.getdefaulttimeout()
except AttributeError:
self._timeout = socket.getdefaulttimeout()
set_nonblocking(fd)
self.fd = fd
# when client calls setblocking(0) or settimeout(0) the socket must
# act non-blocking
self.act_non_blocking = False
@property
def _sock(self):
return self
    # forward unknown attributes to fd,
    # cache the value for future use.
    # I do not see any simple attribute which could be changed
    # so caching everything in self is fine.
    # If we find such attributes - only attributes having __get__ might be cached.
    # For now - I do not want to complicate it.
def __getattr__(self, name):
attr = getattr(self.fd, name)
setattr(self, name, attr)
return attr
def accept(self):
if self.act_non_blocking:
return self.fd.accept()
fd = self.fd
while True:
res = socket_accept(fd)
if res is not None:
client, addr = res
set_nonblocking(client)
return type(self)(client), addr
trampoline(fd, read=True, timeout=self.gettimeout(),
timeout_exc=socket.timeout("timed out"))
def connect(self, address):
if self.act_non_blocking:
return self.fd.connect(address)
fd = self.fd
if self.gettimeout() is None:
while not socket_connect(fd, address):
trampoline(fd, write=True)
else:
end = time.time() + self.gettimeout()
while True:
if socket_connect(fd, address):
return
if time.time() >= end:
raise socket.timeout("timed out")
trampoline(fd, write=True, timeout=end-time.time(),
timeout_exc=socket.timeout("timed out"))
def connect_ex(self, address):
if self.act_non_blocking:
return self.fd.connect_ex(address)
fd = self.fd
if self.gettimeout() is None:
while not socket_connect(fd, address):
try:
trampoline(fd, write=True)
except socket.error, ex:
return get_errno(ex)
else:
end = time.time() + self.gettimeout()
while True:
try:
if socket_connect(fd, address):
return 0
if time.time() >= end:
raise socket.timeout(errno.EAGAIN)
trampoline(fd, write=True, timeout=end-time.time(),
timeout_exc=socket.timeout(errno.EAGAIN))
except socket.error, ex:
return get_errno(ex)
def dup(self, *args, **kw):
sock = self.fd.dup(*args, **kw)
set_nonblocking(sock)
newsock = type(self)(sock)
newsock.settimeout(self.gettimeout())
return newsock
def makefile(self, *args, **kw):
return _fileobject(self.dup(), *args, **kw)
def makeGreenFile(self, *args, **kw):
warnings.warn("makeGreenFile has been deprecated, please use "
"makefile instead", DeprecationWarning, stacklevel=2)
return self.makefile(*args, **kw)
def recv(self, buflen, flags=0):
fd = self.fd
if self.act_non_blocking:
return fd.recv(buflen, flags)
while True:
try:
return fd.recv(buflen, flags)
except socket.error, e:
if get_errno(e) in SOCKET_BLOCKING:
pass
elif get_errno(e) in SOCKET_CLOSED:
return ''
else:
raise
trampoline(fd,
read=True,
timeout=self.gettimeout(),
timeout_exc=socket.timeout("timed out"))
def recvfrom(self, *args):
if not self.act_non_blocking:
trampoline(self.fd, read=True, timeout=self.gettimeout(),
timeout_exc=socket.timeout("timed out"))
return self.fd.recvfrom(*args)
def recvfrom_into(self, *args):
if not self.act_non_blocking:
trampoline(self.fd, read=True, timeout=self.gettimeout(),
timeout_exc=socket.timeout("timed out"))
return self.fd.recvfrom_into(*args)
def recv_into(self, *args):
if not self.act_non_blocking:
trampoline(self.fd, read=True, timeout=self.gettimeout(),
timeout_exc=socket.timeout("timed out"))
return self.fd.recv_into(*args)
def send(self, data, flags=0):
fd = self.fd
if self.act_non_blocking:
return fd.send(data, flags)
# blocking socket behavior - sends all, blocks if the buffer is full
total_sent = 0
len_data = len(data)
while 1:
try:
total_sent += fd.send(data[total_sent:], flags)
except socket.error, e:
if get_errno(e) not in SOCKET_BLOCKING:
raise
if total_sent == len_data:
break
trampoline(self.fd, write=True, timeout=self.gettimeout(),
timeout_exc=socket.timeout("timed out"))
return total_sent
def sendall(self, data, flags=0):
tail = self.send(data, flags)
len_data = len(data)
while tail < len_data:
tail += self.send(data[tail:], flags)
def sendto(self, *args):
trampoline(self.fd, write=True)
return self.fd.sendto(*args)
def setblocking(self, flag):
if flag:
self.act_non_blocking = False
self._timeout = None
else:
self.act_non_blocking = True
self._timeout = 0.0
def settimeout(self, howlong):
if howlong is None or howlong == _GLOBAL_DEFAULT_TIMEOUT:
self.setblocking(True)
return
try:
f = howlong.__float__
except AttributeError:
raise TypeError('a float is required')
howlong = f()
if howlong < 0.0:
raise ValueError('Timeout value out of range')
if howlong == 0.0:
self.setblocking(howlong)
else:
self._timeout = howlong
def gettimeout(self):
return self._timeout
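# Illustrative sketch (an assumption, not part of the original module): GreenSocket
# wraps a real socket and yields to the eventlet hub instead of blocking, so it can
# be used roughly as a drop-in replacement, e.g.
#
#   gs = GreenSocket()
#   gs.connect(('example.com', 80))
#   gs.sendall('GET / HTTP/1.0\r\n\r\n')
#   data = gs.recv(BUFFER_SIZE)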
class _SocketDuckForFd(object):
""" Class implementing all socket method used by _fileobject in cooperative manner using low level os I/O calls."""
def __init__(self, fileno):
self._fileno = fileno
@property
def _sock(self):
return self
def fileno(self):
return self._fileno
def recv(self, buflen):
while True:
try:
data = os.read(self._fileno, buflen)
return data
except OSError, e:
if get_errno(e) != errno.EAGAIN:
raise IOError(*e.args)
trampoline(self, read=True)
def sendall(self, data):
len_data = len(data)
os_write = os.write
fileno = self._fileno
try:
total_sent = os_write(fileno, data)
except OSError, e:
if get_errno(e) != errno.EAGAIN:
raise IOError(*e.args)
total_sent = 0
        while total_sent < len_data:
            trampoline(self, write=True)
            try:
                total_sent += os_write(fileno, data[total_sent:])
            except OSError, e:
                if get_errno(e) != errno.EAGAIN:
                    raise IOError(*e.args)
def __del__(self):
try:
os.close(self._fileno)
except:
            # os.close may fail if __init__ didn't complete (i.e. the file
            # descriptor passed to popen was invalid)
pass
def __repr__(self):
return "%s:%d" % (self.__class__.__name__, self._fileno)
def _operationOnClosedFile(*args, **kwargs):
raise ValueError("I/O operation on closed file")
class GreenPipe(_fileobject):
"""
GreenPipe is a cooperative replacement for file class.
    It will cooperate on pipes. It will block on regular files.
    Differences from the file class:
    - mode is a r/w property. It should be r/o
    - encoding property is not implemented
    - write/writelines will not raise a TypeError exception when non-string data
      is written; it will write str(data) instead
    - universal newlines are not supported and the newlines property is not implemented
- file argument can be descriptor, file name or file object.
"""
def __init__(self, f, mode='r', bufsize=-1):
if not isinstance(f, (basestring, int, file)):
raise TypeError('f(ile) should be int, str, unicode or file, not %r' % f)
if isinstance(f, basestring):
f = open(f, mode, 0)
if isinstance(f, int):
fileno = f
self._name = "<fd:%d>" % fileno
else:
fileno = os.dup(f.fileno())
self._name = f.name
if f.mode != mode:
raise ValueError('file.mode %r does not match mode parameter %r' % (f.mode, mode))
self._name = f.name
f.close()
super(GreenPipe, self).__init__(_SocketDuckForFd(fileno), mode, bufsize)
set_nonblocking(self)
self.softspace = 0
@property
def name(self): return self._name
def __repr__(self):
return "<%s %s %r, mode %r at 0x%x>" % (
self.closed and 'closed' or 'open',
self.__class__.__name__,
self.name,
self.mode,
(id(self) < 0) and (sys.maxint +id(self)) or id(self))
def close(self):
super(GreenPipe, self).close()
for method in ['fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
'readline', 'readlines', 'seek', 'tell', 'truncate',
'write', 'xreadlines', '__iter__', 'writelines']:
setattr(self, method, _operationOnClosedFile)
if getattr(file, '__enter__', None):
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
    def xreadlines(self, buffer):
        return iter(self)
def readinto(self, buf):
data = self.read(len(buf)) #FIXME could it be done without allocating intermediate?
n = len(data)
try:
buf[:n] = data
except TypeError, err:
if not isinstance(buf, array.array):
raise err
buf[:n] = array.array('c', data)
return n
def _get_readahead_len(self):
try:
return len(self._rbuf.getvalue()) # StringIO in 2.5
except AttributeError:
return len(self._rbuf) # str in 2.4
    def _clear_readahead_buf(self):
        n = self._get_readahead_len()
        if n > 0:
            self.read(n)
def tell(self):
self.flush()
try:
return os.lseek(self.fileno(), 0, 1) - self._get_readahead_len()
except OSError, e:
raise IOError(*e.args)
def seek(self, offset, whence=0):
self.flush()
        if whence == 1 and offset == 0:  # tell synonym
return self.tell()
if whence == 1: # adjust offset by what is read ahead
            offset -= self._get_readahead_len()
try:
rv = os.lseek(self.fileno(), offset, whence)
except OSError, e:
raise IOError(*e.args)
else:
self._clear_readahead_buf()
return rv
if getattr(file, "truncate", None): # not all OSes implement truncate
def truncate(self, size=-1):
self.flush()
            if size == -1:
size = self.tell()
try:
rv = os.ftruncate(self.fileno(), size)
except OSError, e:
raise IOError(*e.args)
else:
self.seek(size) # move position&clear buffer
return rv
def isatty(self):
try:
return os.isatty(self.fileno())
except OSError, e:
raise IOError(*e.args)
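# A minimal usage sketch (illustrative only, not part of the original module;
# assumes a pipe created with os.pipe()): wrapping the read end in a GreenPipe
# makes reads yield to the hub instead of blocking the whole process.
#
#     r, w = os.pipe()
#     reader = GreenPipe(r, 'r')
#     os.write(w, 'hello\n')
#     line = reader.readline()   # cooperatively waits until data is available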
# import the SSL module here so we can refer to the greenio.SSL exception classes
try:
from OpenSSL import SSL
except ImportError:
# pyOpenSSL not installed, define exceptions anyway for convenience
class SSL(object):
class WantWriteError(object):
pass
class WantReadError(object):
pass
class ZeroReturnError(object):
pass
class SysCallError(object):
pass
def shutdown_safe(sock):
""" Shuts down the socket. This is a convenience method for
code that wants to gracefully handle regular sockets, SSL.Connection
sockets from PyOpenSSL and ssl.SSLSocket objects from Python 2.6
interchangeably. Both types of ssl socket require a shutdown() before
close, but they have different arity on their shutdown method.
Regular sockets don't need a shutdown before close, but it doesn't hurt.
"""
try:
try:
# socket, ssl.SSLSocket
return sock.shutdown(socket.SHUT_RDWR)
except TypeError:
# SSL.Connection
return sock.shutdown()
except socket.error, e:
# we don't care if the socket is already closed;
# this will often be the case in an http server context
if get_errno(e) != errno.ENOTCONN:
raise
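# Hedged usage sketch (illustrative only; `listener` and `handle()` are
# hypothetical): the same teardown path can close a plain socket, an
# ssl.SSLSocket or a pyOpenSSL SSL.Connection.
#
#     conn, _addr = listener.accept()
#     try:
#         handle(conn)
#     finally:
#         shutdown_safe(conn)
#         conn.close()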
|
|
"""DDNS without TSIG"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_msg
import srv_control
import misc
@pytest.mark.v4
@pytest.mark.ddns
@pytest.mark.notsig
@pytest.mark.forward_reverse_add
def test_ddns4_notsig_forw_and_rev_add_success_hostname():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.10-192.168.50.10')
srv_control.add_ddns_server('127.0.0.1', '53001')
srv_control.add_ddns_server_options('enable-updates', True)
srv_control.add_ddns_server_options('generated-prefix', 'four')
srv_control.add_ddns_server_options('qualifying-suffix', 'example.com')
srv_control.add_forward_ddns('four.example.com.', 'EMPTY_KEY')
srv_control.add_reverse_ddns('50.168.192.in-addr.arpa.', 'EMPTY_KEY')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_control.use_dns_set_number(20)
srv_control.start_srv('DNS', 'started')
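    # Step 1: confirm the forward (A) record does not exist before any lease is granted.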
misc.test_procedure()
srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER', expect_include=False)
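    # Step 2: run a DISCOVER/OFFER exchange, then REQUEST with the hostname option included.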
misc.test_procedure()
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(1)
srv_msg.response_check_content('yiaddr', '192.168.50.10')
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
misc.test_procedure()
srv_msg.client_copy_option('server_id')
srv_msg.client_does_include_with_value('requested_addr', '192.168.50.10')
srv_msg.client_requests_option(1)
srv_msg.client_does_include_with_value('hostname', 'aa.four.example.com')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ACK')
srv_msg.response_check_content('yiaddr', '192.168.50.10')
srv_msg.response_check_include_option(1)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_include_option(12)
srv_msg.response_check_option_content(12, 'value', 'aa.four.example.com')
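    # Step 3: both the forward (A) and reverse (PTR) records should now resolve.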
misc.test_procedure()
srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER')
srv_msg.dns_option_content('ANSWER', 'rdata', '192.168.50.10')
srv_msg.dns_option_content('ANSWER', 'rrname', 'aa.four.example.com.')
misc.test_procedure()
srv_msg.dns_question_record('10.50.168.192.in-addr.arpa.', 'PTR', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER')
srv_msg.dns_option_content('ANSWER', 'rdata', 'aa.four.example.com.')
srv_msg.dns_option_content('ANSWER', 'rrname', '10.50.168.192.in-addr.arpa.')
@pytest.mark.v4
@pytest.mark.ddns
@pytest.mark.notsig
@pytest.mark.forward_reverse_add
def test_ddns4_notsig_forw_and_rev_add_fail_hostname():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.10-192.168.50.10')
srv_control.add_ddns_server('127.0.0.1', '53001')
srv_control.add_ddns_server_options('enable-updates', True)
srv_control.add_ddns_server_options('generated-prefix', 'four')
srv_control.add_ddns_server_options('qualifying-suffix', 'example.com')
srv_control.add_forward_ddns('four.example.com.', 'EMPTY_KEY')
srv_control.add_reverse_ddns('50.168.192.in-addr.arpa.', 'EMPTY_KEY')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_control.use_dns_set_number(20)
srv_control.start_srv('DNS', 'started')
misc.test_procedure()
srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER', expect_include=False)
misc.test_procedure()
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(1)
srv_msg.response_check_content('yiaddr', '192.168.50.10')
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
misc.test_procedure()
srv_msg.client_copy_option('server_id')
srv_msg.client_does_include_with_value('requested_addr', '192.168.50.10')
srv_msg.client_requests_option(1)
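    # The hostname below is outside the configured forward zone (four.example.com.),
    # so no DDNS update should be performed.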
srv_msg.client_does_include_with_value('hostname', 'aa.four.exae.com')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ACK')
srv_msg.response_check_content('yiaddr', '192.168.50.10')
srv_msg.response_check_include_option(1)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_include_option(12)
srv_msg.response_check_option_content(12, 'value', 'aa.four.exae.com')
misc.test_procedure()
srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER', expect_include=False)
misc.test_procedure()
srv_msg.dns_question_record('10.50.168.192.in-addr.arpa.', 'PTR', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER', expect_include=False)
@pytest.mark.v4
@pytest.mark.ddns
@pytest.mark.notsig
@pytest.mark.forward_reverse_add
def test_ddns4_notsig_forw_and_rev_notenabled_hostname():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.10-192.168.50.10')
srv_control.add_ddns_server('127.0.0.1', '53001')
srv_control.add_ddns_server_options('enable-updates', False)
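    # DDNS updates are disabled for this test: the ACK will still echo the hostname,
    # but no A/PTR records should be created.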
srv_control.add_ddns_server_options('generated-prefix', 'four')
srv_control.add_ddns_server_options('qualifying-suffix', 'example.com')
srv_control.add_forward_ddns('four.example.com.', 'EMPTY_KEY')
srv_control.add_reverse_ddns('50.168.192.in-addr.arpa.', 'EMPTY_KEY')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_control.use_dns_set_number(20)
srv_control.start_srv('DNS', 'started')
misc.test_procedure()
srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER', expect_include=False)
misc.test_procedure()
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(1)
srv_msg.response_check_content('yiaddr', '192.168.50.10')
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
misc.test_procedure()
srv_msg.client_copy_option('server_id')
srv_msg.client_does_include_with_value('requested_addr', '192.168.50.10')
srv_msg.client_requests_option(1)
srv_msg.client_does_include_with_value('hostname', 'aa.four.example.com')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ACK')
srv_msg.response_check_content('yiaddr', '192.168.50.10')
srv_msg.response_check_include_option(1)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_include_option(12)
srv_msg.response_check_option_content(12, 'value', 'aa.four.example.com')
misc.test_procedure()
srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER', expect_include=False)
misc.test_procedure()
srv_msg.dns_question_record('10.50.168.192.in-addr.arpa.', 'PTR', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER', expect_include=False)
@pytest.mark.v4
@pytest.mark.ddns
@pytest.mark.notsig
@pytest.mark.forward_update
def test_ddns4_notsig_forw_and_rev_update_success_hostname():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.10-192.168.50.10')
srv_control.add_ddns_server('127.0.0.1', '53001')
srv_control.add_ddns_server_options('enable-updates', True)
srv_control.add_ddns_server_options('generated-prefix', 'four')
srv_control.add_ddns_server_options('qualifying-suffix', 'example.com')
srv_control.add_forward_ddns('four.example.com.', 'EMPTY_KEY')
srv_control.add_reverse_ddns('50.168.192.in-addr.arpa.', 'EMPTY_KEY')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_control.use_dns_set_number(20)
srv_control.start_srv('DNS', 'started')
misc.test_procedure()
srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER', expect_include=False)
misc.test_procedure()
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(1)
srv_msg.response_check_content('yiaddr', '192.168.50.10')
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
misc.test_procedure()
srv_msg.client_copy_option('server_id')
srv_msg.client_does_include_with_value('requested_addr', '192.168.50.10')
srv_msg.client_requests_option(1)
srv_msg.client_does_include_with_value('hostname', 'aa.four.example.com')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ACK')
srv_msg.response_check_content('yiaddr', '192.168.50.10')
srv_msg.response_check_include_option(1)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_include_option(12)
srv_msg.response_check_option_content(12, 'value', 'aa.four.example.com')
misc.test_procedure()
srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER')
srv_msg.dns_option_content('ANSWER', 'rdata', '192.168.50.10')
srv_msg.dns_option_content('ANSWER', 'rrname', 'aa.four.example.com.')
misc.test_procedure()
srv_msg.dns_question_record('10.50.168.192.in-addr.arpa.', 'PTR', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER')
srv_msg.dns_option_content('ANSWER', 'rdata', 'aa.four.example.com.')
srv_msg.dns_option_content('ANSWER', 'rrname', '10.50.168.192.in-addr.arpa.')
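    # Second phase: restart the server with a new pool (192.168.50.11) and verify that
    # a new lease with the same hostname points the A record at the new address and
    # adds the matching PTR record.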
misc.test_setup()
srv_control.start_srv('DHCP', 'stopped')
srv_control.clear_some_data('leases')
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.11-192.168.50.11')
srv_control.add_ddns_server('127.0.0.1', '53001')
srv_control.add_ddns_server_options('enable-updates', True)
srv_control.add_ddns_server_options('generated-prefix', 'four')
srv_control.add_ddns_server_options('qualifying-suffix', 'example.com')
srv_control.add_forward_ddns('four.example.com.', 'EMPTY_KEY')
srv_control.add_reverse_ddns('50.168.192.in-addr.arpa.', 'EMPTY_KEY')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER')
srv_msg.dns_option_content('ANSWER', 'rdata', '192.168.50.10')
srv_msg.dns_option_content('ANSWER', 'rrname', 'aa.four.example.com.')
misc.test_procedure()
srv_msg.dns_question_record('10.50.168.192.in-addr.arpa.', 'PTR', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER')
srv_msg.dns_option_content('ANSWER', 'rdata', 'aa.four.example.com.')
srv_msg.dns_option_content('ANSWER', 'rrname', '10.50.168.192.in-addr.arpa.')
misc.test_procedure()
srv_msg.client_requests_option(1)
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(1)
srv_msg.response_check_content('yiaddr', '192.168.50.11')
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
misc.test_procedure()
srv_msg.client_copy_option('server_id')
srv_msg.client_does_include_with_value('requested_addr', '192.168.50.11')
srv_msg.client_requests_option(1)
srv_msg.client_does_include_with_value('hostname', 'aa.four.example.com')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ACK')
srv_msg.response_check_content('yiaddr', '192.168.50.11')
srv_msg.response_check_include_option(1)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_include_option(12)
srv_msg.response_check_option_content(12, 'value', 'aa.four.example.com')
misc.test_procedure()
srv_msg.dns_question_record('aa.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER')
srv_msg.dns_option_content('ANSWER', 'rdata', '192.168.50.11')
srv_msg.dns_option_content('ANSWER', 'rrname', 'aa.four.example.com.')
misc.test_procedure()
srv_msg.dns_question_record('11.50.168.192.in-addr.arpa.', 'PTR', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER')
srv_msg.dns_option_content('ANSWER', 'rdata', 'aa.four.example.com.')
srv_msg.dns_option_content('ANSWER', 'rrname', '11.50.168.192.in-addr.arpa.')
@pytest.mark.v4
@pytest.mark.ddns
@pytest.mark.notsig
@pytest.mark.forward_reverse_add
def test_ddns4_notsig_forw_and_rev_two_dhci_hostname():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.10-192.168.50.11')
srv_control.add_ddns_server('127.0.0.1', '53001')
srv_control.add_ddns_server_options('enable-updates', True)
srv_control.add_ddns_server_options('generated-prefix', 'four')
srv_control.add_ddns_server_options('qualifying-suffix', 'example.com')
srv_control.add_forward_ddns('four.example.com.', 'EMPTY_KEY')
srv_control.add_reverse_ddns('50.168.192.in-addr.arpa.', 'EMPTY_KEY')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_control.use_dns_set_number(20)
srv_control.start_srv('DNS', 'started')
misc.test_procedure()
srv_msg.dns_question_record('client1.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER', expect_include=False)
misc.test_procedure()
srv_msg.dns_question_record('client2.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER', expect_include=False)
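    # Client 1 add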
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:11')
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(1)
srv_msg.response_check_content('yiaddr', '192.168.50.10')
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:11')
srv_msg.client_copy_option('server_id')
srv_msg.client_does_include_with_value('requested_addr', '192.168.50.10')
srv_msg.client_requests_option(1)
srv_msg.client_does_include_with_value('hostname', 'client1.four.example.com')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ACK')
srv_msg.response_check_content('yiaddr', '192.168.50.10')
srv_msg.response_check_include_option(1)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_include_option(12)
srv_msg.response_check_option_content(12, 'value', 'client1.four.example.com')
misc.test_procedure()
srv_msg.dns_question_record('client1.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER')
srv_msg.dns_option_content('ANSWER', 'rdata', '192.168.50.10')
srv_msg.dns_option_content('ANSWER', 'rrname', 'client1.four.example.com.')
# Client 2 add
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:12')
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(1)
srv_msg.response_check_content('yiaddr', '192.168.50.11')
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:12')
srv_msg.client_copy_option('server_id')
srv_msg.client_does_include_with_value('requested_addr', '192.168.50.11')
srv_msg.client_requests_option(1)
srv_msg.client_does_include_with_value('hostname', 'client2.four.example.com')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ACK')
srv_msg.response_check_content('yiaddr', '192.168.50.11')
srv_msg.response_check_include_option(1)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_include_option(12)
srv_msg.response_check_option_content(12, 'value', 'client2.four.example.com')
misc.test_procedure()
srv_msg.dns_question_record('client2.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER')
srv_msg.dns_option_content('ANSWER', 'rdata', '192.168.50.11')
srv_msg.dns_option_content('ANSWER', 'rrname', 'client2.four.example.com.')
@pytest.mark.v4
@pytest.mark.ddns
@pytest.mark.notsig
@pytest.mark.forward_reverse_add
def test_ddns4_notsig_forw_and_rev_dhci_conflicts_hostname():
misc.test_setup()
srv_control.config_srv_subnet('192.168.50.0/24', '192.168.50.10-192.168.50.11')
srv_control.add_ddns_server('127.0.0.1', '53001')
srv_control.add_ddns_server_options('enable-updates', True)
srv_control.add_ddns_server_options('generated-prefix', 'four')
srv_control.add_ddns_server_options('qualifying-suffix', 'example.com')
srv_control.add_forward_ddns('four.example.com.', 'EMPTY_KEY')
srv_control.add_reverse_ddns('50.168.192.in-addr.arpa.', 'EMPTY_KEY')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
srv_control.use_dns_set_number(20)
srv_control.start_srv('DNS', 'started')
misc.test_procedure()
srv_msg.dns_question_record('client1.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER', expect_include=False)
misc.test_procedure()
srv_msg.dns_question_record('10.50.168.192.in-addr.arpa.', 'PTR', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER', expect_include=False)
misc.test_procedure()
srv_msg.dns_question_record('client2.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER', expect_include=False)
misc.test_procedure()
srv_msg.dns_question_record('11.50.168.192.in-addr.arpa.', 'PTR', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER', expect_include=False)
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:11')
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(1)
srv_msg.response_check_content('yiaddr', '192.168.50.10')
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:11')
srv_msg.client_copy_option('server_id')
srv_msg.client_does_include_with_value('requested_addr', '192.168.50.10')
srv_msg.client_requests_option(1)
srv_msg.client_does_include_with_value('hostname', 'client1.four.example.com')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ACK')
srv_msg.response_check_content('yiaddr', '192.168.50.10')
srv_msg.response_check_include_option(1)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_include_option(12)
srv_msg.response_check_option_content(12, 'value', 'client1.four.example.com')
misc.test_procedure()
srv_msg.dns_question_record('client1.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER')
srv_msg.dns_option_content('ANSWER', 'rdata', '192.168.50.10')
srv_msg.dns_option_content('ANSWER', 'rrname', 'client1.four.example.com.')
misc.test_procedure()
srv_msg.dns_question_record('10.50.168.192.in-addr.arpa.', 'PTR', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER')
srv_msg.dns_option_content('ANSWER', 'rdata', 'client1.four.example.com.')
srv_msg.dns_option_content('ANSWER', 'rrname', '10.50.168.192.in-addr.arpa.')
# Client 2 add
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:12')
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(1)
srv_msg.response_check_content('yiaddr', '192.168.50.11')
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:12')
srv_msg.client_copy_option('server_id')
srv_msg.client_does_include_with_value('requested_addr', '192.168.50.11')
srv_msg.client_requests_option(1)
srv_msg.client_does_include_with_value('hostname', 'client2.four.example.com')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ACK')
srv_msg.response_check_content('yiaddr', '192.168.50.11')
srv_msg.response_check_include_option(1)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_include_option(12)
srv_msg.response_check_option_content(12, 'value', 'client2.four.example.com')
misc.test_procedure()
srv_msg.dns_question_record('client2.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER')
srv_msg.dns_option_content('ANSWER', 'rdata', '192.168.50.11')
srv_msg.dns_option_content('ANSWER', 'rrname', 'client2.four.example.com.')
misc.test_procedure()
srv_msg.dns_question_record('11.50.168.192.in-addr.arpa.', 'PTR', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER')
srv_msg.dns_option_content('ANSWER', 'rdata', 'client2.four.example.com.')
srv_msg.dns_option_content('ANSWER', 'rrname', '11.50.168.192.in-addr.arpa.')
# Client 2 try to update client's 1 domain
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:12')
srv_msg.client_send_msg('DISCOVER')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'OFFER')
srv_msg.response_check_include_option(1)
srv_msg.response_check_content('yiaddr', '192.168.50.11')
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'chaddr', '00:00:00:00:00:12')
srv_msg.client_copy_option('server_id')
srv_msg.client_does_include_with_value('requested_addr', '192.168.50.11')
srv_msg.client_does_include_with_value('hostname', 'client1.four.example.com')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ACK')
srv_msg.response_check_content('yiaddr', '192.168.50.11')
srv_msg.response_check_include_option(1)
srv_msg.response_check_option_content(1, 'value', '255.255.255.0')
srv_msg.response_check_include_option(12)
srv_msg.response_check_option_content(12, 'value', 'client1.four.example.com')
# address and domain name should not be changed!
misc.test_procedure()
srv_msg.dns_question_record('client1.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER')
srv_msg.dns_option_content('ANSWER', 'rdata', '192.168.50.10')
srv_msg.dns_option_content('ANSWER', 'rrname', 'client1.four.example.com.')
misc.test_procedure()
srv_msg.dns_question_record('10.50.168.192.in-addr.arpa.', 'PTR', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER')
srv_msg.dns_option_content('ANSWER', 'rdata', 'client1.four.example.com.')
srv_msg.dns_option_content('ANSWER', 'rrname', '10.50.168.192.in-addr.arpa.')
misc.test_procedure()
srv_msg.dns_question_record('client2.four.example.com', 'A', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER', expect_include=False)
misc.test_procedure()
srv_msg.dns_question_record('11.50.168.192.in-addr.arpa.', 'PTR', 'IN')
srv_msg.client_send_dns_query()
misc.pass_criteria()
srv_msg.send_wait_for_query('MUST')
srv_msg.dns_option('ANSWER', expect_include=False)
|
|
# Copyright 2009-2019 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""
"""
from __future__ import unicode_literals, print_function
from builtins import str
from django.conf import settings
from lino.utils import curry
from lino.core.frames import Frame
from lino.core.requests import InstanceAction
from lino.core.requests import VirtualRow
from lino.core.actions import ShowEmptyTable, Action
from lino.core import fields
from lino.modlib.printing.mixins import Printable
from lino.modlib.printing.mixins import DirectPrintAction
# from lino.modlib.printing.choicelists import SimpleBuildMethod
from etgen.html import E
class EmptyTableRow(VirtualRow, Printable):
"""
Base class for virtual rows of an :class:`EmptyTable`.
An EmptyTableRow instance
"""
class Meta(object):
abstract = True
pk = -99998
def __init__(self, table, **kw):
self._table = table
VirtualRow.__init__(self, **kw)
def __str__(self):
return str(self._table.label)
def before_printable_build(self, bm):
pass
def filename_root(self):
return self._table.app_label + '.' + self._table.__name__
# def get_print_language(self):
# # same as Printable.get_print_language
# return settings.SITE.DEFAULT_LANGUAGE.django_code
# def get_printable_context(self, ar=None, **kw):
# # same as Model.get_printable_context
# kw = ar.get_printable_context(**kw)
# kw.update(this=self) # preferred in new templates
# kw.update(language=self.get_print_language() \
# or settings.SITE.DEFAULT_LANGUAGE.django_code)
# return kw
def get_template_groups(self):
return self._table.get_template_groups()
def get_print_templates(self, *args):
"""Overrides
:meth:`lino.modlib.printing.mixins.Printable.get_print_templates`
"""
return self._table.get_print_templates(*args)
def get_build_method(self):
return self._table.build_method \
or super(EmptyTableRow, self).get_build_method()
def get_build_options(self, bm, **opts):
# header_center
return self._table.get_build_options(bm, **opts)
def get_subtitle(self, ar):
return ', '.join(self._table.get_title_tags(ar))
def __getattr__(self, name):
"""
Since there is only one EmptyTableRow class, we simulate a
getter here by manually creating an InstanceAction.
"""
# if name not in ('get_story'):
# raise Exception("20170910 %s" % name)
v = getattr(self._table, name)
if isinstance(v, Action):
return InstanceAction(v, self._table, self, None)
# 20130525 dd.Report calls `get_story` on `self`, not on the `cls`
if callable(v):
return curry(v, self)
#~ return v
#~ raise Exception("")
raise AttributeError(
"EmptyTableRow on %s has no action and no callable '%s'" % (
self._table, name))
class EmptyTable(Frame):
"""
A "Table" that has exactly one virtual row and thus is visible
only using a Detail view on that row.
Subclassed by :class:`lino.modlib.about.About` and
:class:`Report`.
"""
_detail_action_class = ShowEmptyTable
#~ debug_permissions = True
#~ has_navigator = False
#~ hide_top_toolbar = True
abstract = True
hide_navigator = True
default_list_action_name = 'show'
# default_elem_action_name = 'show'
display_mode = None # for Actor
build_method = None
@classmethod
def get_default_action(cls):
return cls.detail_action
@classmethod
def get_template_groups(self):
return [self.app_label + '/' + self.__name__]
@classmethod
def get_print_templates(self, bm, action):
"""Called from EmptyTableRow. """
return [bm.get_default_template(self)]
@classmethod
def get_build_options(self, bm, **opts):
return opts
@classmethod
def create_instance(self, ar, **kw):
if self.parameters:
kw.update(ar.param_values)
obj = EmptyTableRow(self, **kw)
kw = ar.ah.store.row2dict(ar, obj)
obj._data = kw
obj.update(**kw)
return obj
@classmethod
def wildcard_data_elems(self):
return self.parameters.values()
@classmethod
def get_data_elem(self, name):
de = super(EmptyTable, self).get_data_elem(name)
if de is not None:
return de
de = self.parameters.get(name, None)
if de is not None:
return de
a = name.split('.')
if len(a) == 2:
return getattr(getattr(settings.SITE.models, a[0]), a[1])
class Report(EmptyTable):
"""
A special kind of :class:`EmptyTable` used to create "reports". A report
is a series of headings, paragraphs and tables combined into a single
printable and previewable document.
When subclassing this, application code must either define
:attr:`report_items` or implement an alternative :meth:`get_story`.
Usage examples:
:class:`lino_xl.lib.courses.StatusReport`
:class:`lino_xl.lib.ledger.Situation`
:class:`lino_xl.lib.ledger.ActivityReport`
:class:`lino_welfare.modlib.integ.ActivityReport`
    Note that there are also :class:`lino.modlib.users.UserPlan` and
    :class:`lino.mixins.Story` for more expensive "reports" that work on
    cached data, e.g. :class:`lino_xl.lib.sheets.Report`.
    A minimal subclass sketch follows at the end of this module.
"""
detail_layout = "body"
abstract = True
do_print = DirectPrintAction()
# go_button = ExplicitRefresh()
report_items = None
""" """
# @classmethod
# def request(self, **kw):
# """Return an action request on this actor.
# """
# kw.update(actor=self)
# return ActionRequest(**kw)
@classmethod
def get_template_groups(self):
return ['report', self.app_label + '/' + self.__name__]
# @classmethod
# def get_print_templates(self, bm, action):
# """Called from EmptyTableRow.
# Overrides
# :meth:`lino.modlib.printing.mixins.Printable.get_print_templates`
# """
# if isinstance(bm, SimpleBuildMethod):
# return ['Report'+bm.template_ext]
# return [bm.get_default_template(self)]
# return ['Report'+bm.template_ext, bm.get_default_template(self)]
@classmethod
def get_build_options(self, bm, **opts):
if bm.templates_name == 'wk':
opts['footer-left'] = "<p>Footer [page]</p>"
return opts
# @classmethod
# def get_title_base(self, ar):
# return self.title or self.label
@classmethod
def get_title(self, ar):
return self.title or self.label
@fields.virtualfield(fields.HtmlBox())
def body(cls, self, ar):
ar.master_instance = self
return ar.story2html(self.get_story(ar))
@classmethod
def as_appy_pod_xml(cls, self, apr):
chunks = tuple(apr.story2odt(
self.get_story(apr.ar), master_instance=self))
return str('').join(chunks) # must be utf8 encoded
@classmethod
def get_story(cls, self, ar):
"""
Yield a sequence of story items. Every item can be (1) an
ElementTree element or (2) a table or (3) an action request.
"""
# cls.check_params(cls.param_values)
if cls.report_items is None:
raise Exception("{0} has no report_items".format(cls))
for A in cls.report_items:
yield E.h2(str(A.label))
# if A.help_text:
# yield E.p(str(A.help_text))
yield A
@classmethod
def to_rst(self, ar, column_names=None, **kwargs):
raise Exception("To be replaced by rt.show()")
# obj = self.create_instance(ar)
# return """\
# .. raw:: html
# %s
# """ % tostring(obj.body).replace('\n', ' ')
|
|
# Copyright 2012 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the logic module of aeta."""
__author__ = '[email protected] (Robert Schuppenies)'
# Disable checking; pylint:disable-msg=C0111,C0103,W0212
# pylint:disable-msg=C0103,R0902,R0201,R0904
# - docstrings
# - access to protected members
# - too many public methods
# - setUp() and tearDown() method names
# - too many instance attributes
# - method could be a function
import copy
import inspect
import os
import sys
import types
import unittest
from aeta import config
from aeta import logic
from aeta import models
from tests import utils
_REL_PATH = 'a/relative/path'
_NATIVE_REL_PATH = os.sep.join(_REL_PATH.split('/'))
_DEEP_ROOT_PATH = os.sep.join(['a', 'deep', 'root', 'path'])
if sys.platform.startswith('win'):
_DEEP_ROOT_PATH = os.path.join('C:', _DEEP_ROOT_PATH)
else:
_DEEP_ROOT_PATH = os.sep + _DEEP_ROOT_PATH
# Pattern of test modules.
TEST_MODULE_PATTERN = r'^test_[\w]+$'
class GetAbsPathFromPackagenameTest(unittest.TestCase,
utils.MockAttributeMixin):
"""Tests for the get_abs_path_from_package_name function."""
# pylint:disable-msg=C0103
def setUp(self):
self.packagename = 'package.subpackage'
self.abs_packagepath = os.path.join(_DEEP_ROOT_PATH, 'package',
'subpackage')
# pylint:disable-msg=C0103
def tearDown(self):
self.tear_down_attributes()
def test_invalid_input(self):
self.assertRaises(TypeError, logic.get_abs_path_from_package_name, None)
def test_empty_package_name(self):
self.assertEqual(None, logic.get_abs_path_from_package_name(''))
def test_with_non_existing_package(self):
@self.mock(logic)
def load_module_from_module_name(name, errors, reload_mod,
include_test_functions=True):
return None
self.assertEqual(None,
logic.get_abs_path_from_package_name(self.packagename))
def test_with_existing_package(self):
# pylint: disable-msg=W0613
@self.mock(logic)
def load_module_from_module_name(packagename, errors, reload_mod,
include_test_functions=True):
return types.ModuleType('foo')
@self.mock(inspect)
def getfile(module):
return '%s/__init__.py' % _DEEP_ROOT_PATH
self.assertEqual(_DEEP_ROOT_PATH + os.sep,
logic.get_abs_path_from_package_name(''))
def test_init_module(self):
"""Ensure that the __init__ module is not considered a package."""
@self.mock(logic)
def load_module_from_module_name(packagename, errors, reload_mod,
include_test_functions=True):
return types.ModuleType('package.__init__')
@self.mock(inspect)
def getfile(module):
return '%s/__init__.py' % _DEEP_ROOT_PATH
self.assertEqual(None, logic.get_abs_path_from_package_name(''))
class GetRootRelativePathTest(unittest.TestCase):
"""Tests for the get_root_relative_path function."""
# pylint: disable-msg=C0103
def setUp(self):
self.orig_isdir = os.path.isdir
# pylint: disable-msg=C0103
def tearDown(self):
os.path.isdir = self.orig_isdir
def test_invalid_input(self):
self.assertRaises(TypeError, logic.get_root_relative_path, None, '')
self.assertRaises(TypeError, logic.get_root_relative_path, '', None)
def test_no_root(self):
self.assertEqual(None, logic.get_root_relative_path('', ''))
self.assertEqual(None, logic.get_root_relative_path('/', ''))
def test_with_root(self):
os.path.isdir = lambda x: True
if sys.platform.startswith('win'):
self.assertEqual(None, logic.get_root_relative_path('/', 'C:/'))
self.assertEqual('', logic.get_root_relative_path('C:/', 'C:/'))
else:
self.assertEqual('', logic.get_root_relative_path('/', '/'))
def test_invalid_relative_path(self):
os.path.isdir = lambda x: True
self.assertEqual(None,
logic.get_root_relative_path('',
'/a/deep/rootdirectory'))
self.assertEqual(None, logic.get_root_relative_path('/',
'/a/deep/rootdirectory'))
self.assertEqual(None, logic.get_root_relative_path('/',
'/a/deep/rootdirectory/'))
self.assertEqual(None, logic.get_root_relative_path('some/other/path',
'/a/deep/rootdirectory'))
self.assertEqual('', logic.get_root_relative_path(_DEEP_ROOT_PATH,
_DEEP_ROOT_PATH))
def test_valid_relative_path(self):
os.path.isdir = lambda x: True
self.assertEqual(_REL_PATH, logic.get_root_relative_path(
_DEEP_ROOT_PATH + os.sep + _REL_PATH,
_DEEP_ROOT_PATH))
# acceptable name - pylint: disable-msg=C0103
def test_relative_path_partly_included_in_root(self):
os.path.isdir = lambda x: True
path = '/home/foobar/hello'
root = '/home/foo'
self.assertEqual(None, logic.get_root_relative_path(path, root))
class LoadModuleFromModuleNameTest(unittest.TestCase, utils.TestDataMixin):
"""Tests for the load_module_from_module_name function."""
def setUp(self):
self.setup_test_data()
def tearDown(self):
self.tear_down_test_data()
def test_invalid_input(self):
self.assertRaises(TypeError, logic.load_module_from_module_name,
None, [])
self.assertRaises(TypeError, logic.load_module_from_module_name,
'', 5)
self.assertRaises(TypeError, logic.load_module_from_module_name,
'', [], reload_mod=None)
self.assertRaises(TypeError, logic.load_module_from_module_name,
'', [], include_import_error=None)
self.assertRaises(TypeError, logic.load_module_from_module_name,
'', [], include_test_functions=None)
def test_empty_module_name(self):
self.assertEqual(None, logic.load_module_from_module_name('', []))
def test_invalid_package(self):
result_errors = []
result_module = logic.load_module_from_module_name('--', result_errors)
self.assertEqual(None, result_module)
self.assertEqual([], result_errors)
def test_include_import_error(self):
result_errors = []
result_module = logic.load_module_from_module_name(
'--', result_errors, include_import_error=True)
self.assertEqual(None, result_module)
self.assertEqual(1, len(result_errors))
self.assertEqual('--', result_errors[0][0])
def test_valid_package(self):
result_errors = []
result_module = logic.load_module_from_module_name(self.test_package_name,
result_errors)
self.assertNotEqual(None, result_module)
self.assertEqual([], result_errors)
def test_broken_module(self):
modulename = self.test_package_name + '.test_brokenmodule'
result_errors = []
result_module = logic.load_module_from_module_name(modulename,
result_errors)
self.assertEqual(None, result_module)
self.assertEqual(1, len(result_errors))
def test_valid_module(self):
modulename = self.test_package_name + '.test_goodmodule'
result_errors = []
result_module = logic.load_module_from_module_name(modulename,
result_errors)
self.assertNotEqual(None, result_module)
self.assertEqual([], result_errors)
def test_test_functions(self):
modulename = self.test_package_name + '.test_test_functions'
result_errors = []
# include_test_functions is True by default.
result_module = logic.load_module_from_module_name(modulename,
result_errors)
class_name = 'TestTestFunctionsWrappedTestFunctions'
result_class = getattr(result_module, class_name, None)
self.assertTrue(isinstance(result_class, type))
self.assertTrue(issubclass(result_class, unittest.TestCase))
def test_no_test_functions(self):
modulename = self.test_package_name + '.test_test_functions'
result_errors = []
# The module might have been loaded by the previous test function, which
# would have wrapped test functions.
sys.modules.pop(modulename, None)
result_module = logic.load_module_from_module_name(
modulename, result_errors, include_test_functions=False)
class_name = 'TestTestFunctionsWrappedTestFunctions'
self.assertFalse(hasattr(result_module, class_name))
class GetModuleNamesInPackageTest(unittest.TestCase, utils.TestDataMixin):
"""Tests for the get_module_names_in_package function."""
# pylint: disable-msg=C0103
def setUp(self):
self.setup_test_data()
# pylint: disable-msg=C0103
def tearDown(self):
self.tear_down_test_data()
def test_invalid_input(self):
self.assertRaises(TypeError, logic.get_module_names_in_package, None, [])
self.assertRaises(TypeError, logic.get_module_names_in_package, '', None)
self.assertRaises(TypeError, logic.get_module_names_in_package, '', [],
module_pattern=None)
self.assertRaises(TypeError, logic.get_module_names_in_package, '', [],
depth=None)
def test_empty_package_name(self):
result_modules = logic.get_module_names_in_package('', TEST_MODULE_PATTERN)
self.assertEqual([], result_modules)
def test_invalid_package_name(self):
result_modules = logic.get_module_names_in_package('2',
TEST_MODULE_PATTERN)
self.assertEqual([], result_modules)
# acceptable name - pylint: disable-msg=C0103
def test_valid_path_with_one_broken_module(self):
result_modules = logic.get_module_names_in_package(self.test_package_name,
TEST_MODULE_PATTERN)
self.assertEqual(9, len(result_modules))
def test_depth_smaller_than_zero(self):
self.assertRaises(ValueError, logic.get_module_names_in_package,
self.test_package_name, TEST_MODULE_PATTERN, depth=-1)
def test_depth_limited(self):
modules = logic.get_module_names_in_package(
self.test_package_name, TEST_MODULE_PATTERN, depth=1)
found_mod_from_subpackage = False
for mod in modules:
if 'subpackage' in mod:
found_mod_from_subpackage = True
break
self.assertFalse(found_mod_from_subpackage)
def test_depth_unlimited(self):
modules = logic.get_module_names_in_package(
self.test_package_name, TEST_MODULE_PATTERN, depth=0)
found_mod_from_subpackage = False
for mod in modules:
if 'subpackage' in mod:
found_mod_from_subpackage = True
break
self.assertTrue(found_mod_from_subpackage)
class IsPrefixTest(unittest.TestCase):
"""Tests for the _is_prefix function."""
def test_is_prefix(self):
self.assertTrue(logic._is_prefix('package.module',
'package.module.Class.method'))
self.assertTrue(logic._is_prefix('', 'something'))
self.assertFalse(logic._is_prefix('a.b', 'a'))
self.assertFalse(logic._is_prefix('a.b', 'a.c'))
self.assertFalse(logic._is_prefix('package.module', 'package.module1'))
self.assertFalse(logic._is_prefix('a', ''))
class IsInTestPackageTest(unittest.TestCase):
"""Tests for the _is_in_test_package function."""
def test_is_in_test_package(self):
conf = copy.copy(config.get_config())
conf.test_package_names = ['a', 'b.c']
self.assertTrue(logic._is_in_test_package('a', conf))
self.assertTrue(logic._is_in_test_package('a.z', conf))
self.assertTrue(logic._is_in_test_package('b.c', conf))
self.assertTrue(logic._is_in_test_package('b.c.x', conf))
self.assertFalse(logic._is_in_test_package('c', conf))
self.assertFalse(logic._is_in_test_package('aaa', conf))
self.assertFalse(logic._is_in_test_package('', conf))
class GetRequestedObjectTest(unittest.TestCase, utils.TestDataMixin):
"""Tests for the get_requested_object function."""
def setUp(self):
self.setup_test_data()
self.config = copy.copy(config.get_config())
self.config.test_package_names = [self.test_package_name]
def tearDown(self):
self.tear_down_test_data()
def test_invalid_input(self):
self.assertRaises(TypeError, logic.get_requested_object, None, self.config)
def test_root(self):
obj = logic.get_requested_object('', self.config)
self.assertTrue(isinstance(obj, logic.Root))
def test_invalid_name(self):
fullname = self.test_package_name + '.a.non.existing.fullname'
obj = logic.get_requested_object(fullname, self.config)
self.assertTrue(isinstance(obj, logic.BadTest))
self.assertFalse(obj.exists)
self.assertEqual(1, len(obj.load_errors))
fullname = self.test_package_name + '.no_elements_fullname'
obj = logic.get_requested_object(fullname, self.config)
self.assertTrue(isinstance(obj, logic.BadTest))
self.assertFalse(obj.exists)
self.assertEqual(1, len(obj.load_errors))
def test_outside_test_package(self):
self.config.test_package_names = [self.test_package_name + '.subpackage']
fullname = self.test_package_name + '.test_goodmodule'
obj = logic.get_requested_object(fullname, self.config)
self.assertTrue(isinstance(obj, logic.BadTest))
self.assertFalse(obj.exists)
self.assertEqual(1, len(obj.load_errors))
def test_package(self):
result = logic.get_requested_object(self.test_package_name, self.config)
self.assertTrue(isinstance(result, logic.Package))
self.assertEqual(self.test_package_name, result.fullname)
def test_module(self):
fullname = self.test_package_name + '.test_goodmodule'
result = logic.get_requested_object(fullname, self.config)
self.assertTrue(isinstance(result, logic.Module))
self.assertEqual(fullname, result.fullname)
self.assertEqual(fullname, result.module.__name__)
def test_class(self):
fullname = self.test_package_name + '.test_goodmodule.Foo'
result = logic.get_requested_object(fullname, self.config)
self.assertTrue(isinstance(result, logic.Class))
self.assertEqual(fullname, result.fullname)
cls_name = '%s.%s' % (result.class_.__module__, result.class_.__name__)
self.assertEqual(fullname, cls_name)
def test_method(self):
fullname = self.test_package_name + '.test_goodmodule.Foo.bar'
result = logic.get_requested_object(fullname, self.config)
self.assertTrue(isinstance(result, logic.Method))
self.assertEqual(fullname, result.fullname)
method_name = '%s.%s.%s' % (result.class_.__module__,
result.class_.__name__, result.method_name)
self.assertEqual(fullname, method_name)
def test_broken_module(self):
fullname = self.test_package_name + '.test_brokenmodule'
result = logic.get_requested_object(fullname, self.config)
self.assertTrue(isinstance(result, logic.BadTest))
self.assertEqual(1, len(result.load_errors))
class GetUnitsTest(unittest.TestCase, utils.TestDataMixin):
"""Tests for TestObject.get_units."""
def setUp(self):
self.setup_test_data()
self.config = copy.copy(config.get_config())
self.config.test_package_names = [self.test_package_name]
    self.config.test_module_pattern = r'^test_[\w]+$'
self.module_fixture = self.test_package_name + '.test_module_fixture'
self.class_fixture = self.test_package_name + '.test_class_fixture'
self.badnames = self.test_package_name + '.test_badnames'
def tearDown(self):
self.tear_down_test_data()
def check(self, fullname, exp_names, exp_errs=None):
"""Checks that get_units returns what is expected.
Args:
fullname: The name to get test units in.
exp_names: The expected test unit names, in any order. The names are
relative to fullname.
exp_errs: The expected names of objects that failed to load, in any
order.
"""
errors_out = []
units = (logic.get_requested_object(fullname, self.config)
.get_units(self.config, errors_out))
exp_fullnames = sorted([fullname + name for name in exp_names])
self.assertEqual(exp_fullnames, sorted([u.fullname for u in units]))
load_failed = [err[0] for err in errors_out]
self.assertEqual(sorted(exp_errs or []), sorted(load_failed))
def test_invalid_object(self):
self.check('bad', [], ['bad'])
def test_root(self):
self.config.parallelize_modules = True
self.config.parallelize_classes = True
self.config.parallelize_methods = True
subpackage = self.test_package_name + '.subpackage'
self.config.test_package_names = [subpackage]
self.check('', [subpackage + '.test_ham.FooTest.test_fail',
subpackage + '.test_ham.FooTest.test_pass'])
def test_package(self):
self.config.parallelize_modules = True
self.config.parallelize_classes = True
self.config.parallelize_methods = True
subpackage = self.test_package_name + '.subpackage'
self.check(subpackage, ['.test_ham.FooTest.test_fail',
'.test_ham.FooTest.test_pass'])
def test_module_with_fixture(self):
self.config.parallelize_modules = True
self.config.parallelize_classes = True
self.config.parallelize_methods = True
self.check(self.module_fixture, [''])
def test_module_without_fixture(self):
self.config.parallelize_modules = True
self.config.parallelize_classes = True
self.config.parallelize_methods = True
self.check(self.class_fixture,
['.HasClassFixture', '.HasNoClassFixture.test_fail',
'.HasNoClassFixture.test_pass'])
def test_no_parallel_classes(self):
self.config.parallelize_modules = True
self.config.parallelize_classes = False
self.check(self.class_fixture, [''])
def test_no_parallel_methods(self):
self.config.parallelize_modules = True
self.config.parallelize_classes = True
self.config.parallelize_methods = False
self.check(self.class_fixture, ['.HasClassFixture',
'.HasNoClassFixture'])
def test_class_with_fixture(self):
self.config.parallelize_modules = True
self.config.parallelize_classes = True
self.config.parallelize_methods = True
self.check(self.class_fixture + '.HasClassFixture', [''])
def test_class_without_fixture(self):
self.config.parallelize_modules = True
self.config.parallelize_classes = True
self.config.parallelize_methods = True
self.check(self.class_fixture + '.HasNoClassFixture',
['.test_fail', '.test_pass'])
def test_load_error(self):
self.config.parallelize_modules = True
self.config.parallelize_classes = True
self.config.parallelize_methods = True
broken_module = self.test_package_name + '.test_brokenmodule'
self.check(broken_module, [], [broken_module])
def test_module_bad_name(self):
self.config.parallelize_modules = True
self.config.parallelize_classes = False
# The module should be found despite its __name__ not matching
# test_module_pattern.
units = (logic.get_requested_object(self.test_package_name, self.config)
.get_units(self.config))
self.assertTrue(self.badnames in [unit.fullname for unit in units])
def test_class_bad_names(self):
self.config.parallelize_modules = True
self.config.parallelize_classes = True
self.check(self.badnames,
['.ClassWithDifferentMethodNames', '.ClassWithDifferentModule',
'.ClassWithDifferentName1', '.ClassWithDifferentName2'])
def test_method_bad_names(self):
self.config.parallelize_modules = True
self.config.parallelize_classes = True
self.config.parallelize_methods = True
class_name = self.badnames + '.ClassWithDifferentMethodNames'
self.check(class_name, ['.test_method1', '.test_method2'])
def check_names_and_errors(self, fullname, exp_names, fullnames, exp_errs,
errs):
exp_fullnames = [fullname + name for name in exp_names]
self.assertEqual(sorted(exp_fullnames), sorted(fullnames))
    load_failed = [err[0] for err in errs]
self.assertEqual(sorted(exp_errs or []), sorted(load_failed))
class GetMethodsTest(unittest.TestCase, utils.TestDataMixin):
"""Tests for the TestObject.get_methods method."""
def setUp(self):
self.setup_test_data()
self.config = copy.copy(config.get_config())
self.config.test_package_names = [self.test_package_name]
    self.config.test_module_pattern = r'^test_[\w]+$'
self.class_fixture = self.test_package_name + '.test_class_fixture'
self.class_with_fixture = self.class_fixture + '.HasClassFixture'
self.class_without_fixture = self.class_fixture + '.HasNoClassFixture'
def tearDown(self):
self.tear_down_test_data()
def check(self, fullname, exp_names, exp_errs=None):
"""Checks that get_methods returns what is expected.
Args:
fullname: The name to get test methods in.
exp_names: The expected test method names, in any order. The names are
relative to fullname.
exp_errs: The expected names of objects that failed to load, in any
order.
"""
errors_out = []
methods = (logic.get_requested_object(fullname, self.config)
.get_methods(self.config, errors_out))
fullnames = [method.fullname for method in methods]
exp_fullnames = [fullname + name for name in exp_names]
self.assertEqual(sorted(exp_fullnames), sorted(fullnames))
load_failed = [err[0] for err in errors_out]
self.assertEqual(sorted(exp_errs or []), sorted(load_failed))
def test_invalid_name(self):
self.check('bad', [], ['bad'])
def test_root(self):
subpackage = self.test_package_name + '.subpackage'
self.config.test_package_names = [subpackage]
prefix = subpackage + '.test_ham.FooTest'
self.check('', [prefix + '.test_pass', prefix + '.test_fail'])
def test_root_module(self):
module = self.test_package_name + '.test_one_testcase'
self.config.test_package_names = [module]
self.check('', [module + '.SimpleTestCase.test_pass',
module + '.SimpleTestCase.test_fail'])
def test_root_bad_module(self):
module = self.test_package_name + '.test_badmodule'
self.config.test_package_names = [module]
self.check('', [], [module])
def test_package(self):
subpackage = self.test_package_name + '.subpackage'
self.check(subpackage, ['.test_ham.FooTest.test_pass',
'.test_ham.FooTest.test_fail'])
def test_module(self):
self.check(self.class_fixture,
['.HasClassFixture.test_has_class_value',
'.HasClassFixture.test_has_bad_class_value',
'.HasNoClassFixture.test_pass',
'.HasNoClassFixture.test_fail'])
def test_class(self):
self.check(self.class_with_fixture,
['.test_has_class_value', '.test_has_bad_class_value'])
def test_method(self):
method = self.class_with_fixture + '.test_has_class_value'
self.check(method, [''])
def test_bad_names(self):
badnames = self.test_package_name + '.test_badnames'
self.check(badnames, ['.ClassWithDifferentModule.test_method',
'.ClassWithDifferentName1.test_method',
'.ClassWithDifferentName2.test_method',
'.ClassWithDifferentMethodNames.test_method1',
'.ClassWithDifferentMethodNames.test_method2'])
class GetTestSuiteTest(unittest.TestCase, utils.TestDataMixin):
"""Tests for TestObject.get_test_suite."""
def setUp(self):
self.setup_test_data()
self.config = copy.copy(config.get_config())
self.config.test_package_names = [self.test_package_name]
    self.config.test_module_pattern = r'^test_[\w]+$'
self.class_fixture = self.test_package_name + '.test_class_fixture'
self.class_with_fixture = self.class_fixture + '.HasClassFixture'
self.class_without_fixture = self.class_fixture + '.HasNoClassFixture'
def tearDown(self):
self.tear_down_test_data()
def check(self, fullname, exp_names, exp_errs=None):
"""Checks that get_suite returns what is expected.
Args:
fullname: The name to get a test suite from.
exp_names: The expected test method names, in any order. The names are
relative to fullname.
exp_errs: The expected names of objects that failed to load, in any
order.
"""
errors_out = []
suite = (logic.get_requested_object(fullname, self.config)
.get_suite(self.config, errors_out))
self.assertTrue(isinstance(suite, unittest.TestSuite))
fullnames = [test.fullname for test in suite]
exp_fullnames = [fullname + name for name in exp_names]
self.assertEqual(sorted(exp_fullnames), sorted(fullnames))
load_failed = [err[0] for err in errors_out]
self.assertEqual(sorted(exp_errs or []), sorted(load_failed))
def test_invalid_name(self):
self.check('bad', [], ['bad'])
def test_package(self):
subpackage = self.test_package_name + '.subpackage'
self.check(subpackage, ['.test_ham.FooTest.test_pass',
'.test_ham.FooTest.test_fail'])
def test_bad_names(self):
badnames = self.test_package_name + '.test_badnames'
self.check(badnames, ['.ClassWithDifferentModule.test_method',
'.ClassWithDifferentName1.test_method',
'.ClassWithDifferentName2.test_method',
'.ClassWithDifferentMethodNames.test_method1',
'.ClassWithDifferentMethodNames.test_method2'])
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import logging
import datetime
from django.conf import settings
from django.utils.functional import cached_property # noqa
from django.utils.translation import ugettext_lazy as _
import six
from novaclient import client as nova_client
from novaclient import exceptions as nova_exceptions
from novaclient.v2.contrib import instance_action as nova_instance_action
from novaclient.v2.contrib import list_extensions as nova_list_extensions
from novaclient.v2 import security_group_rules as nova_rules
from novaclient.v2 import security_groups as nova_security_groups
from novaclient.v2 import servers as nova_servers
from horizon import conf
from horizon import exceptions as horizon_exceptions
from horizon.utils import functions as utils
from horizon.utils.memoized import memoized # noqa
from horizon.utils.memoized import memoized_with_request # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import network_base
LOG = logging.getLogger(__name__)
# Supported compute versions
VERSIONS = base.APIVersionManager("compute", preferred_version=2)
VERSIONS.load_supported_version(1.1, {"client": nova_client, "version": 1.1})
VERSIONS.load_supported_version(2, {"client": nova_client, "version": 2})
VERSIONS.load_supported_version(2.9, {"client": nova_client, "version": 2.9})
# API static values
INSTANCE_ACTIVE_STATE = 'ACTIVE'
VOLUME_STATE_AVAILABLE = "available"
DEFAULT_QUOTA_NAME = 'default'
INSECURE = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
CACERT = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
def probation(result):
created_at = getattr(result, 'created_at')
starttime = getattr(result, 'starttime')
ct = datetime.datetime.strptime(created_at, '%Y-%m-%dT%H:%M:%S.%f')
rt = datetime.datetime.strptime(starttime, '%Y-%m-%dT%H:%M:%S.%f')
setattr(result, 'probation', False)
if ct and rt:
        created_time = datetime.datetime(ct.year, ct.month, ct.day,
                                         ct.hour, ct.minute)
        run_time = datetime.datetime(rt.year, rt.month, rt.day,
                                     rt.hour, rt.minute)
        probation_at = created_time + datetime.timedelta(days=30)
if run_time <= probation_at:
setattr(result, 'probation', True)
return result
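# Illustrative sketch (not part of the original module): probation() marks a
# result as still "in probation" when its start time falls within 30 days of
# its creation time. With hypothetical timestamps:
#
#   created_at = '2016-01-01T00:00:00.000000'   -> created 2016-01-01 00:00
#   starttime  = '2016-01-20T00:00:00.000000'   -> started 2016-01-20 00:00
#
# probation_at is 2016-01-31 00:00, so result.probation becomes True; a start
# time of 2016-02-15 would leave result.probation at False.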
class VNCConsole(base.APIDictWrapper):
"""Wrapper for the "console" dictionary.
Returned by the novaclient.servers.get_vnc_console method.
"""
_attrs = ['url', 'type']
class SPICEConsole(base.APIDictWrapper):
"""Wrapper for the "console" dictionary.
Returned by the novaclient.servers.get_spice_console method.
"""
_attrs = ['url', 'type']
class RDPConsole(base.APIDictWrapper):
"""Wrapper for the "console" dictionary.
Returned by the novaclient.servers.get_rdp_console method.
"""
_attrs = ['url', 'type']
class SerialConsole(base.APIDictWrapper):
"""Wrapper for the "console" dictionary.
Returned by the novaclient.servers.get_serial_console method.
"""
_attrs = ['url', 'type']
class Server(base.APIResourceWrapper):
"""Simple wrapper around novaclient.server.Server.
Preserves the request info so image name can later be retrieved.
"""
_attrs = ['addresses', 'attrs', 'id', 'image', 'links', 'vcpus', 'rams',
'metadata', 'name', 'private_ip', 'public_ip', 'status', 'uuid',
'image_name', 'VirtualInterfaces', 'flavor', 'key_name', 'fault',
'tenant_id', 'user_id', 'created', 'locked', 'vm_hypervisor_type',
'OS-EXT-STS:power_state', 'OS-EXT-STS:task_state',
'OS-EXT-SRV-ATTR:instance_name', 'OS-EXT-SRV-ATTR:host',
'OS-EXT-AZ:availability_zone', 'OS-DCF:diskConfig']
def __init__(self, apiresource, request):
super(Server, self).__init__(apiresource)
self.request = request
# TODO(gabriel): deprecate making a call to Glance as a fallback.
@property
def image_name(self):
import glanceclient.exc as glance_exceptions # noqa
from openstack_dashboard.api import glance # noqa
if not self.image:
return _("-")
if hasattr(self.image, 'name'):
return self.image.name
if 'name' in self.image:
return self.image['name']
else:
try:
image = glance.image_get(self.request, self.image['id'])
return image.name
except (glance_exceptions.ClientException,
horizon_exceptions.ServiceCatalogException):
return _("-")
@property
def internal_name(self):
return getattr(self, 'OS-EXT-SRV-ATTR:instance_name', "")
@property
def availability_zone(self):
return getattr(self, 'OS-EXT-AZ:availability_zone', "")
@property
def host_server(self):
return getattr(self, 'OS-EXT-SRV-ATTR:host', '')
class Hypervisor(base.APIDictWrapper):
"""Simple wrapper around novaclient.hypervisors.Hypervisor."""
_attrs = ['manager', '_loaded', '_info', 'hypervisor_hostname', 'id',
'servers']
@property
def servers(self):
# if hypervisor doesn't have servers, the attribute is not present
servers = []
try:
servers = self._apidict.servers
except Exception:
pass
return servers
class NovaUsage(base.APIResourceWrapper):
"""Simple wrapper around contrib/simple_usage.py."""
_attrs = ['start', 'server_usages', 'stop', 'tenant_id',
'total_local_gb_usage', 'total_memory_mb_usage',
'total_vcpus_usage', 'total_hours']
def get_summary(self):
return {'instances': self.total_active_instances,
'memory_mb': self.memory_mb,
'vcpus': self.vcpus,
'vcpu_hours': self.vcpu_hours,
'local_gb': self.local_gb,
'disk_gb_hours': self.disk_gb_hours,
'memory_mb_hours': self.memory_mb_hours}
@property
def total_active_instances(self):
return sum(1 for s in self.server_usages if s['ended_at'] is None)
@property
def vcpus(self):
return sum(s['vcpus'] for s in self.server_usages
if s['ended_at'] is None)
@property
def vcpu_hours(self):
return getattr(self, "total_vcpus_usage", 0)
@property
def local_gb(self):
return sum(s['local_gb'] for s in self.server_usages
if s['ended_at'] is None)
@property
def memory_mb(self):
return sum(s['memory_mb'] for s in self.server_usages
if s['ended_at'] is None)
@property
def disk_gb_hours(self):
return getattr(self, "total_local_gb_usage", 0)
@property
def memory_mb_hours(self):
return getattr(self, "total_memory_mb_usage", 0)
class SecurityGroup(base.APIResourceWrapper):
"""Wrapper around novaclient.security_groups.SecurityGroup.
Wraps its rules in SecurityGroupRule objects and allows access to them.
"""
_attrs = ['id', 'name', 'description', 'tenant_id']
@cached_property
def rules(self):
"""Wraps transmitted rule info in the novaclient rule class."""
manager = nova_rules.SecurityGroupRuleManager(None)
rule_objs = [nova_rules.SecurityGroupRule(manager, rule)
for rule in self._apiresource.rules]
return [SecurityGroupRule(rule) for rule in rule_objs]
def to_dict(self):
return self._apiresource.to_dict()
@six.python_2_unicode_compatible
class SecurityGroupRule(base.APIResourceWrapper):
"""Wrapper for individual rules in a SecurityGroup."""
_attrs = ['id', 'ip_protocol', 'from_port', 'to_port', 'ip_range', 'group']
def __str__(self):
vals = {
'range': '%s:%s' % (self.from_port, self.to_port),
'ip_protocol': self.ip_protocol
}
if self.from_port == -1 and self.to_port == -1:
vals['range'] = 'any port'
if 'name' in self.group:
vals['group'] = self.group['name']
return (_('ALLOW %(range)s/%(ip_protocol)s from %(group)s') % vals)
else:
vals['cidr'] = self.ip_range['cidr']
return (_('ALLOW %(range)s/%(ip_protocol)s from %(cidr)s') % vals)
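    # Example renderings (illustrative, assuming the default translations):
    #   a TCP rule for ports 80-80 from CIDR 0.0.0.0/0 and no source group ->
    #       'ALLOW 80:80/tcp from 0.0.0.0/0'
    #   an ICMP rule (-1:-1) whose source group is named "default" ->
    #       'ALLOW any port/icmp from default'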
# The following attributes are defined to keep compatibility with Neutron
@property
def ethertype(self):
return None
@property
def direction(self):
return 'ingress'
class SecurityGroupManager(network_base.SecurityGroupManager):
backend = 'nova'
def __init__(self, request):
self.request = request
self.client = novaclient(request)
def list(self):
return [SecurityGroup(g) for g
in self.client.security_groups.list()]
def get(self, sg_id):
return SecurityGroup(self.client.security_groups.get(sg_id))
def create(self, name, desc):
return SecurityGroup(self.client.security_groups.create(name, desc))
def update(self, sg_id, name, desc):
return SecurityGroup(self.client.security_groups.update(sg_id,
name, desc))
def delete(self, security_group_id):
self.client.security_groups.delete(security_group_id)
def rule_create(self, parent_group_id,
direction=None, ethertype=None,
ip_protocol=None, from_port=None, to_port=None,
cidr=None, group_id=None):
# Nova Security Group API does not use direction and ethertype fields.
try:
sg = self.client.security_group_rules.create(parent_group_id,
ip_protocol,
from_port,
to_port,
cidr,
group_id)
except nova_exceptions.BadRequest:
raise horizon_exceptions.Conflict(
_('Security group rule already exists.'))
return SecurityGroupRule(sg)
def rule_delete(self, security_group_rule_id):
self.client.security_group_rules.delete(security_group_rule_id)
def list_by_instance(self, instance_id):
"""Gets security groups of an instance."""
# TODO(gabriel): This needs to be moved up to novaclient, and should
# be removed once novaclient supports this call.
security_groups = []
nclient = self.client
resp, body = nclient.client.get('/servers/%s/os-security-groups'
% instance_id)
if body:
# Wrap data in SG objects as novaclient would.
sg_objs = [
nova_security_groups.SecurityGroup(
nclient.security_groups, sg, loaded=True)
for sg in body.get('security_groups', [])]
# Then wrap novaclient's object with our own. Yes, sadly wrapping
# with two layers of objects is necessary.
security_groups = [SecurityGroup(sg) for sg in sg_objs]
return security_groups
def update_instance_security_group(self, instance_id,
new_security_group_ids):
try:
all_groups = self.list()
except Exception:
raise Exception(_("Couldn't get security group list."))
wanted_groups = set([sg.name for sg in all_groups
if sg.id in new_security_group_ids])
try:
current_groups = self.list_by_instance(instance_id)
except Exception:
raise Exception(_("Couldn't get current security group "
"list for instance %s.")
% instance_id)
current_group_names = set([sg.name for sg in current_groups])
groups_to_add = wanted_groups - current_group_names
groups_to_remove = current_group_names - wanted_groups
num_groups_to_modify = len(groups_to_add | groups_to_remove)
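        # Worked example (illustrative): if the instance currently has
        # {'default', 'web'} and the requested ids map to {'web', 'db'},
        # then groups_to_add == {'db'} and groups_to_remove == {'default'},
        # so num_groups_to_modify starts at 2.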
try:
for group in groups_to_add:
self.client.servers.add_security_group(instance_id, group)
num_groups_to_modify -= 1
for group in groups_to_remove:
self.client.servers.remove_security_group(instance_id, group)
num_groups_to_modify -= 1
except nova_exceptions.ClientException as err:
LOG.error(_("Failed to modify %(num_groups_to_modify)d instance "
"security groups: %(err)s") %
dict(num_groups_to_modify=num_groups_to_modify,
err=err))
# reraise novaclient.exceptions.ClientException, but with
# a sanitized error message so we don't risk exposing
# sensitive information to the end user. This has to be
# novaclient.exceptions.ClientException, not just
# Exception, since the former is recognized as a
# "recoverable" exception by horizon, and therefore the
# error message is passed along to the end user, while
# Exception is swallowed alive by horizon and a generic
# error message is given to the end user
raise nova_exceptions.ClientException(
err.code,
_("Failed to modify %d instance security groups") %
num_groups_to_modify)
return True
class FlavorExtraSpec(object):
def __init__(self, flavor_id, key, val):
self.flavor_id = flavor_id
self.id = key
self.key = key
self.value = val
class FloatingIp(base.APIResourceWrapper):
_attrs = ['id', 'ip', 'fixed_ip', 'port_id', 'instance_id',
'instance_type', 'pool']
def __init__(self, fip):
fip.__setattr__('port_id', fip.instance_id)
fip.__setattr__('instance_type',
'compute' if fip.instance_id else None)
super(FloatingIp, self).__init__(fip)
class FloatingIpPool(base.APIDictWrapper):
def __init__(self, pool):
pool_dict = {'id': pool.name,
'name': pool.name}
super(FloatingIpPool, self).__init__(pool_dict)
class FloatingIpTarget(base.APIDictWrapper):
def __init__(self, server):
server_dict = {'name': '%s (%s)' % (server.name, server.id),
'id': server.id}
super(FloatingIpTarget, self).__init__(server_dict)
class FloatingIpManager(network_base.FloatingIpManager):
def __init__(self, request):
self.request = request
self.client = novaclient(request)
def list_pools(self):
return [FloatingIpPool(pool)
for pool in self.client.floating_ip_pools.list()]
def list(self, all_tenants=False):
return [FloatingIp(fip) for fip in
self.client.floating_ips.list(
all_tenants=all_tenants)]
def get(self, floating_ip_id):
return FloatingIp(self.client.floating_ips.get(floating_ip_id))
def allocate(self, pool, tenant_id=None, **params):
# NOTE: tenant_id will never be used here.
return FloatingIp(self.client.floating_ips.create(pool=pool))
def release(self, floating_ip_id):
self.client.floating_ips.delete(floating_ip_id)
def associate(self, floating_ip_id, port_id):
# In Nova implied port_id is instance_id
server = self.client.servers.get(port_id)
fip = self.client.floating_ips.get(floating_ip_id)
self.client.servers.add_floating_ip(server.id, fip.ip)
def disassociate(self, floating_ip_id):
fip = self.client.floating_ips.get(floating_ip_id)
server = self.client.servers.get(fip.instance_id)
self.client.servers.remove_floating_ip(server.id, fip.ip)
def list_targets(self):
return [FloatingIpTarget(s) for s in self.client.servers.list()]
def get_target_id_by_instance(self, instance_id, target_list=None):
return instance_id
def list_target_id_by_instance(self, instance_id, target_list=None):
return [instance_id, ]
def is_simple_associate_supported(self):
return conf.HORIZON_CONFIG["simple_ip_management"]
def is_supported(self):
return True
def get_auth_params_from_request(request):
"""Extracts the properties from the request object needed by the novaclient
call below. These will be used to memoize the calls to novaclient
"""
return (
request.user.username,
request.user.token.id,
request.user.tenant_id,
request.META['REMOTE_ADDR'],
base.url_for(request, 'compute'),
base.url_for(request, 'identity')
)
@memoized_with_request(get_auth_params_from_request)
def novaclient(request_auth_params):
    (username, token_id, project_id,
     ip_address, nova_url, auth_url) = request_auth_params
c = nova_client.Client(VERSIONS.get_active_version()['version'],
username,
token_id,
project_id=project_id,
auth_url=auth_url,
insecure=INSECURE,
cacert=CACERT,
ipaddr=ip_address,
http_log_debug=settings.DEBUG)
c.client.auth_token = token_id
c.client.management_url = nova_url
return c
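# Note on caching (a sketch of the behaviour wired up above): the
# @memoized_with_request decorator first maps the Django request through
# get_auth_params_from_request() and then memoizes novaclient() on that tuple,
# so two calls such as
#
#   client_a = novaclient(request)
#   client_b = novaclient(request)
#
# within the same process return the same client object as long as the user,
# token, tenant, source address, and endpoints are unchanged.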
def server_vnc_console(request, instance_id, console_type='novnc'):
return VNCConsole(novaclient(request).servers.get_vnc_console(
instance_id, console_type)['console'])
def server_spice_console(request, instance_id, console_type='spice-html5'):
return SPICEConsole(novaclient(request).servers.get_spice_console(
instance_id, console_type)['console'])
def server_rdp_console(request, instance_id, console_type='rdp-html5'):
return RDPConsole(novaclient(request).servers.get_rdp_console(
instance_id, console_type)['console'])
def server_serial_console(request, instance_id, console_type='serial'):
return SerialConsole(novaclient(request).servers.get_serial_console(
instance_id, console_type)['console'])
def flavor_create(request, name, memory, vcpu, disk, flavorid='auto',
ephemeral=0, swap=0, metadata=None, is_public=True,
rxtx_factor=1):
flavor = novaclient(request).flavors.create(name, memory, vcpu, disk,
flavorid=flavorid,
ephemeral=ephemeral,
swap=swap, is_public=is_public,
rxtx_factor=rxtx_factor)
    if metadata:
flavor_extra_set(request, flavor.id, metadata)
return flavor
def flavor_delete(request, flavor_id):
novaclient(request).flavors.delete(flavor_id)
def flavor_get(request, flavor_id, get_extras=False):
flavor = novaclient(request).flavors.get(flavor_id)
if get_extras:
flavor.extras = flavor_get_extras(request, flavor.id, True, flavor)
return flavor
@memoized
def flavor_list(request, is_public=True, get_extras=False):
"""Get the list of available instance sizes (flavors)."""
flavors = novaclient(request).flavors.list(is_public=is_public)
if get_extras:
for flavor in flavors:
flavor.extras = flavor_get_extras(request, flavor.id, True, flavor)
return flavors
def update_pagination(entities, page_size, marker, sort_dir, sort_key,
reversed_order):
has_more_data = has_prev_data = False
if len(entities) > page_size:
has_more_data = True
entities.pop()
if marker is not None:
has_prev_data = True
# first page condition when reached via prev back
elif reversed_order and marker is not None:
has_more_data = True
# last page condition
elif marker is not None:
has_prev_data = True
# restore the original ordering here
if reversed_order:
entities = sorted(entities, key=lambda entity:
(getattr(entity, sort_key) or '').lower(),
reverse=(sort_dir == 'asc'))
return entities, has_more_data, has_prev_data
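# Illustrative walk-through (not part of the original module), assuming
# page_size = 2 and that novaclient was asked for page_size + 1 = 3 items:
#
#   entities = [flavor_a, flavor_b, flavor_c]   # hypothetical wrappers
#   entities, more, prev = update_pagination(
#       entities, 2, marker=None, sort_dir='asc', sort_key='name',
#       reversed_order=False)
#   # -> entities == [flavor_a, flavor_b], more is True, prev is False
#
# A non-None marker on a full page additionally reports has_prev_data = True.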
@memoized
def flavor_list_paged(request, is_public=True, get_extras=False, marker=None,
paginate=False, sort_key="name", sort_dir="desc",
reversed_order=False):
"""Get the list of available instance sizes (flavors)."""
has_more_data = False
has_prev_data = False
if paginate:
if reversed_order:
sort_dir = 'desc' if sort_dir == 'asc' else 'asc'
page_size = utils.get_page_size(request)
flavors = novaclient(request).flavors.list(is_public=is_public,
marker=marker,
limit=page_size + 1,
sort_key=sort_key,
sort_dir=sort_dir)
flavors, has_more_data, has_prev_data = update_pagination(
flavors, page_size, marker, sort_dir, sort_key, reversed_order)
else:
flavors = novaclient(request).flavors.list(is_public=is_public)
if get_extras:
for flavor in flavors:
flavor.extras = flavor_get_extras(request, flavor.id, True, flavor)
return (flavors, has_more_data, has_prev_data)
@memoized_with_request(novaclient)
def flavor_access_list(nova_api, flavor=None):
"""Get the list of access instance sizes (flavors)."""
return nova_api.flavor_access.list(flavor=flavor)
def add_tenant_to_flavor(request, flavor, tenant):
"""Add a tenant to the given flavor access list."""
return novaclient(request).flavor_access.add_tenant_access(
flavor=flavor, tenant=tenant)
def remove_tenant_from_flavor(request, flavor, tenant):
"""Remove a tenant from the given flavor access list."""
return novaclient(request).flavor_access.remove_tenant_access(
flavor=flavor, tenant=tenant)
def flavor_get_extras(request, flavor_id, raw=False, flavor=None):
"""Get flavor extra specs."""
if flavor is None:
flavor = novaclient(request).flavors.get(flavor_id)
extras = flavor.get_keys()
if raw:
return extras
return [FlavorExtraSpec(flavor_id, key, value) for
key, value in extras.items()]
def flavor_extra_delete(request, flavor_id, keys):
"""Unset the flavor extra spec keys."""
flavor = novaclient(request).flavors.get(flavor_id)
return flavor.unset_keys(keys)
def flavor_extra_set(request, flavor_id, metadata):
"""Set the flavor extra spec keys."""
flavor = novaclient(request).flavors.get(flavor_id)
    if not metadata:
        # Passing an empty dict does not delete existing keys; use
        # flavor_extra_delete() for that.
        return None
return flavor.set_keys(metadata)
def snapshot_create(request, instance_id, name):
return novaclient(request).servers.create_image(instance_id, name)
def keypair_create(request, name):
return novaclient(request).keypairs.create(name)
def keypair_import(request, name, public_key):
return novaclient(request).keypairs.create(name, public_key)
def keypair_delete(request, keypair_id):
novaclient(request).keypairs.delete(keypair_id)
def keypair_list(request):
return novaclient(request).keypairs.list()
def keypair_get(request, keypair_id):
return novaclient(request).keypairs.get(keypair_id)
def server_create(request, name, image, pool, flavor, key_name, user_data,
security_groups, terminal=None, block_device_mapping=None,
block_device_mapping_v2=None, nics=None,
availability_zone=None, instance_count=1, admin_pass=None,
disk_config=None, config_drive=None, meta=None,
scheduler_hints=None):
return Server(novaclient(request).servers.create(
name, image, pool, flavor, userdata=user_data,
security_groups=security_groups, terminal=terminal,
key_name=key_name, block_device_mapping=block_device_mapping,
block_device_mapping_v2=block_device_mapping_v2,
nics=nics, availability_zone=availability_zone,
min_count=instance_count, admin_pass=admin_pass,
disk_config=disk_config, config_drive=config_drive,
meta=meta, scheduler_hints=scheduler_hints), request)
def server_delete(request, instance_id):
novaclient(request).servers.delete(instance_id)
def server_get(request, instance_id):
return Server(novaclient(request).servers.get(instance_id), request)
def server_paging(request, search_opts=None, all_tenants=True, hyper_type="QEMU"):
page_size = utils.get_page_size(request)
c = novaclient(request)
paginate = False
if search_opts is None:
search_opts = {}
elif 'paginate' in search_opts:
paginate = search_opts.pop('paginate')
if paginate:
search_opts['limit'] = page_size + 1
if all_tenants:
search_opts['all_tenants'] = True
else:
search_opts['project_id'] = request.user.tenant_id
servers = [Server(s, request)
for s in c.servers.list(True, search_opts)
if s.vm_hypervisor_type == hyper_type]
has_more_data = False
has_prev_data = False
if paginate and len(servers) > page_size:
servers.pop(-1)
has_more_data = True
has_prev_data = True
    elif paginate and len(servers) == getattr(settings,
                                              'API_RESULT_LIMIT', 1000):
has_more_data = True
else:
has_prev_data = True
return (servers, has_more_data, has_prev_data)
def server_list(request, search_opts=None, all_tenants=False):
page_size = utils.get_page_size(request)
c = novaclient(request)
paginate = False
if search_opts is None:
search_opts = {}
elif 'paginate' in search_opts:
paginate = search_opts.pop('paginate')
if paginate:
search_opts['limit'] = page_size + 1
if all_tenants:
search_opts['all_tenants'] = True
else:
search_opts['project_id'] = request.user.tenant_id
servers = [Server(s, request)
for s in c.servers.list(True, search_opts)]
has_more_data = False
has_prev_data = False
if paginate and len(servers) > page_size:
servers.pop(-1)
has_more_data = True
elif paginate and len(servers) == getattr(settings, 'API_RESULT_LIMIT',
1000):
has_more_data = True
else:
has_prev_data = True
return (servers, has_more_data)
def server_vgpu(request, search_opts=None, all_tenants=False, hyper_type="QEMU"):
page_size = utils.get_page_size(request)
c = novaclient(request)
paginate = False
if search_opts is None:
search_opts = {}
elif 'paginate' in search_opts:
paginate = search_opts.pop('paginate')
if paginate:
search_opts['limit'] = page_size + 1
if all_tenants:
search_opts['all_tenants'] = True
else:
search_opts['project_id'] = request.user.tenant_id
servers = [Server(s, request)
for s in c.servers.list(True, search_opts)
if s.vm_hypervisor_type != hyper_type]
has_more_data = False
has_prev_data = False
if paginate and len(servers) > page_size:
servers.pop(-1)
has_more_data = True
has_prev_data = True
    elif paginate and len(servers) == getattr(settings,
                                              'API_RESULT_LIMIT', 1000):
has_more_data = True
else:
has_prev_data = True
return (servers, has_more_data, has_prev_data)
def server_console_output(request, instance_id, tail_length=None):
"""Gets console output of an instance."""
return novaclient(request).servers.get_console_output(instance_id,
length=tail_length)
def server_pause(request, instance_id):
novaclient(request).servers.pause(instance_id)
def server_unpause(request, instance_id):
novaclient(request).servers.unpause(instance_id)
def server_suspend(request, instance_id):
novaclient(request).servers.suspend(instance_id)
def server_resume(request, instance_id):
novaclient(request).servers.resume(instance_id)
def server_shelve(request, instance_id):
novaclient(request).servers.shelve(instance_id)
def server_unshelve(request, instance_id):
novaclient(request).servers.unshelve(instance_id)
def server_reboot(request, instance_id, soft_reboot=False):
hardness = nova_servers.REBOOT_HARD
if soft_reboot:
hardness = nova_servers.REBOOT_SOFT
novaclient(request).servers.reboot(instance_id, hardness)
def server_rebuild(request, instance_id, image_id, password=None,
disk_config=None):
return novaclient(request).servers.rebuild(instance_id, image_id,
password, disk_config)
def server_update(request, instance_id, name, terminal=None):
    return novaclient(request).servers.update(
        instance_id, name=name, terminal=terminal)
def reallocation(request, instance_id, project, user):
    return novaclient(request).servers.reallocation(
        instance_id, project=project, user=user)
def server_migrate(request, instance_id):
novaclient(request).servers.migrate(instance_id)
def server_live_migrate(request, instance_id, host, block_migration=False,
disk_over_commit=False):
novaclient(request).servers.live_migrate(instance_id, host,
block_migration,
disk_over_commit)
def server_resize(request, instance_id, flavor, disk_config=None, **kwargs):
novaclient(request).servers.resize(instance_id, flavor,
disk_config, **kwargs)
def server_confirm_resize(request, instance_id):
novaclient(request).servers.confirm_resize(instance_id)
def server_revert_resize(request, instance_id):
novaclient(request).servers.revert_resize(instance_id)
def server_start(request, instance_id):
novaclient(request).servers.start(instance_id)
def server_stop(request, instance_id):
novaclient(request).servers.stop(instance_id)
def server_lock(request, instance_id):
novaclient(request).servers.lock(instance_id)
def server_unlock(request, instance_id):
novaclient(request).servers.unlock(instance_id)
def server_metadata_update(request, instance_id, metadata):
novaclient(request).servers.set_meta(instance_id, metadata)
def server_metadata_delete(request, instance_id, keys):
novaclient(request).servers.delete_meta(instance_id, keys)
def tenant_quota_get(request, tenant_id):
return base.QuotaSet(novaclient(request).quotas.get(tenant_id))
def tenant_quota_update(request, tenant_id, **kwargs):
if kwargs:
novaclient(request).quotas.update(tenant_id, **kwargs)
def default_quota_get(request, tenant_id):
return base.QuotaSet(novaclient(request).quotas.defaults(tenant_id))
def default_quota_update(request, **kwargs):
novaclient(request).quota_classes.update(DEFAULT_QUOTA_NAME, **kwargs)
def usage_get(request, tenant_id, start, end):
return NovaUsage(novaclient(request).usage.get(tenant_id, start, end))
def usage_list(request, start, end):
return [NovaUsage(u) for u in
novaclient(request).usage.list(start, end, True)]
def virtual_interfaces_list(request, instance_id):
return novaclient(request).virtual_interfaces.list(instance_id)
def get_x509_credentials(request):
return novaclient(request).certs.create()
def get_x509_root_certificate(request):
return novaclient(request).certs.get()
def get_password(request, instance_id, private_key=None):
return novaclient(request).servers.get_password(instance_id, private_key)
def instance_volume_attach(request, volume_id, instance_id, device):
return novaclient(request).volumes.create_server_volume(instance_id,
volume_id,
device)
def instance_volume_detach(request, instance_id, att_id):
return novaclient(request).volumes.delete_server_volume(instance_id,
att_id)
def instance_volumes_list(request, instance_id):
from openstack_dashboard.api import cinder
volumes = novaclient(request).volumes.get_server_volumes(instance_id)
for volume in volumes:
volume_data = cinder.cinderclient(request).volumes.get(volume.id)
volume.name = cinder.Volume(volume_data).name
return volumes
def hypervisor_list(request):
return novaclient(request).hypervisors.list()
def hypervisor_stats(request):
return novaclient(request).hypervisors.statistics()
def hypervisor_search(request, query, servers=True):
return novaclient(request).hypervisors.search(query, servers)
def evacuate_host(request, host, target=None, on_shared_storage=False):
# TODO(jmolle) This should be change for nova atomic api host_evacuate
hypervisors = novaclient(request).hypervisors.search(host, True)
response = []
err_code = None
for hypervisor in hypervisors:
hyper = Hypervisor(hypervisor)
# if hypervisor doesn't have servers, the attribute is not present
for server in hyper.servers:
try:
novaclient(request).servers.evacuate(server['uuid'],
target,
on_shared_storage)
except nova_exceptions.ClientException as err:
err_code = err.code
msg = _("Name: %(name)s ID: %(uuid)s")
msg = msg % {'name': server['name'], 'uuid': server['uuid']}
response.append(msg)
if err_code:
msg = _('Failed to evacuate instances: %s') % ', '.join(response)
raise nova_exceptions.ClientException(err_code, msg)
return True
def migrate_host(request, host, live_migrate=False, disk_over_commit=False,
block_migration=False):
hypervisors = novaclient(request).hypervisors.search(host, True)
response = []
err_code = None
for hyper in hypervisors:
for server in getattr(hyper, "servers", []):
try:
if live_migrate:
instance = server_get(request, server['uuid'])
# Checking that instance can be live-migrated
if instance.status in ["ACTIVE", "PAUSED"]:
novaclient(request).servers.live_migrate(
server['uuid'],
None,
block_migration,
disk_over_commit
)
else:
novaclient(request).servers.migrate(server['uuid'])
else:
novaclient(request).servers.migrate(server['uuid'])
except nova_exceptions.ClientException as err:
err_code = err.code
msg = _("Name: %(name)s ID: %(uuid)s")
msg = msg % {'name': server['name'], 'uuid': server['uuid']}
response.append(msg)
if err_code:
msg = _('Failed to migrate instances: %s') % ', '.join(response)
raise nova_exceptions.ClientException(err_code, msg)
return True
def tenant_absolute_limits(request, reserved=False):
limits = novaclient(request).limits.get(reserved=reserved).absolute
limits_dict = {}
for limit in limits:
if limit.value < 0:
# Workaround for nova bug 1370867 that absolute_limits
# returns negative value for total.*Used instead of 0.
# For such case, replace negative values with 0.
if limit.name.startswith('total') and limit.name.endswith('Used'):
limits_dict[limit.name] = 0
else:
# -1 is used to represent unlimited quotas
limits_dict[limit.name] = float("inf")
else:
limits_dict[limit.name] = limit.value
return limits_dict
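# Illustrative mapping (sketch; the values are made up): nova absolute limits
# of {'maxTotalInstances': -1, 'totalInstancesUsed': -3, 'maxTotalCores': 20}
# would come back from tenant_absolute_limits() as
# {'maxTotalInstances': float('inf'), 'totalInstancesUsed': 0,
#  'maxTotalCores': 20}.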
def availability_zone_list(request, detailed=False):
return novaclient(request).availability_zones.list(detailed=detailed)
def server_group_list(request):
return novaclient(request).server_groups.list()
def service_list(request, binary=None):
return novaclient(request).services.list(binary=binary)
def service_enable(request, host, binary):
return novaclient(request).services.enable(host, binary)
def service_disable(request, host, binary, reason=None):
if reason:
return novaclient(request).services.disable_log_reason(host,
binary, reason)
else:
return novaclient(request).services.disable(host, binary)
def aggregate_details_list(request):
result = []
c = novaclient(request)
for aggregate in c.aggregates.list():
result.append(c.aggregates.get_details(aggregate.id))
return result
def aggregate_create(request, name, availability_zone=None):
return novaclient(request).aggregates.create(name, availability_zone)
def aggregate_delete(request, aggregate_id):
return novaclient(request).aggregates.delete(aggregate_id)
def aggregate_get(request, aggregate_id):
return novaclient(request).aggregates.get(aggregate_id)
def aggregate_update(request, aggregate_id, values):
return novaclient(request).aggregates.update(aggregate_id, values)
def aggregate_set_metadata(request, aggregate_id, metadata):
return novaclient(request).aggregates.set_metadata(aggregate_id, metadata)
def host_list(request):
return novaclient(request).hosts.list()
def add_host_to_aggregate(request, aggregate_id, host):
return novaclient(request).aggregates.add_host(aggregate_id, host)
def remove_host_from_aggregate(request, aggregate_id, host):
return novaclient(request).aggregates.remove_host(aggregate_id, host)
def interface_attach(request,
server, port_id=None, net_id=None, fixed_ip=None):
return novaclient(request).servers.interface_attach(server,
port_id,
net_id,
fixed_ip)
####################################################################################
def interface_list(request, instance_id):
return novaclient(request).servers.interface_list(instance_id)
def iso_list(request):
return novaclient(request).images.list()
def cdrom_list(request, instance_id):
return novaclient(request).cdrom.cdrom_list(instance_id)
def cdrom_attach(request, instance_id, dev, image):
return novaclient(request).cdrom.attach_server_cdrom(instance_id, dev, image)
def interface_detach(request, server, port_id):
return novaclient(request).servers.interface_detach(server, port_id)
def create_remote(request, remote):
return novaclient(request).servers.create_remote(remote=remote)
def delete_remote(request, instance_id):
return novaclient(request).servers.cancel_remote_assistance(instance_id)
def remote_assistance_list(request, instance_id=None):
return novaclient(request).servers.remote_assistance_list(instance_id=instance_id)
def get_vm_cipher(request, server):
return novaclient(request).servers.get_vm_password(server)
def get_licence(request):
licence = novaclient(request).licence.get(id=1)
return probation(licence)
def update_licence(request, **kwargs):
return novaclient(request).licence.update(id=1, **kwargs)
def create_dev_snapshot(request, **kwargs):
return novaclient(request).snapshot.create(**kwargs)
def delete_dev_snapshot(request, instance_id, name):
return novaclient(request).snapshot.delete(instance_id, name)
def dev_snapshot_list(request, instance_id):
return novaclient(request).snapshot.list(instance_id)
def set_dev_snapshot(request, instance_id, name):
return novaclient(request).snapshot.set_dev_snapshot(instance_id, name)
def revert_dev_snapshot(request, instance_id, name):
return novaclient(request).snapshot.revert_dev_snapshot(instance_id, name)
def instance_timing(request, data):
return novaclient(request).servers.timing_interface(data=data)
def get_object_info(request, instance_id):
return novaclient(request).servers.get_object_info(instance_id)
def get_spice_secure(request, instance_id):
return novaclient(request).servers.get_spice_secure(instance_id)
def systemlogs_create(request, name, event_subject, result=True, detail='-'):
    return novaclient(request).systemlogs.create(name, event_subject,
                                                 result, detail)
def systemlogs_list(request, filters=None):
return novaclient(request).systemlogs.list(filters)
####################################################################################
@memoized_with_request(novaclient)
def list_extensions(nova_api):
"""List all nova extensions, except the ones in the blacklist."""
blacklist = set(getattr(settings,
'OPENSTACK_NOVA_EXTENSIONS_BLACKLIST', []))
return tuple(
extension for extension in
nova_list_extensions.ListExtManager(nova_api).show_all()
if extension.name not in blacklist
)
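# Configuration sketch (illustrative): with
#   OPENSTACK_NOVA_EXTENSIONS_BLACKLIST = ['SimpleTenantUsage']
# in the Django settings, list_extensions() filters that extension out of the
# tuple it returns, and extension_supported('SimpleTenantUsage', request)
# evaluates to False.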
@memoized_with_request(list_extensions, 1)
def extension_supported(extension_name, extensions):
"""Determine if nova supports a given extension name.
Example values for the extension_name include AdminActions, ConsoleOutput,
etc.
"""
for extension in extensions:
if extension.name == extension_name:
return True
return False
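# Usage sketch (illustrative): because extension_supported is memoized on the
# extension list fetched for the request's client, repeated feature checks in
# a view are cheap, e.g.
#
#   if extension_supported('AdminActions', request):
#       ...offer admin-only instance actions...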
def can_set_server_password():
features = getattr(settings, 'OPENSTACK_HYPERVISOR_FEATURES', {})
return features.get('can_set_password', False)
def instance_action_list(request, instance_id):
return nova_instance_action.InstanceActionManager(
novaclient(request)).list(instance_id)
def can_set_mount_point():
"""Return the Hypervisor's capability of setting mount points."""
hypervisor_features = getattr(
settings, "OPENSTACK_HYPERVISOR_FEATURES", {})
return hypervisor_features.get("can_set_mount_point", False)
def requires_keypair():
features = getattr(settings, 'OPENSTACK_HYPERVISOR_FEATURES', {})
return features.get('requires_keypair', False)
def can_set_quotas():
features = getattr(settings, 'OPENSTACK_HYPERVISOR_FEATURES', {})
return features.get('enable_quotas', True)
|
|
#!/usr/bin/python
# Copyright (c) 2011 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Software construction toolkit site_scons configuration.
This module sets up SCons for use with this toolkit. This should contain setup
which occurs outside of environments. If a method operates within the context
of an environment, it should instead go in a tool in site_tools and be invoked
for the target environment.
"""
import __builtin__
import sys
import SCons
import usage_log
import time
def _HostPlatform():
"""Returns the current host platform.
That is, the platform we're actually running SCons on. You shouldn't use
this inside your SConscript files; instead, include the appropriate
target_platform tool for your environments. When you call
BuildEnvironments(), only environments with the current host platform will be
built. If for some reason you really need to examine the host platform,
check env.Bit('host_windows') / env.Bit('host_linux') / env.Bit('host_mac').
Returns:
The host platform name - one of ('WINDOWS', 'LINUX', 'MAC').
"""
platform_map = {
'win32': 'WINDOWS',
'cygwin': 'WINDOWS',
'linux': 'LINUX',
'linux2': 'LINUX',
'linux3': 'LINUX',
'darwin': 'MAC',
}
if sys.platform not in platform_map:
        print ('site_init.py warning: platform "%s" is not in platform map.' %
sys.platform)
return platform_map.get(sys.platform, sys.platform)
def BuildEnvironmentSConscripts(env):
"""Evaluates SConscripts for the environment.
Called by BuildEnvironments().
"""
# Read SConscript for each component
# TODO: Remove BUILD_COMPONENTS once all projects have transitioned to the
# BUILD_SCONSCRIPTS nomenclature.
for c in env.SubstList2('$BUILD_SCONSCRIPTS', '$BUILD_COMPONENTS'):
# Clone the environment so components can't interfere with each other
ec = env.Clone()
if ec.Entry(c).isdir():
# The component is a directory, so assume it contains a SConscript
# file.
c_dir = ec.Dir(c)
# Use 'build.scons' as the default filename, but if that doesn't
# exist, fall back to 'SConscript'.
c_script = c_dir.File('build.scons')
if not c_script.exists():
c_script = c_dir.File('SConscript')
else:
# The component is a SConscript file.
c_script = ec.File(c)
c_dir = c_script.dir
# Make c_dir a string.
c_dir = str(c_dir)
# Use build_dir differently depending on where the SConscript is.
if not ec.RelativePath('$TARGET_ROOT', c_dir).startswith('..'):
# The above expression means: if c_dir is $TARGET_ROOT or anything
# under it. Going from c_dir to $TARGET_ROOT and dropping the not fails
# to include $TARGET_ROOT.
# We want to be able to allow people to use addRepository to back things
# under $TARGET_ROOT/$OBJ_ROOT with things from above the current
# directory. When we are passed a SConscript that is already under
# $TARGET_ROOT, we should not use build_dir.
start = time.clock()
ec.SConscript(c_script, exports={'env': ec}, duplicate=0)
if SCons.Script.ARGUMENTS.get('verbose'):
print "[%5d] Loaded" % (1000 * (time.clock() - start)), c_script
elif not ec.RelativePath('$MAIN_DIR', c_dir).startswith('..'):
# The above expression means: if c_dir is $MAIN_DIR or anything
# under it. Going from c_dir to $TARGET_ROOT and dropping the not fails
# to include $MAIN_DIR.
# Also, if we are passed a SConscript that
# is not under $MAIN_DIR, we should fail loudly, because it is unclear how
# this will correspond to things under $OBJ_ROOT.
start = time.clock()
ec.SConscript(c_script, variant_dir='$OBJ_ROOT/' + c_dir,
exports={'env': ec}, duplicate=0)
if SCons.Script.ARGUMENTS.get('verbose'):
print "[%5d] Loaded" % (1000 * (time.clock() - start)), c_script
else:
            raise SCons.Errors.UserError(
                'Bad location for a SConscript. "%s" is not under '
                '$TARGET_ROOT or $MAIN_DIR' % c_script)
def FilterEnvironments(environments):
"""Filters out the environments to be actually build from the specified list
Args:
environments: List of SCons environments.
Returns:
List of environments which were matched
"""
# Get options
build_modes = SCons.Script.GetOption('build_mode')
# TODO: Remove support legacy MODE= argument, once everyone has transitioned
# to --mode.
legacy_mode_option = SCons.Script.ARGUMENTS.get('MODE')
if legacy_mode_option:
build_modes = legacy_mode_option
environment_map = dict((env['BUILD_TYPE'], env) for env in environments)
# Add aliases for the host platform so that the caller of Scons does
# not need to work out which platform they are running on.
platform_map = {
'win32': 'win',
'cygwin': 'win',
'linux': 'linux',
'linux2': 'linux',
'darwin': 'mac',
}
if sys.platform in platform_map:
name = platform_map[sys.platform]
environment_map['opt-host'] = environment_map['opt-%s' % name]
environment_map['dbg-host'] = environment_map['dbg-%s' % name]
environment_map['coverage-host'] = environment_map['coverage-%s' % name]
matched_envs = []
for mode in build_modes.split(','):
if mode not in environment_map:
raise Exception('Build mode "%s" is not defined' % mode)
matched_envs.append(environment_map[mode])
return matched_envs
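# Illustrative example (not part of the original toolkit code): on a Linux
# host, "--mode=dbg-host,nacl" first resolves the 'dbg-host' alias to the
# environment whose BUILD_TYPE is 'dbg-linux' via the platform aliases above,
# then returns that environment plus the one named 'nacl' (assuming both are
# defined); an unknown mode name raises an exception rather than being
# silently ignored.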
def BuildEnvironments(environments):
"""Build a collection of SConscripts under a collection of environments.
The environments are subject to filtering (c.f. FilterEnvironments)
Args:
environments: List of SCons environments.
"""
usage_log.log.AddEntry('BuildEnvironments start')
for e in environments:
# Make this the root environment for deferred functions, so they don't
# execute until our call to ExecuteDefer().
e.SetDeferRoot()
# Defer building the SConscripts, so that other tools can do
# per-environment setup first.
e.Defer(BuildEnvironmentSConscripts)
# Execute deferred functions
e.ExecuteDefer()
# Add help on targets.
AddTargetHelp()
usage_log.log.AddEntry('BuildEnvironments done')
#------------------------------------------------------------------------------
def _ToolExists():
"""Replacement for SCons tool module exists() function, if one isn't present.
Returns:
        True. This allows tool modules that always exist to omit defining a
        dummy exists() function of their own.
"""
return True
def _ToolModule(self):
"""Thunk for SCons.Tool.Tool._tool_module to patch in exists() function.
Returns:
The module from the original SCons.Tool.Tool._tool_module call, with an
exists() method added if it wasn't present.
"""
module = self._tool_module_orig()
if not hasattr(module, 'exists'):
module.exists = _ToolExists
return module
#------------------------------------------------------------------------------
def AddSiteDir(site_dir):
"""Adds a site directory, as if passed to the --site-dir option.
Args:
site_dir: Site directory path to add, relative to the location of the
SConstruct file.
This may be called from the SConscript file to add a local site scons
directory for a project. This does the following:
* Adds site_dir/site_scons to sys.path.
* Imports site_dir/site_init.py.
* Adds site_dir/site_scons to the SCons tools path.
"""
# Call the same function that SCons does for the --site-dir option.
SCons.Script.Main._load_site_scons_dir(
SCons.Node.FS.get_default_fs().SConstruct_dir, site_dir)
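# Usage sketch (illustrative): a project SConscript can pull in its own local
# site directory before building, e.g.
#
#   AddSiteDir('third_party/swtoolkit')   # hypothetical path
#
# which adds that directory's site_scons to sys.path and the SCons tool path,
# just as if --site-dir had been passed on the command line.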
#------------------------------------------------------------------------------
_new_options_help = '''
Additional options for SCons:
--mode=MODE Specify build mode, e.g. "dbg-linux,nacl".
--host-platform=PLATFORM Force SCons to use PLATFORM as the host platform,
instead of the actual platform on which SCons is
run. Useful for examining the dependency tree
which would be created, but not useful for
actually running the build because it'll attempt
to use the wrong tools for your actual platform.
--site-path=DIRLIST Comma-separated list of additional site
directory paths; each is processed as if passed
to --site-dir.
--usage-log=FILE Write XML usage log to FILE.
'''
def SiteInitMain():
"""Main code executed in site_init."""
# Bail out if we've been here before. This is needed to handle the case where
# this site_init.py has been dropped into a project directory.
if hasattr(__builtin__, 'BuildEnvironments'):
return
usage_log.log.AddEntry('Software Construction Toolkit site init')
# Let people use new global methods directly.
__builtin__.AddSiteDir = AddSiteDir
__builtin__.FilterEnvironments = FilterEnvironments
__builtin__.BuildEnvironments = BuildEnvironments
# Legacy method names
# TODO: Remove these once they're no longer used anywhere.
__builtin__.BuildComponents = BuildEnvironments
# Set list of default tools for component_setup
__builtin__.component_setup_tools = [
# Defer must be first so other tools can register environment
# setup/cleanup functions.
'defer',
# Component_targets must precede component_builders so builders can
# define target groups.
'component_targets',
'command_output',
'component_bits',
'component_builders',
'environment_tools',
'publish',
'replicate',
'wix',
]
# Patch Tool._tool_module method to fill in an exists() method for the
# module if it isn't present.
# TODO: This functionality should be patched into SCons itself by changing
# Tool.__init__().
SCons.Tool.Tool._tool_module_orig = SCons.Tool.Tool._tool_module
SCons.Tool.Tool._tool_module = _ToolModule
# Add our options
SCons.Script.AddOption(
'--mode', '--build-mode',
dest='build_mode',
nargs=1, type='string',
action='store',
metavar='MODE',
default='opt-host,nacl',
help='build mode(s)')
SCons.Script.AddOption(
'--host-platform',
dest='host_platform',
nargs=1, type='string',
action='store',
metavar='PLATFORM',
        help='host platform to build for')
SCons.Script.AddOption(
'--site-path',
dest='site_path',
nargs=1, type='string',
action='store',
metavar='PATH',
help='comma-separated list of site directories')
SCons.Script.AddOption(
'--usage-log',
dest='usage_log',
nargs=1, type='string',
action='store',
metavar='PATH',
help='file to write XML usage log to')
SCons.Script.Help(_new_options_help)
# Set up usage log
usage_log_file = SCons.Script.GetOption('usage_log')
if usage_log_file:
usage_log.log.SetOutputFile(usage_log_file)
# Set current host platform
host_platform = SCons.Script.GetOption('host_platform')
if not host_platform:
host_platform = _HostPlatform()
__builtin__.HOST_PLATFORM = host_platform
# Check for site path. This is a list of site directories which each are
# processed as if they were passed to --site-dir.
site_path = SCons.Script.GetOption('site_path')
if site_path:
for site_dir in site_path.split(','):
AddSiteDir(site_dir)
# Since our site dir was specified on the SCons command line, SCons will
# normally only look at our site dir. Add back checking for project-local
# site_scons directories.
if not SCons.Script.GetOption('no_site_dir'):
SCons.Script.Main._load_site_scons_dir(
SCons.Node.FS.get_default_fs().SConstruct_dir, None)
# Run main code
SiteInitMain()
|
|
# Copyright 2014 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Violin Memory 6000 Series All-Flash Array Fibrechannel Driver
"""
import mock
from oslo_utils import units
from cinder import context
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder import test
from cinder.tests import fake_vmem_xgtools_client as vxg
from cinder.volume import configuration as conf
from cinder.volume.drivers.violin import v6000_common
from cinder.volume.drivers.violin import v6000_fcp
VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
VOLUME = {
"name": "volume-" + VOLUME_ID,
"id": VOLUME_ID,
"display_name": "fake_volume",
"size": 2,
"host": "irrelevant",
"volume_type": None,
"volume_type_id": None,
}
SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
SNAPSHOT = {
"name": "snapshot-" + SNAPSHOT_ID,
"id": SNAPSHOT_ID,
"volume_id": VOLUME_ID,
"volume_name": "volume-" + VOLUME_ID,
"volume_size": 2,
"display_name": "fake_snapshot",
"volume": VOLUME,
}
SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
SRC_VOL = {
"name": "volume-" + SRC_VOL_ID,
"id": SRC_VOL_ID,
"display_name": "fake_src_vol",
"size": 2,
"host": "irrelevant",
"volume_type": None,
"volume_type_id": None,
}
INITIATOR_IQN = "iqn.1111-22.org.debian:11:222"
CONNECTOR = {
"initiator": INITIATOR_IQN,
"host": "irrelevant",
'wwpns': [u'50014380186b3f65', u'50014380186b3f67'],
}
FC_TARGET_WWPNS = [
'31000024ff45fb22', '21000024ff45fb23',
'51000024ff45f1be', '41000024ff45f1bf'
]
FC_INITIATOR_WWPNS = [
'50014380186b3f65', '50014380186b3f67'
]
FC_FABRIC_MAP = {
'fabricA':
{'target_port_wwn_list': [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[0]]},
'fabricB':
{'target_port_wwn_list': [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]],
'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[1]]}
}
FC_INITIATOR_TARGET_MAP = {
FC_INITIATOR_WWPNS[0]: [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
FC_INITIATOR_WWPNS[1]: [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]]
}
class V6000FCPDriverTestCase(test.TestCase):
"""Test cases for VMEM FCP driver."""
def setUp(self):
super(V6000FCPDriverTestCase, self).setUp()
self.conf = self.setup_configuration()
self.driver = v6000_fcp.V6000FCDriver(configuration=self.conf)
self.driver.common.container = 'myContainer'
self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022'
self.driver.gateway_fc_wwns = FC_TARGET_WWPNS
self.stats = {}
self.driver.set_initialized()
def tearDown(self):
super(V6000FCPDriverTestCase, self).tearDown()
def setup_configuration(self):
config = mock.Mock(spec=conf.Configuration)
config.volume_backend_name = 'v6000_fcp'
config.san_ip = '1.1.1.1'
config.san_login = 'admin'
config.san_password = ''
config.san_thin_provision = False
config.san_is_local = False
config.gateway_mga = '2.2.2.2'
config.gateway_mgb = '3.3.3.3'
config.use_igroups = False
config.request_timeout = 300
config.container = 'myContainer'
return config
def setup_mock_vshare(self, m_conf=None):
"""Create a fake VShare communication object."""
_m_vshare = mock.Mock(name='VShare',
version='1.1.1',
spec=vxg.mock_client_conf)
if m_conf:
_m_vshare.configure_mock(**m_conf)
return _m_vshare
@mock.patch.object(v6000_common.V6000Common, 'check_for_setup_error')
def test_check_for_setup_error(self, m_setup_func):
"""No setup errors are found."""
result = self.driver.check_for_setup_error()
m_setup_func.assert_called_with()
self.assertTrue(result is None)
@mock.patch.object(v6000_common.V6000Common, 'check_for_setup_error')
def test_check_for_setup_error_no_wwn_config(self, m_setup_func):
"""No wwns were found during setup."""
self.driver.gateway_fc_wwns = []
self.assertRaises(exception.ViolinInvalidBackendConfig,
self.driver.check_for_setup_error)
def test_create_volume(self):
"""Volume created successfully."""
self.driver.common._create_lun = mock.Mock()
result = self.driver.create_volume(VOLUME)
self.driver.common._create_lun.assert_called_with(VOLUME)
self.assertTrue(result is None)
def test_delete_volume(self):
"""Volume deleted successfully."""
self.driver.common._delete_lun = mock.Mock()
result = self.driver.delete_volume(VOLUME)
self.driver.common._delete_lun.assert_called_with(VOLUME)
self.assertTrue(result is None)
def test_create_snapshot(self):
"""Snapshot created successfully."""
self.driver.common._create_lun_snapshot = mock.Mock()
result = self.driver.create_snapshot(SNAPSHOT)
self.driver.common._create_lun_snapshot.assert_called_with(SNAPSHOT)
self.assertTrue(result is None)
def test_delete_snapshot(self):
"""Snapshot deleted successfully."""
self.driver.common._delete_lun_snapshot = mock.Mock()
result = self.driver.delete_snapshot(SNAPSHOT)
self.driver.common._delete_lun_snapshot.assert_called_with(SNAPSHOT)
self.assertTrue(result is None)
@mock.patch.object(context, 'get_admin_context')
def test_create_volume_from_snapshot(self, m_context_func):
"""Volume created from a snapshot successfully."""
m_context_func.return_value = None
self.driver.common._create_lun = mock.Mock()
self.driver.copy_volume_data = mock.Mock()
result = self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT)
m_context_func.assert_called_with()
self.driver.common._create_lun.assert_called_with(VOLUME)
self.driver.copy_volume_data.assert_called_with(None, SNAPSHOT, VOLUME)
self.assertTrue(result is None)
@mock.patch.object(context, 'get_admin_context')
def test_create_cloned_volume(self, m_context_func):
"""Volume clone created successfully."""
m_context_func.return_value = None
self.driver.common._create_lun = mock.Mock()
self.driver.copy_volume_data = mock.Mock()
result = self.driver.create_cloned_volume(VOLUME, SRC_VOL)
m_context_func.assert_called_with()
self.driver.common._create_lun.assert_called_with(VOLUME)
self.driver.copy_volume_data.assert_called_with(None, SRC_VOL, VOLUME)
self.assertTrue(result is None)
def test_initialize_connection(self):
lun_id = 1
igroup = None
target_wwns = self.driver.gateway_fc_wwns
init_targ_map = {}
volume = mock.Mock(spec=models.Volume)
self.driver.common.vip = self.setup_mock_vshare()
self.driver._export_lun = mock.Mock(return_value=lun_id)
self.driver._build_initiator_target_map = mock.Mock(
return_value=(target_wwns, init_targ_map))
props = self.driver.initialize_connection(volume, CONNECTOR)
self.driver._export_lun.assert_called_with(volume, CONNECTOR, igroup)
self.driver.common.vip.basic.save_config.assert_called_with()
self.driver._build_initiator_target_map.assert_called_with(
CONNECTOR)
self.assertEqual("fibre_channel", props['driver_volume_type'])
self.assertEqual(True, props['data']['target_discovered'])
self.assertEqual(target_wwns, props['data']['target_wwn'])
self.assertEqual(lun_id, props['data']['target_lun'])
self.assertEqual(init_targ_map, props['data']['initiator_target_map'])
def test_initialize_connection_with_snapshot_object(self):
lun_id = 1
igroup = None
target_wwns = self.driver.gateway_fc_wwns
init_targ_map = {}
snapshot = mock.Mock(spec=models.Snapshot)
self.driver.common.vip = self.setup_mock_vshare()
self.driver._export_snapshot = mock.Mock(return_value=lun_id)
self.driver._build_initiator_target_map = mock.Mock(
return_value=(target_wwns, init_targ_map))
props = self.driver.initialize_connection(snapshot, CONNECTOR)
self.driver._export_snapshot.assert_called_with(
snapshot, CONNECTOR, igroup)
self.driver.common.vip.basic.save_config.assert_called_with()
self.driver._build_initiator_target_map.assert_called_with(
CONNECTOR)
self.assertEqual("fibre_channel", props['driver_volume_type'])
self.assertEqual(True, props['data']['target_discovered'])
self.assertEqual(target_wwns, props['data']['target_wwn'])
self.assertEqual(lun_id, props['data']['target_lun'])
self.assertEqual(init_targ_map, props['data']['initiator_target_map'])
def test_terminate_connection(self):
target_wwns = self.driver.gateway_fc_wwns
init_targ_map = {}
volume = mock.Mock(spec=models.Volume)
self.driver.common.vip = self.setup_mock_vshare()
self.driver._unexport_lun = mock.Mock()
self.driver._is_initiator_connected_to_array = mock.Mock(
return_value=False)
self.driver._build_initiator_target_map = mock.Mock(
return_value=(target_wwns, init_targ_map))
props = self.driver.terminate_connection(volume, CONNECTOR)
self.driver._unexport_lun.assert_called_with(volume)
self.driver.common.vip.basic.save_config.assert_called_with()
self.driver._is_initiator_connected_to_array.assert_called_with(
CONNECTOR)
self.driver._build_initiator_target_map.assert_called_with(
CONNECTOR)
self.assertEqual("fibre_channel", props['driver_volume_type'])
self.assertEqual(target_wwns, props['data']['target_wwn'])
self.assertEqual(init_targ_map, props['data']['initiator_target_map'])
def test_terminate_connection_snapshot_object(self):
target_wwns = self.driver.gateway_fc_wwns
init_targ_map = {}
snapshot = mock.Mock(spec=models.Snapshot)
self.driver.common.vip = self.setup_mock_vshare()
self.driver._unexport_snapshot = mock.Mock()
self.driver._is_initiator_connected_to_array = mock.Mock(
return_value=False)
self.driver._build_initiator_target_map = mock.Mock(
return_value=(target_wwns, init_targ_map))
props = self.driver.terminate_connection(snapshot, CONNECTOR)
self.assertEqual("fibre_channel", props['driver_volume_type'])
self.assertEqual(target_wwns, props['data']['target_wwn'])
self.assertEqual(init_targ_map, props['data']['initiator_target_map'])
def test_get_volume_stats(self):
self.driver._update_stats = mock.Mock()
self.driver._update_stats()
result = self.driver.get_volume_stats(True)
self.driver._update_stats.assert_called_with()
self.assertEqual(self.driver.stats, result)
def test_export_lun(self):
lun_id = '1'
igroup = 'test-igroup-1'
response = {'code': 0, 'message': ''}
self.driver.common.vip = self.setup_mock_vshare()
self.driver.common._send_cmd_and_verify = mock.Mock(
return_value=response)
self.driver.common._get_lun_id = mock.Mock(return_value=lun_id)
result = self.driver._export_lun(VOLUME, CONNECTOR, igroup)
self.driver.common._send_cmd_and_verify.assert_called_with(
self.driver.common.vip.lun.export_lun,
self.driver.common._wait_for_export_config, '',
[self.driver.common.container, VOLUME['id'], 'all',
igroup, 'auto'], [VOLUME['id'], 'state=True'])
self.driver.common._get_lun_id.assert_called_with(VOLUME['id'])
self.assertEqual(lun_id, result)
def test_export_lun_fails_with_exception(self):
lun_id = '1'
igroup = 'test-igroup-1'
response = {'code': 14000, 'message': 'Generic error'}
failure = exception.ViolinBackendErr
self.driver.common.vip = self.setup_mock_vshare()
self.driver.common._send_cmd_and_verify = mock.Mock(
side_effect=failure(response['message']))
self.driver.common._get_lun_id = mock.Mock(return_value=lun_id)
self.assertRaises(failure, self.driver._export_lun,
VOLUME, CONNECTOR, igroup)
def test_unexport_lun(self):
response = {'code': 0, 'message': ''}
self.driver.common.vip = self.setup_mock_vshare()
self.driver.common._send_cmd_and_verify = mock.Mock(
return_value=response)
result = self.driver._unexport_lun(VOLUME)
self.driver.common._send_cmd_and_verify.assert_called_with(
self.driver.common.vip.lun.unexport_lun,
self.driver.common._wait_for_export_config, '',
[self.driver.common.container, VOLUME['id'], 'all', 'all', 'auto'],
[VOLUME['id'], 'state=False'])
self.assertTrue(result is None)
def test_unexport_lun_fails_with_exception(self):
response = {'code': 14000, 'message': 'Generic error'}
failure = exception.ViolinBackendErr
self.driver.common.vip = self.setup_mock_vshare()
self.driver.common._send_cmd_and_verify = mock.Mock(
side_effect=failure(response['message']))
self.assertRaises(failure, self.driver._unexport_lun, VOLUME)
def test_export_snapshot(self):
lun_id = '1'
igroup = 'test-igroup-1'
response = {'code': 0, 'message': ''}
self.driver.common.vip = self.setup_mock_vshare()
self.driver.common._send_cmd = mock.Mock(return_value=response)
self.driver.common._wait_for_export_config = mock.Mock()
self.driver.common._get_snapshot_id = mock.Mock(return_value=lun_id)
result = self.driver._export_snapshot(SNAPSHOT, CONNECTOR, igroup)
self.driver.common._send_cmd.assert_called_with(
self.driver.common.vip.snapshot.export_lun_snapshot, '',
self.driver.common.container, SNAPSHOT['volume_id'],
SNAPSHOT['id'], igroup, 'all', 'auto')
self.driver.common._wait_for_export_config.assert_called_with(
SNAPSHOT['volume_id'], SNAPSHOT['id'], state=True)
self.driver.common._get_snapshot_id.assert_called_once_with(
SNAPSHOT['volume_id'], SNAPSHOT['id'])
self.assertEqual(lun_id, result)
def test_unexport_snapshot(self):
response = {'code': 0, 'message': ''}
self.driver.common.vip = self.setup_mock_vshare()
self.driver.common._send_cmd = mock.Mock(return_value=response)
self.driver.common._wait_for_export_config = mock.Mock()
result = self.driver._unexport_snapshot(SNAPSHOT)
self.driver.common._send_cmd.assert_called_with(
self.driver.common.vip.snapshot.unexport_lun_snapshot, '',
self.driver.common.container, SNAPSHOT['volume_id'],
SNAPSHOT['id'], 'all', 'all', 'auto', False)
self.driver.common._wait_for_export_config.assert_called_with(
SNAPSHOT['volume_id'], SNAPSHOT['id'], state=False)
self.assertTrue(result is None)
def test_add_igroup_member(self):
igroup = 'test-group-1'
response = {'code': 0, 'message': 'success'}
wwpns = ['wwn.50:01:43:80:18:6b:3f:65', 'wwn.50:01:43:80:18:6b:3f:67']
conf = {
'igroup.add_initiators.return_value': response,
}
self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._convert_wwns_openstack_to_vmem = mock.Mock(
return_value=wwpns)
result = self.driver._add_igroup_member(CONNECTOR, igroup)
self.driver._convert_wwns_openstack_to_vmem.assert_called_with(
CONNECTOR['wwpns'])
self.driver.common.vip.igroup.add_initiators.assert_called_with(
igroup, wwpns)
self.assertTrue(result is None)
def test_build_initiator_target_map(self):
"""Successfully build a map when zoning is enabled."""
expected_targ_wwns = FC_TARGET_WWPNS
expected_init_targ_map = FC_INITIATOR_TARGET_MAP
self.driver.lookup_service = mock.Mock()
self.driver.lookup_service.get_device_mapping_from_network.\
return_value = FC_FABRIC_MAP
(targ_wwns, init_targ_map) = \
self.driver._build_initiator_target_map(CONNECTOR)
self.driver.lookup_service.get_device_mapping_from_network.\
assert_called_with(CONNECTOR['wwpns'], self.driver.gateway_fc_wwns)
self.assertEqual(set(expected_targ_wwns), set(targ_wwns))
self.assertEqual(expected_init_targ_map, init_targ_map)
def test_build_initiator_target_map_no_lookup_service(self):
"""Successfully build a map when zoning is disabled."""
expected_targ_wwns = FC_TARGET_WWPNS
expected_init_targ_map = {
CONNECTOR['wwpns'][0]: FC_TARGET_WWPNS,
CONNECTOR['wwpns'][1]: FC_TARGET_WWPNS
}
self.driver.lookup_service = None
targ_wwns, init_targ_map = self.driver._build_initiator_target_map(
CONNECTOR)
self.assertEqual(expected_targ_wwns, targ_wwns)
self.assertEqual(expected_init_targ_map, init_targ_map)
def test_is_initiator_connected_to_array(self):
"""Successfully finds an initiator with remaining active session."""
converted_wwpns = ['50:01:43:80:18:6b:3f:65',
'50:01:43:80:18:6b:3f:67']
prefix = "/vshare/config/export/container"
bn = "%s/%s/lun/**" % (prefix, self.driver.common.container)
resp_binding0 = "%s/%s/lun/%s/target/hba-a1/initiator/%s" \
% (prefix, self.driver.common.container, VOLUME['id'],
converted_wwpns[0])
resp_binding1 = "%s/%s/lun/%s/target/hba-a1/initiator/%s" \
% (prefix, self.driver.common.container, VOLUME['id'],
converted_wwpns[1])
response = {
resp_binding0: converted_wwpns[0],
resp_binding1: converted_wwpns[1]
}
conf = {
'basic.get_node_values.return_value': response,
}
self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._convert_wwns_openstack_to_vmem = mock.Mock(
return_value=converted_wwpns)
self.assertTrue(self.driver._is_initiator_connected_to_array(
CONNECTOR))
self.driver.common.vip.basic.get_node_values.assert_called_with(bn)
def test_is_initiator_connected_to_array_empty_response(self):
"""Successfully finds no initiators with remaining active sessions."""
converted_wwpns = ['50:01:43:80:18:6b:3f:65',
'50:01:43:80:18:6b:3f:67']
response = {}
conf = {
'basic.get_node_values.return_value': response,
}
self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._convert_wwns_openstack_to_vmem = mock.Mock(
return_value=converted_wwpns)
self.assertFalse(self.driver._is_initiator_connected_to_array(
CONNECTOR))
def test_update_stats(self):
backend_name = self.conf.volume_backend_name
vendor_name = "Violin Memory, Inc."
tot_bytes = 100 * units.Gi
free_bytes = 50 * units.Gi
bn0 = '/cluster/state/master_id'
bn1 = "/vshare/state/global/1/container/myContainer/total_bytes"
bn2 = "/vshare/state/global/1/container/myContainer/free_bytes"
response1 = {bn0: '1'}
response2 = {bn1: tot_bytes, bn2: free_bytes}
conf = {
'basic.get_node_values.side_effect': [response1, response2],
}
self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)
result = self.driver._update_stats()
calls = [mock.call(bn0), mock.call([bn1, bn2])]
self.driver.common.vip.basic.get_node_values.assert_has_calls(calls)
self.assertEqual(100, self.driver.stats['total_capacity_gb'])
self.assertEqual(50, self.driver.stats['free_capacity_gb'])
self.assertEqual(backend_name,
self.driver.stats['volume_backend_name'])
self.assertEqual(vendor_name, self.driver.stats['vendor_name'])
self.assertTrue(result is None)
def test_update_stats_fails_data_query(self):
backend_name = self.conf.volume_backend_name
vendor_name = "Violin Memory, Inc."
bn0 = '/cluster/state/master_id'
response1 = {bn0: '1'}
response2 = {}
conf = {
'basic.get_node_values.side_effect': [response1, response2],
}
self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)
self.assertTrue(self.driver._update_stats() is None)
self.assertEqual(0, self.driver.stats['total_capacity_gb'])
self.assertEqual(0, self.driver.stats['free_capacity_gb'])
self.assertEqual(backend_name,
self.driver.stats['volume_backend_name'])
self.assertEqual(vendor_name, self.driver.stats['vendor_name'])
def test_get_active_fc_targets(self):
bn0 = '/vshare/state/global/*'
response0 = {'/vshare/state/global/1': 1,
'/vshare/state/global/2': 2}
bn1 = '/vshare/state/global/1/target/fc/**'
response1 = {'/vshare/state/global/1/target/fc/hba-a1/wwn':
'wwn.21:00:00:24:ff:45:fb:22'}
bn2 = '/vshare/state/global/2/target/fc/**'
response2 = {'/vshare/state/global/2/target/fc/hba-a1/wwn':
'wwn.21:00:00:24:ff:45:e2:30'}
wwpns = ['21000024ff45fb22', '21000024ff45e230']
conf = {
'basic.get_node_values.side_effect':
[response0, response1, response2],
}
self.driver.common.vip = self.setup_mock_vshare(m_conf=conf)
result = self.driver._get_active_fc_targets()
calls = [mock.call(bn0), mock.call(bn1), mock.call(bn2)]
self.driver.common.vip.basic.get_node_values.assert_has_calls(
calls, any_order=True)
self.assertEqual(wwpns, result)
def test_convert_wwns_openstack_to_vmem(self):
vmem_wwns = ['wwn.50:01:43:80:18:6b:3f:65']
openstack_wwns = ['50014380186b3f65']
result = self.driver._convert_wwns_openstack_to_vmem(openstack_wwns)
self.assertEqual(vmem_wwns, result)
def test_convert_wwns_vmem_to_openstack(self):
vmem_wwns = ['wwn.50:01:43:80:18:6b:3f:65']
openstack_wwns = ['50014380186b3f65']
result = self.driver._convert_wwns_vmem_to_openstack(vmem_wwns)
self.assertEqual(openstack_wwns, result)
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.16 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
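# When the tree is exported with "git archive" (export-subst enabled for this
# file), git expands the keywords roughly like this (illustrative values only):
#   git_refnames = " (HEAD -> master, tag: v1.0.0)"
#   git_full = "0123456789abcdef0123456789abcdef01234567"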
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "scmtiles"
cfg.versionfile_source = "scmtiles/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes
both the project name and a version string.
"""
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
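# Illustrative parse (assumed values): with tag_prefix "v", a describe output of
# "v1.2.0-3-gabc1234-dirty" yields closest-tag "1.2.0", distance 3,
# short "abc1234" and dirty True; "long" always holds the full rev-parse id.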
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
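# A few illustrative pieces -> version mappings for render_pep440 (assumed inputs):
#   closest-tag "1.2", distance 0, clean                   -> "1.2"
#   closest-tag "1.2", distance 3, short "abc1234", dirty  -> "1.2+3.gabc1234.dirty"
#   closest-tag None,  distance 7, short "abc1234", clean  -> "0+untagged.7.gabc1234"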
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
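# Illustrative (assumed pieces): closest-tag "1.2", distance 3, dirty,
# short "abc1234" renders as "1.2.post3.dev0+gabc1234".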
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
|
|
import json
from unittest2 import TestCase
from werkzeug.wrappers import Response
from werkzeug.test import Client as TestClient
from cosmic.api import API
from cosmic.http import Server
from cosmic.models import BaseModel
from cosmic.globals import cosmos
from cosmic.types import *
cookbook_spec = {
u'name': u'cookbook',
u'actions': {
u'map': {
u'cabbage': {
u'accepts': {
u'type': u"Struct",
u"param": {
u"map": {
u"spicy": {
u"required": True,
u"schema": {u"type": u"Boolean"}
},
u"capitalize": {
u"required": False,
u"schema": {u"type": u"Boolean"}
}
},
u"order": [u"spicy", u"capitalize"]
}
},
u'returns': {u'type': u'String'},
u'doc': u"Yay cabbage"
},
u'noop': {
u'doc': u"Does not do anything"
}
},
u'order': [u'cabbage', u'noop']
},
u"models": {
u"map": {
u"Recipe": {
u"properties": {
u"map": {
u"name": {
u"required": True,
u"schema": {u"type": u"String"}
},
},
u"order": [u"name"]
},
u"links": {
u"map": {},
u"order": []
},
u"query_fields": {
u"map": {},
u"order": []
},
u"list_metadata": {
u"map": {},
u"order": []
},
u'methods': {
u'get_by_id': False,
u'get_list': False,
u'create': False,
u'update': False,
u'delete': False,
},
},
u"Author": {
u"properties": {
u"map": {
u"is_gordon_ramsay": {
u"required": True,
u"schema": {u"type": u"Boolean"}
},
},
u"order": [u"is_gordon_ramsay"]
},
u"links": {
u"map": {},
u"order": []
},
u"query_fields": {
u"map": {
u"is_gordon_ramsay": {
u"required": True,
u"schema": {u"type": u"Boolean"}
},
},
u"order": [u"is_gordon_ramsay"]
},
u"list_metadata": {
u"map": {},
u"order": []
},
u'methods': {
u'get_by_id': False,
u'get_list': True,
u'create': False,
u'update': False,
u'delete': False,
},
}
},
u"order": [u"Recipe", u"Author"]
}
}
class TestAPI(TestCase):
def setUp(self):
self.maxDiff = None
self._old_cosmos = cosmos.data
cosmos.data = {}
self.cookbook = cookbook = API(u'cookbook')
@cookbook.action(
accepts=Struct([
required(u"spicy", Boolean),
optional(u"capitalize", Boolean)
]),
returns=String)
def cabbage(spicy, capitalize=False):
"Yay cabbage"
if spicy:
c = "kimchi"
else:
c = "sauerkraut"
if capitalize:
return c.capitalize()
else:
return c
@cookbook.action(accepts=None, returns=None)
def noop():
"Does not do anything"
pass
@cookbook.model
class Recipe(BaseModel):
properties = [
required(u"name", String)
]
@classmethod
def validate_patch(cls, datum):
if datum["name"] == "bacon":
raise ValidationError("Not kosher")
@cookbook.model
class Author(BaseModel):
methods = ['get_list']
properties = [
required(u"is_gordon_ramsay", Boolean)
]
query_fields = [
required(u"is_gordon_ramsay", Boolean)
]
@classmethod
            def get_list(cls, is_gordon_ramsay):
                return [("0", {"is_gordon_ramsay": True})]
self.Author = Author
self.server = Server(self.cookbook)
self.server.debug = True
self.app = self.server.wsgi_app
self.client = TestClient(self.app, response_wrapper=Response)
def tearDown(self):
cosmos.data = self._old_cosmos
def test_get_list_missing(self):
resp = self.client.get('/Author')
self.assertEqual(resp.status_code, 400)
def test_model(self):
d = {
"_links": {
"self": {"href": "/Recipe/24"}
},
"name": "pancake"
}
(id, rep) = Representation(Model('cookbook.Recipe')).from_json(d)
self.assertEqual(rep['name'], "pancake")
self.assertEqual(Representation(Model('cookbook.Recipe')).to_json((id, rep)), d)
def test_model_deserialize_okay(self):
(id, rep) = Representation(Model('cookbook.Recipe')).from_json({
"_links": {
"self": {"href": "/Recipe/14"}
},
"name": "turkey"
})
self.assertEqual(rep['name'], "turkey")
def test_subclassing_hook(self):
self.assertEqual(set(self.cookbook.models.__dict__.keys()), set(["Recipe", "Author"]))
def test_recursive_subclassing_hook(self):
@self.cookbook.model
class ChocolateAuthor(self.Author):
pass
self.assertEqual(set(self.cookbook.models.__dict__.keys()), set(["Recipe", "Author", "ChocolateAuthor"]))
def test_model_schema_validation(self):
with self.assertRaises(ValidationError):
Representation(Model('cookbook.Recipe')).from_json(1.1)
def test_model_custom_validation(self):
with self.assertRaisesRegexp(ValidationError, "kosher"):
(id, rep) = Representation(Model('cookbook.Recipe')).from_json({
"_links": {
"self": {"href": "/Recipe/123"}
},
"name": "bacon"
})
self.cookbook.models.Recipe.validate_patch(rep)
def test_serialize(self):
self.assertEqual(APISpec.to_json(self.cookbook.spec), cookbook_spec)
def test_call_action_with_args(self):
self.assertEqual(self.cookbook.actions.cabbage(spicy=False), "sauerkraut")
def test_spec_endpoint(self):
res = self.client.get('/spec.json')
self.assertEqual(json.loads(res.data), cookbook_spec)
def test_spec_wrong_method(self):
res = self.client.get('/actions/noop')
self.assertEqual(res.status_code, 404)
res = self.client.post('/spec.json')
self.assertEqual(res.status_code, 404)
def test_wrong_content_type(self):
res = self.client.post('/actions/cabbage', data="1", content_type="application/xml")
self.assertEqual(res.status_code, 400)
self.assertRegexpMatches(res.data, "Content-Type")
def test_action_okay(self):
data = json.dumps({"spicy": True})
res = self.client.post('/actions/cabbage', data=data, content_type="application/json")
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, '"kimchi"')
def test_noop_action_okay(self):
res = self.client.post('/actions/noop', data='')
self.assertEqual(res.status_code, 204)
self.assertEqual(res.data, '')
def test_schema(self):
APISpec.from_json(APISpec.to_json(self.cookbook.spec))
|
|
"""
sentry.testutils.cases
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = ('TestCase', 'TransactionTestCase', 'APITestCase', 'RuleTestCase',
'PermissionTestCase', 'PluginTestCase')
import base64
import os.path
import urllib
from django.conf import settings
from django.contrib.auth import login
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from django.test import TestCase, TransactionTestCase
from django.utils.importlib import import_module
from exam import before, Exam
from nydus.db import create_cluster
from rest_framework.test import APITestCase as BaseAPITestCase
from sentry.constants import MODULE_ROOT
from sentry.models import GroupMeta, OrganizationMemberType, ProjectOption
from sentry.plugins import plugins
from sentry.rules import EventState
from sentry.utils import json
from .fixtures import Fixtures
from .helpers import get_auth_header
def create_redis_conn():
options = {
'engine': 'nydus.db.backends.redis.Redis',
}
options.update(settings.SENTRY_REDIS_OPTIONS)
return create_cluster(options)
_redis_conn = create_redis_conn()
def flush_redis():
_redis_conn.flushdb()
class BaseTestCase(Fixtures, Exam):
urls = 'tests.sentry.web.urls'
def assertRequiresAuthentication(self, path, method='GET'):
resp = getattr(self.client, method.lower())(path)
assert resp.status_code == 302
assert resp['Location'] == 'http://testserver' + reverse('sentry-login')
@before
def setup_session(self):
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session.save()
self.session = session
def save_session(self):
self.session.save()
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
session_cookie = settings.SESSION_COOKIE_NAME
self.client.cookies[session_cookie] = self.session.session_key
self.client.cookies[session_cookie].update(cookie_data)
def login_as(self, user):
user.backend = settings.AUTHENTICATION_BACKENDS[0]
request = HttpRequest()
request.session = self.session
login(request, user)
request.user = user
# Save the session values.
self.save_session()
def load_fixture(self, filepath):
filepath = os.path.join(
MODULE_ROOT,
'tests',
'fixtures',
filepath,
)
with open(filepath, 'rb') as fp:
return fp.read()
def _pre_setup(self):
super(BaseTestCase, self)._pre_setup()
cache.clear()
ProjectOption.objects.clear_local_cache()
GroupMeta.objects.clear_local_cache()
def _post_teardown(self):
super(BaseTestCase, self)._post_teardown()
flush_redis()
def _makeMessage(self, data):
return json.dumps(data)
def _makePostMessage(self, data):
return base64.b64encode(self._makeMessage(data))
def _postWithHeader(self, data, key=None, secret=None):
if key is None:
key = self.projectkey.public_key
secret = self.projectkey.secret_key
message = self._makePostMessage(data)
with self.settings(CELERY_ALWAYS_EAGER=True):
resp = self.client.post(
reverse('sentry-api-store'), message,
content_type='application/octet-stream',
HTTP_X_SENTRY_AUTH=get_auth_header('_postWithHeader', key, secret),
)
return resp
def _getWithReferer(self, data, key=None, referer='getsentry.com', protocol='4'):
if key is None:
key = self.projectkey.public_key
headers = {}
if referer is not None:
headers['HTTP_REFERER'] = referer
message = self._makeMessage(data)
qs = {
'sentry_version': protocol,
'sentry_client': 'raven-js/lol',
'sentry_key': key,
'sentry_data': message,
}
with self.settings(CELERY_ALWAYS_EAGER=True):
resp = self.client.get(
'%s?%s' % (reverse('sentry-api-store', args=(self.project.pk,)), urllib.urlencode(qs)),
**headers
)
return resp
_postWithSignature = _postWithHeader
_postWithNewSignature = _postWithHeader
class TestCase(BaseTestCase, TestCase):
pass
class TransactionTestCase(BaseTestCase, TransactionTestCase):
pass
class APITestCase(BaseTestCase, BaseAPITestCase):
pass
class RuleTestCase(TestCase):
rule_cls = None
def get_event(self):
return self.event
def get_rule(self, data=None):
return self.rule_cls(
project=self.project,
data=data or {},
)
def get_state(self, **kwargs):
kwargs.setdefault('is_new', True)
kwargs.setdefault('is_regression', True)
kwargs.setdefault('is_sample', True)
kwargs.setdefault('rule_is_active', False)
kwargs.setdefault('rule_last_active', None)
return EventState(**kwargs)
def assertPasses(self, rule, event=None, **kwargs):
if event is None:
event = self.event
state = self.get_state(**kwargs)
assert rule.passes(event, state) is True
def assertDoesNotPass(self, rule, event=None, **kwargs):
if event is None:
event = self.event
state = self.get_state(**kwargs)
assert rule.passes(event, state) is False
class PermissionTestCase(TestCase):
def setUp(self):
super(PermissionTestCase, self).setUp()
self.owner = self.create_user()
self.organization = self.create_organization(owner=self.owner)
self.team = self.create_team(organization=self.organization)
def assert_can_access(self, user, path, method='GET'):
self.login_as(user)
resp = getattr(self.client, method.lower())(path)
assert resp.status_code >= 200 and resp.status_code < 300
def assert_cannot_access(self, user, path, method='GET'):
self.login_as(user)
resp = getattr(self.client, method.lower())(path)
assert resp.status_code >= 300
def assert_team_member_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False, teams=[self.team],
)
self.assert_can_access(user, path)
def assert_org_member_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=True,
)
self.assert_can_access(user, path)
def assert_teamless_member_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False,
)
self.assert_can_access(user, path)
def assert_team_member_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False, teams=[self.team],
)
self.assert_cannot_access(user, path)
def assert_org_member_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=True,
)
self.assert_cannot_access(user, path)
def assert_teamless_member_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False,
)
self.assert_cannot_access(user, path)
def assert_team_admin_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False, teams=[self.team],
type=OrganizationMemberType.ADMIN,
)
self.assert_can_access(user, path)
def assert_org_admin_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=True,
type=OrganizationMemberType.ADMIN,
)
self.assert_can_access(user, path)
def assert_teamless_admin_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False,
type=OrganizationMemberType.ADMIN,
)
self.assert_can_access(user, path)
def assert_team_admin_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False, teams=[self.team],
type=OrganizationMemberType.ADMIN,
)
self.assert_cannot_access(user, path)
def assert_org_admin_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=True,
type=OrganizationMemberType.ADMIN,
)
self.assert_cannot_access(user, path)
def assert_teamless_admin_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False,
type=OrganizationMemberType.ADMIN,
)
self.assert_cannot_access(user, path)
def assert_team_owner_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False, teams=[self.team],
type=OrganizationMemberType.OWNER,
)
self.assert_can_access(user, path)
def assert_org_owner_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=True,
type=OrganizationMemberType.OWNER,
)
self.assert_can_access(user, path)
def assert_teamless_owner_can_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False,
type=OrganizationMemberType.OWNER,
)
self.assert_can_access(user, path)
def assert_team_owner_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False, teams=[self.team],
type=OrganizationMemberType.OWNER,
)
self.assert_cannot_access(user, path)
def assert_org_owner_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=True,
type=OrganizationMemberType.OWNER,
)
self.assert_cannot_access(user, path)
def assert_teamless_owner_cannot_access(self, path):
user = self.create_user()
self.create_member(
user=user, organization=self.organization,
has_global_access=False,
type=OrganizationMemberType.OWNER,
)
self.assert_cannot_access(user, path)
def assert_non_member_cannot_access(self, path):
user = self.create_user()
self.assert_cannot_access(user, path)
class PluginTestCase(TestCase):
plugin = None
def setUp(self):
super(PluginTestCase, self).setUp()
plugins.register(self.plugin)
self.addCleanup(plugins.unregister, self.plugin)
|
|
#!/usr/bin/env python
'''Example of training a named entity recognition system from scratch using spaCy
This example is written to be self-contained and reasonably transparent.
To achieve that, it duplicates some of spaCy's internal functionality.
Specifically, in this example, we don't use spaCy's built-in Language class to
wire together the Vocab, Tokenizer and EntityRecognizer. Instead, we write
our own simple Pipeline class, so that it's easier to see how the pieces
interact.
Input data:
https://www.lt.informatik.tu-darmstadt.de/fileadmin/user_upload/Group_LangTech/data/GermEval2014_complete_data.zip
Developed for: spaCy 1.7.1
Last tested for: spaCy 1.7.1
'''
from __future__ import unicode_literals, print_function
import plac
from pathlib import Path
import random
import json
import spacy.orth as orth_funcs
from spacy.vocab import Vocab
from spacy.pipeline import BeamEntityRecognizer
from spacy.pipeline import EntityRecognizer
from spacy.tokenizer import Tokenizer
from spacy.tokens import Doc
from spacy.attrs import *
from spacy.gold import GoldParse
from spacy.gold import _iob_to_biluo as iob_to_biluo
from spacy.scorer import Scorer
try:
unicode
except NameError:
unicode = str
def init_vocab():
return Vocab(
lex_attr_getters={
LOWER: lambda string: string.lower(),
SHAPE: orth_funcs.word_shape,
PREFIX: lambda string: string[0],
SUFFIX: lambda string: string[-3:],
CLUSTER: lambda string: 0,
IS_ALPHA: orth_funcs.is_alpha,
IS_ASCII: orth_funcs.is_ascii,
IS_DIGIT: lambda string: string.isdigit(),
IS_LOWER: orth_funcs.is_lower,
IS_PUNCT: orth_funcs.is_punct,
IS_SPACE: lambda string: string.isspace(),
IS_TITLE: orth_funcs.is_title,
IS_UPPER: orth_funcs.is_upper,
IS_STOP: lambda string: False,
IS_OOV: lambda string: True
})
def save_vocab(vocab, path):
path = Path(path)
if not path.exists():
path.mkdir()
elif not path.is_dir():
raise IOError("Can't save vocab to %s\nNot a directory" % path)
with (path / 'strings.json').open('w') as file_:
vocab.strings.dump(file_)
vocab.dump((path / 'lexemes.bin').as_posix())
def load_vocab(path):
path = Path(path)
if not path.exists():
raise IOError("Cannot load vocab from %s\nDoes not exist" % path)
if not path.is_dir():
raise IOError("Cannot load vocab from %s\nNot a directory" % path)
return Vocab.load(path)
def init_ner_model(vocab, features=None):
if features is None:
features = tuple(EntityRecognizer.feature_templates)
return EntityRecognizer(vocab, features=features)
def save_ner_model(model, path):
path = Path(path)
if not path.exists():
path.mkdir()
if not path.is_dir():
raise IOError("Can't save model to %s\nNot a directory" % path)
model.model.dump((path / 'model').as_posix())
with (path / 'config.json').open('w') as file_:
data = json.dumps(model.cfg)
if not isinstance(data, unicode):
data = data.decode('utf8')
file_.write(data)
def load_ner_model(vocab, path):
return EntityRecognizer.load(path, vocab)
class Pipeline(object):
@classmethod
def load(cls, path):
path = Path(path)
if not path.exists():
raise IOError("Cannot load pipeline from %s\nDoes not exist" % path)
if not path.is_dir():
raise IOError("Cannot load pipeline from %s\nNot a directory" % path)
vocab = load_vocab(path)
tokenizer = Tokenizer(vocab, {}, None, None, None)
ner_model = load_ner_model(vocab, path / 'ner')
return cls(vocab, tokenizer, ner_model)
def __init__(self, vocab=None, tokenizer=None, entity=None):
if vocab is None:
vocab = init_vocab()
if tokenizer is None:
tokenizer = Tokenizer(vocab, {}, None, None, None)
if entity is None:
            entity = init_ner_model(vocab)
self.vocab = vocab
self.tokenizer = tokenizer
self.entity = entity
self.pipeline = [self.entity]
def __call__(self, input_):
doc = self.make_doc(input_)
for process in self.pipeline:
process(doc)
return doc
def make_doc(self, input_):
if isinstance(input_, bytes):
input_ = input_.decode('utf8')
if isinstance(input_, unicode):
return self.tokenizer(input_)
else:
return Doc(self.vocab, words=input_)
def make_gold(self, input_, annotations):
doc = self.make_doc(input_)
gold = GoldParse(doc, entities=annotations)
return gold
def update(self, input_, annot):
doc = self.make_doc(input_)
gold = self.make_gold(input_, annot)
for ner in gold.ner:
if ner not in (None, '-', 'O'):
action, label = ner.split('-', 1)
self.entity.add_label(label)
return self.entity.update(doc, gold)
def evaluate(self, examples):
scorer = Scorer()
for input_, annot in examples:
gold = self.make_gold(input_, annot)
doc = self(input_)
scorer.score(doc, gold)
return scorer.scores
def average_weights(self):
self.entity.model.end_training()
def save(self, path):
path = Path(path)
if not path.exists():
path.mkdir()
elif not path.is_dir():
raise IOError("Can't save pipeline to %s\nNot a directory" % path)
save_vocab(self.vocab, path / 'vocab')
save_ner_model(self.entity, path / 'ner')
def train(nlp, train_examples, dev_examples, nr_epoch=5):
next_epoch = train_examples
print("Iter", "Loss", "P", "R", "F")
for i in range(nr_epoch):
this_epoch = next_epoch
next_epoch = []
loss = 0
for input_, annot in this_epoch:
loss += nlp.update(input_, annot)
if (i+1) < nr_epoch:
next_epoch.append((input_, annot))
random.shuffle(next_epoch)
scores = nlp.evaluate(dev_examples)
report_scores(i, loss, scores)
nlp.average_weights()
scores = nlp.evaluate(dev_examples)
    report_scores(i+1, loss, scores)
def report_scores(i, loss, scores):
precision = '%.2f' % scores['ents_p']
recall = '%.2f' % scores['ents_r']
f_measure = '%.2f' % scores['ents_f']
    print('%d %d %s %s %s' % (i, int(loss), precision, recall, f_measure))
def read_examples(path):
path = Path(path)
with path.open() as file_:
sents = file_.read().strip().split('\n\n')
for sent in sents:
if not sent.strip():
continue
tokens = sent.split('\n')
while tokens and tokens[0].startswith('#'):
tokens.pop(0)
words = []
iob = []
for token in tokens:
if token.strip():
pieces = token.split()
words.append(pieces[1])
iob.append(pieces[2])
yield words, iob_to_biluo(iob)
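# Each GermEval sentence block is expected to look roughly like this (illustrative):
#   #   <comment / source line>
#   1   Aufgrund      O      O
#   2   Deutschland   B-LOC  O
# i.e. column 1 is the token and column 2 the outer IOB tag; read_examples yields
# (words, biluo_tags) pairs, converting IOB to BILUO.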
@plac.annotations(
model_dir=("Path to save the model", "positional", None, Path),
train_loc=("Path to your training data", "positional", None, Path),
dev_loc=("Path to your development data", "positional", None, Path),
)
def main(model_dir=Path('/home/matt/repos/spaCy/spacy/data/de-1.0.0'),
train_loc=None, dev_loc=None, nr_epoch=30):
train_examples = read_examples(train_loc)
dev_examples = read_examples(dev_loc)
nlp = Pipeline.load(model_dir)
    train(nlp, train_examples, list(dev_examples), nr_epoch)
nlp.save(model_dir)
if __name__ == '__main__':
main()
|
|
"""Base class for sparse matrice with a .data attribute
subclasses must provide a _with_data() method that
creates a new matrix with the same sparsity pattern
as self but with a different data array
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .base import spmatrix, _ufuncs_with_fixed_point_at_zero
from .sputils import isscalarlike, validateaxis, matrix
__all__ = []
# TODO implement all relevant operations
# use .data.__methods__() instead of /=, *=, etc.
class _data_matrix(spmatrix):
def __init__(self):
spmatrix.__init__(self)
def _get_dtype(self):
return self.data.dtype
def _set_dtype(self, newtype):
self.data.dtype = newtype
dtype = property(fget=_get_dtype, fset=_set_dtype)
def _deduped_data(self):
if hasattr(self, 'sum_duplicates'):
self.sum_duplicates()
return self.data
def __abs__(self):
return self._with_data(abs(self._deduped_data()))
def __round__(self, ndigits=0):
return self._with_data(np.around(self._deduped_data(), decimals=ndigits))
def _real(self):
return self._with_data(self.data.real)
def _imag(self):
return self._with_data(self.data.imag)
def __neg__(self):
if self.dtype.kind == 'b':
raise NotImplementedError('negating a sparse boolean '
'matrix is not supported')
return self._with_data(-self.data)
def __imul__(self, other): # self *= other
if isscalarlike(other):
self.data *= other
return self
else:
return NotImplemented
def __itruediv__(self, other): # self /= other
if isscalarlike(other):
recip = 1.0 / other
self.data *= recip
return self
else:
return NotImplemented
def astype(self, dtype, casting='unsafe', copy=True):
dtype = np.dtype(dtype)
if self.dtype != dtype:
return self._with_data(
self._deduped_data().astype(dtype, casting=casting, copy=copy),
copy=copy)
elif copy:
return self.copy()
else:
return self
astype.__doc__ = spmatrix.astype.__doc__
def conj(self, copy=True):
if np.issubdtype(self.dtype, np.complexfloating):
return self._with_data(self.data.conj(), copy=copy)
elif copy:
return self.copy()
else:
return self
conj.__doc__ = spmatrix.conj.__doc__
def copy(self):
return self._with_data(self.data.copy(), copy=True)
copy.__doc__ = spmatrix.copy.__doc__
def count_nonzero(self):
return np.count_nonzero(self._deduped_data())
count_nonzero.__doc__ = spmatrix.count_nonzero.__doc__
def power(self, n, dtype=None):
"""
This function performs element-wise power.
Parameters
----------
        n : scalar
            The exponent.
        dtype : dtype, optional
            If dtype is not specified, the current dtype will be preserved.
"""
if not isscalarlike(n):
raise NotImplementedError("input is not scalar")
data = self._deduped_data()
if dtype is not None:
data = data.astype(dtype)
return self._with_data(data ** n)
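    # Illustrative: if a sparse matrix A stores the data [1.0, 2.0, 3.0],
    # A.power(2) returns a matrix with the same sparsity pattern storing
    # [1.0, 4.0, 9.0]; only the explicitly stored entries are transformed.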
###########################
# Multiplication handlers #
###########################
def _mul_scalar(self, other):
return self._with_data(self.data * other)
# Add the numpy unary ufuncs for which func(0) = 0 to _data_matrix.
for npfunc in _ufuncs_with_fixed_point_at_zero:
name = npfunc.__name__
def _create_method(op):
def method(self):
result = op(self._deduped_data())
return self._with_data(result, copy=True)
method.__doc__ = ("Element-wise %s.\n\n"
"See numpy.%s for more information." % (name, name))
method.__name__ = name
return method
setattr(_data_matrix, name, _create_method(npfunc))
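# As a result, element-wise methods such as sqrt, sin, expm1 and log1p become
# available on _data_matrix subclasses (e.g. A.sqrt()); they transform only the
# stored entries, which is safe because each of these ufuncs maps 0 to 0.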
def _find_missing_index(ind, n):
for k, a in enumerate(ind):
if k != a:
return k
k += 1
if k < n:
return k
else:
return -1
class _minmax_mixin(object):
"""Mixin for min and max methods.
These are not implemented for dia_matrix, hence the separate class.
"""
def _min_or_max_axis(self, axis, min_or_max):
N = self.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = self.shape[1 - axis]
mat = self.tocsc() if axis == 0 else self.tocsr()
mat.sum_duplicates()
major_index, value = mat._minor_reduce(min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from . import coo_matrix
if axis == 0:
return coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=self.dtype, shape=(1, M))
else:
return coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=self.dtype, shape=(M, 1))
def _min_or_max(self, axis, out, min_or_max):
if out is not None:
raise ValueError(("Sparse matrices do not support "
"an 'out' parameter."))
validateaxis(axis)
if axis is None:
if 0 in self.shape:
raise ValueError("zero-size array to reduction operation")
zero = self.dtype.type(0)
if self.nnz == 0:
return zero
m = min_or_max.reduce(self._deduped_data().ravel())
if self.nnz != np.prod(self.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return self._min_or_max_axis(axis, min_or_max)
else:
raise ValueError("axis out of range")
def _arg_min_or_max_axis(self, axis, op, compare):
if self.shape[axis] == 0:
raise ValueError("Can't apply the operation along a zero-sized "
"dimension.")
if axis < 0:
axis += 2
zero = self.dtype.type(0)
mat = self.tocsc() if axis == 0 else self.tocsr()
mat.sum_duplicates()
ret_size, line_size = mat._swap(mat.shape)
ret = np.zeros(ret_size, dtype=int)
nz_lines, = np.nonzero(np.diff(mat.indptr))
for i in nz_lines:
p, q = mat.indptr[i:i + 2]
data = mat.data[p:q]
indices = mat.indices[p:q]
am = op(data)
m = data[am]
if compare(m, zero) or q - p == line_size:
ret[i] = indices[am]
else:
zero_ind = _find_missing_index(indices, line_size)
if m == zero:
ret[i] = min(am, zero_ind)
else:
ret[i] = zero_ind
if axis == 1:
ret = ret.reshape(-1, 1)
return matrix(ret)
def _arg_min_or_max(self, axis, out, op, compare):
if out is not None:
raise ValueError("Sparse matrices do not support "
"an 'out' parameter.")
validateaxis(axis)
if axis is None:
if 0 in self.shape:
raise ValueError("Can't apply the operation to "
"an empty matrix.")
if self.nnz == 0:
return 0
else:
zero = self.dtype.type(0)
mat = self.tocoo()
mat.sum_duplicates()
am = op(mat.data)
m = mat.data[am]
if compare(m, zero):
return mat.row[am] * mat.shape[1] + mat.col[am]
else:
size = np.prod(mat.shape)
if size == mat.nnz:
return am
else:
ind = mat.row * mat.shape[1] + mat.col
zero_ind = _find_missing_index(ind, size)
if m == zero:
return min(zero_ind, am)
else:
return zero_ind
return self._arg_min_or_max_axis(axis, op, compare)
def max(self, axis=None, out=None):
"""
Return the maximum of the matrix or maximum along an axis.
This takes all elements into account, not just the non-zero ones.
Parameters
----------
        axis : {-2, -1, 0, 1, None}, optional
Axis along which the sum is computed. The default is to
compute the maximum over all the matrix elements, returning
a scalar (i.e. `axis` = `None`).
out : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except
for the default value, as this argument is not used.
Returns
-------
amax : coo_matrix or scalar
Maximum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is a sparse.coo_matrix of dimension
``a.ndim - 1``.
See Also
--------
min : The minimum value of a sparse matrix along a given axis.
numpy.matrix.max : NumPy's implementation of 'max' for matrices
"""
return self._min_or_max(axis, out, np.maximum)
def min(self, axis=None, out=None):
"""
        Return the minimum of the matrix or minimum along an axis.
This takes all elements into account, not just the non-zero ones.
Parameters
----------
        axis : {-2, -1, 0, 1, None}, optional
Axis along which the sum is computed. The default is to
compute the minimum over all the matrix elements, returning
a scalar (i.e. `axis` = `None`).
out : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except for
the default value, as this argument is not used.
Returns
-------
amin : coo_matrix or scalar
Minimum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is a sparse.coo_matrix of dimension
``a.ndim - 1``.
See Also
--------
max : The maximum value of a sparse matrix along a given axis.
numpy.matrix.min : NumPy's implementation of 'min' for matrices
"""
return self._min_or_max(axis, out, np.minimum)
def argmax(self, axis=None, out=None):
"""Return indices of maximum elements along an axis.
Implicit zero elements are also taken into account. If there are
several maximum values, the index of the first occurrence is returned.
Parameters
----------
axis : {-2, -1, 0, 1, None}, optional
Axis along which the argmax is computed. If None (default), index
            of the maximum element in the flattened data is returned.
out : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except for
the default value, as this argument is not used.
Returns
-------
ind : numpy.matrix or int
Indices of maximum elements. If matrix, its size along `axis` is 1.
"""
return self._arg_min_or_max(axis, out, np.argmax, np.greater)
def argmin(self, axis=None, out=None):
"""Return indices of minimum elements along an axis.
Implicit zero elements are also taken into account. If there are
several minimum values, the index of the first occurrence is returned.
Parameters
----------
axis : {-2, -1, 0, 1, None}, optional
Axis along which the argmin is computed. If None (default), index
            of the minimum element in the flattened data is returned.
out : None, optional
This argument is in the signature *solely* for NumPy
compatibility reasons. Do not pass in anything except for
the default value, as this argument is not used.
Returns
-------
ind : numpy.matrix or int
Indices of minimum elements. If matrix, its size along `axis` is 1.
"""
return self._arg_min_or_max(axis, out, np.argmin, np.less)
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest_lib import exceptions as lib_exc
from tempest.common import cred_provider
from tempest.common.utils import data_utils
from tempest import config
import tempest.test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class BaseIdentityTest(tempest.test.BaseTestCase):
@classmethod
def disable_user(cls, user_name):
user = cls.get_user_by_name(user_name)
cls.client.enable_disable_user(user['id'], False)
@classmethod
def disable_tenant(cls, tenant_name):
tenant = cls.get_tenant_by_name(tenant_name)
cls.client.update_tenant(tenant['id'], enabled=False)
@classmethod
def get_user_by_name(cls, name):
users = cls.client.get_users()['users']
user = [u for u in users if u['name'] == name]
if len(user) > 0:
return user[0]
@classmethod
def get_tenant_by_name(cls, name):
try:
tenants = cls.client.list_tenants()['tenants']
except AttributeError:
tenants = cls.client.list_projects()['projects']
tenant = [t for t in tenants if t['name'] == name]
if len(tenant) > 0:
return tenant[0]
@classmethod
def get_role_by_name(cls, name):
roles = cls.client.list_roles()['roles']
role = [r for r in roles if r['name'] == name]
if len(role) > 0:
return role[0]
class BaseIdentityV2Test(BaseIdentityTest):
credentials = ['primary']
# identity v2 tests should obtain tokens and create accounts via v2
# regardless of the configured CONF.identity.auth_version
identity_version = 'v2'
@classmethod
def setup_clients(cls):
super(BaseIdentityV2Test, cls).setup_clients()
cls.non_admin_client = cls.os.identity_public_client
cls.non_admin_token_client = cls.os.token_client
@classmethod
def resource_setup(cls):
super(BaseIdentityV2Test, cls).resource_setup()
@classmethod
def resource_cleanup(cls):
super(BaseIdentityV2Test, cls).resource_cleanup()
class BaseIdentityV2AdminTest(BaseIdentityV2Test):
credentials = ['primary', 'admin']
@classmethod
def setup_clients(cls):
super(BaseIdentityV2AdminTest, cls).setup_clients()
cls.client = cls.os_adm.identity_client
cls.non_admin_client = cls.os.identity_client
cls.token_client = cls.os_adm.token_client
@classmethod
def resource_setup(cls):
super(BaseIdentityV2AdminTest, cls).resource_setup()
cls.data = DataGenerator(cls.client)
@classmethod
def resource_cleanup(cls):
cls.data.teardown_all()
super(BaseIdentityV2AdminTest, cls).resource_cleanup()
class BaseIdentityV3Test(BaseIdentityTest):
credentials = ['primary']
# identity v3 tests should obtain tokens and create accounts via v3
# regardless of the configured CONF.identity.auth_version
identity_version = 'v3'
@classmethod
def setup_clients(cls):
super(BaseIdentityV3Test, cls).setup_clients()
cls.non_admin_client = cls.os.identity_v3_client
cls.non_admin_token = cls.os.token_v3_client
cls.non_admin_endpoints_client = cls.os.endpoints_client
cls.non_admin_region_client = cls.os.region_client
cls.non_admin_service_client = cls.os.service_client
cls.non_admin_policy_client = cls.os.policy_client
cls.non_admin_creds_client = cls.os.credentials_client
@classmethod
def resource_cleanup(cls):
super(BaseIdentityV3Test, cls).resource_cleanup()
class BaseIdentityV3AdminTest(BaseIdentityV3Test):
credentials = ['primary', 'admin']
@classmethod
def setup_clients(cls):
super(BaseIdentityV3AdminTest, cls).setup_clients()
cls.client = cls.os_adm.identity_v3_client
cls.token = cls.os_adm.token_v3_client
cls.endpoints_client = cls.os_adm.endpoints_client
cls.region_client = cls.os_adm.region_client
cls.data = DataGenerator(cls.client)
cls.service_client = cls.os_adm.service_client
cls.policy_client = cls.os_adm.policy_client
cls.creds_client = cls.os_adm.credentials_client
@classmethod
def resource_cleanup(cls):
cls.data.teardown_all()
super(BaseIdentityV3AdminTest, cls).resource_cleanup()
@classmethod
def get_user_by_name(cls, name):
users = cls.client.get_users()['users']
user = [u for u in users if u['name'] == name]
if len(user) > 0:
return user[0]
@classmethod
def get_tenant_by_name(cls, name):
tenants = cls.client.list_projects()['projects']
tenant = [t for t in tenants if t['name'] == name]
if len(tenant) > 0:
return tenant[0]
@classmethod
def get_role_by_name(cls, name):
roles = cls.client.list_roles()['roles']
role = [r for r in roles if r['name'] == name]
if len(role) > 0:
return role[0]
def delete_domain(self, domain_id):
# NOTE(mpavlase) It is necessary to disable the domain before deleting
# otherwise it raises Forbidden exception
self.client.update_domain(domain_id, enabled=False)
self.client.delete_domain(domain_id)
class DataGenerator(object):
def __init__(self, client):
self.client = client
self.users = []
self.tenants = []
self.roles = []
self.role_name = None
self.v3_users = []
self.projects = []
self.v3_roles = []
self.domains = []
@property
def test_credentials(self):
return cred_provider.get_credentials(username=self.test_user,
user_id=self.user['id'],
password=self.test_password,
tenant_name=self.test_tenant,
tenant_id=self.tenant['id'])
def setup_test_user(self):
"""Set up a test user."""
self.setup_test_tenant()
self.test_user = data_utils.rand_name('test_user')
self.test_password = data_utils.rand_name('pass')
self.test_email = self.test_user + '@testmail.tm'
self.user = self.client.create_user(self.test_user,
self.test_password,
self.tenant['id'],
self.test_email)['user']
self.users.append(self.user)
def setup_test_tenant(self):
"""Set up a test tenant."""
self.test_tenant = data_utils.rand_name('test_tenant')
self.test_description = data_utils.rand_name('desc')
self.tenant = self.client.create_tenant(
name=self.test_tenant,
description=self.test_description)['tenant']
self.tenants.append(self.tenant)
def setup_test_role(self):
"""Set up a test role."""
self.test_role = data_utils.rand_name('role')
self.role = self.client.create_role(self.test_role)['role']
self.roles.append(self.role)
def setup_test_v3_user(self):
"""Set up a test v3 user."""
self.setup_test_project()
self.test_user = data_utils.rand_name('test_user')
self.test_password = data_utils.rand_name('pass')
self.test_email = self.test_user + '@testmail.tm'
self.v3_user = self.client.create_user(
self.test_user,
password=self.test_password,
project_id=self.project['id'],
email=self.test_email)['user']
self.v3_users.append(self.v3_user)
def setup_test_project(self):
"""Set up a test project."""
self.test_project = data_utils.rand_name('test_project')
self.test_description = data_utils.rand_name('desc')
self.project = self.client.create_project(
name=self.test_project,
description=self.test_description)['project']
self.projects.append(self.project)
def setup_test_v3_role(self):
"""Set up a test v3 role."""
self.test_role = data_utils.rand_name('role')
self.v3_role = self.client.create_role(self.test_role)['role']
self.v3_roles.append(self.v3_role)
def setup_test_domain(self):
"""Set up a test domain."""
self.test_domain = data_utils.rand_name('test_domain')
self.test_description = data_utils.rand_name('desc')
self.domain = self.client.create_domain(
name=self.test_domain,
description=self.test_description)['domain']
self.domains.append(self.domain)
@staticmethod
def _try_wrapper(func, item, **kwargs):
try:
if kwargs:
func(item['id'], **kwargs)
else:
func(item['id'])
except lib_exc.NotFound:
pass
except Exception:
LOG.exception("Unexpected exception occurred in %s deletion."
" But ignored here." % item['id'])
def teardown_all(self):
# NOTE(masayukig): v3 client doesn't have v2 method.
# (e.g. delete_tenant) So we need to check resources existence
# before using client methods.
for user in self.users:
self._try_wrapper(self.client.delete_user, user)
for tenant in self.tenants:
self._try_wrapper(self.client.delete_tenant, tenant)
for role in self.roles:
self._try_wrapper(self.client.delete_role, role)
for v3_user in self.v3_users:
self._try_wrapper(self.client.delete_user, v3_user)
for v3_project in self.projects:
self._try_wrapper(self.client.delete_project, v3_project)
for v3_role in self.v3_roles:
self._try_wrapper(self.client.delete_role, v3_role)
for domain in self.domains:
self._try_wrapper(self.client.update_domain, domain,
enabled=False)
self._try_wrapper(self.client.delete_domain, domain)
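# Hedged usage sketch (not part of the original tempest module): one way a
# caller might drive DataGenerator directly with an admin identity client to
# build a throwaway tenant/user pair and guarantee cleanup afterwards. The
# argument name is illustrative; the admin test classes above wire the client
# up in setup_clients().
def _example_data_generator_flow(identity_admin_client):
    data = DataGenerator(identity_admin_client)
    try:
        data.setup_test_user()          # creates a tenant, then a user inside it
        creds = data.test_credentials   # credentials for the freshly created user
        return creds
    finally:
        data.teardown_all()             # best-effort deletion of everything created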
|
|
from __future__ import division, absolute_import, print_function
__all__ = ['matrix', 'bmat', 'mat', 'asmatrix']
import sys
import warnings
import ast
import numpy.core.numeric as N
from numpy.core.numeric import concatenate, isscalar
# While not in __all__, matrix_power used to be defined here, so we import
# it for backward compatibility.
from numpy.linalg import matrix_power
def _convert_from_string(data):
for char in '[]':
data = data.replace(char, '')
rows = data.split(';')
newdata = []
count = 0
for row in rows:
trow = row.split(',')
newrow = []
for col in trow:
temp = col.split()
newrow.extend(map(ast.literal_eval, temp))
if count == 0:
Ncols = len(newrow)
elif len(newrow) != Ncols:
raise ValueError("Rows not the same size.")
count += 1
newdata.append(newrow)
return newdata
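# Illustrative sketch (not part of the original module): _convert_from_string
# turns the MATLAB-like syntax accepted by np.matrix into a nested list of
# Python scalars. Rows are split on ';', columns on ',' and/or whitespace, and
# each token goes through ast.literal_eval; rows of unequal length raise
# ValueError.
def _example_convert_from_string():
    assert _convert_from_string('1 2; 3 4') == [[1, 2], [3, 4]]
    assert _convert_from_string('1.5, 2; 3, 4') == [[1.5, 2], [3, 4]]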
def asmatrix(data, dtype=None):
"""
Interpret the input as a matrix.
Unlike `matrix`, `asmatrix` does not make a copy if the input is already
a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``.
Parameters
----------
data : array_like
Input data.
dtype : data-type
Data-type of the output matrix.
Returns
-------
mat : matrix
`data` interpreted as a matrix.
Examples
--------
>>> x = np.array([[1, 2], [3, 4]])
>>> m = np.asmatrix(x)
>>> x[0,0] = 5
>>> m
matrix([[5, 2],
[3, 4]])
"""
return matrix(data, dtype=dtype, copy=False)
class matrix(N.ndarray):
"""
matrix(data, dtype=None, copy=True)
.. note:: It is no longer recommended to use this class, even for linear
algebra. Instead use regular arrays. The class may be removed
in the future.
Returns a matrix from an array-like object, or from a string of data.
A matrix is a specialized 2-D array that retains its 2-D nature
through operations. It has certain special operators, such as ``*``
(matrix multiplication) and ``**`` (matrix power).
Parameters
----------
data : array_like or string
If `data` is a string, it is interpreted as a matrix with commas
or spaces separating columns, and semicolons separating rows.
dtype : data-type
Data-type of the output matrix.
copy : bool
If `data` is already an `ndarray`, then this flag determines
whether the data is copied (the default), or whether a view is
constructed.
See Also
--------
array
Examples
--------
>>> a = np.matrix('1 2; 3 4')
>>> print(a)
[[1 2]
[3 4]]
>>> np.matrix([[1, 2], [3, 4]])
matrix([[1, 2],
[3, 4]])
"""
__array_priority__ = 10.0
def __new__(subtype, data, dtype=None, copy=True):
warnings.warn('the matrix subclass is not the recommended way to '
'represent matrices or deal with linear algebra (see '
'https://docs.scipy.org/doc/numpy/user/'
'numpy-for-matlab-users.html). '
'Please adjust your code to use regular ndarray.',
PendingDeprecationWarning, stacklevel=2)
if isinstance(data, matrix):
dtype2 = data.dtype
if (dtype is None):
dtype = dtype2
if (dtype2 == dtype) and (not copy):
return data
return data.astype(dtype)
if isinstance(data, N.ndarray):
if dtype is None:
intype = data.dtype
else:
intype = N.dtype(dtype)
new = data.view(subtype)
if intype != data.dtype:
return new.astype(intype)
if copy: return new.copy()
else: return new
if isinstance(data, str):
data = _convert_from_string(data)
# now convert data to an array
arr = N.array(data, dtype=dtype, copy=copy)
ndim = arr.ndim
shape = arr.shape
if (ndim > 2):
raise ValueError("matrix must be 2-dimensional")
elif ndim == 0:
shape = (1, 1)
elif ndim == 1:
shape = (1, shape[0])
order = 'C'
if (ndim == 2) and arr.flags.fortran:
order = 'F'
if not (order or arr.flags.contiguous):
arr = arr.copy()
ret = N.ndarray.__new__(subtype, shape, arr.dtype,
buffer=arr,
order=order)
return ret
def __array_finalize__(self, obj):
self._getitem = False
if (isinstance(obj, matrix) and obj._getitem): return
ndim = self.ndim
if (ndim == 2):
return
if (ndim > 2):
newshape = tuple([x for x in self.shape if x > 1])
ndim = len(newshape)
if ndim == 2:
self.shape = newshape
return
elif (ndim > 2):
raise ValueError("shape too large to be a matrix.")
else:
newshape = self.shape
if ndim == 0:
self.shape = (1, 1)
elif ndim == 1:
self.shape = (1, newshape[0])
return
def __getitem__(self, index):
self._getitem = True
try:
out = N.ndarray.__getitem__(self, index)
finally:
self._getitem = False
if not isinstance(out, N.ndarray):
return out
if out.ndim == 0:
return out[()]
if out.ndim == 1:
sh = out.shape[0]
# Determine when we should have a column array
try:
n = len(index)
except Exception:
n = 0
if n > 1 and isscalar(index[1]):
out.shape = (sh, 1)
else:
out.shape = (1, sh)
return out
def __mul__(self, other):
if isinstance(other, (N.ndarray, list, tuple)) :
# This promotes 1-D vectors to row vectors
return N.dot(self, asmatrix(other))
if isscalar(other) or not hasattr(other, '__rmul__') :
return N.dot(self, other)
return NotImplemented
def __rmul__(self, other):
return N.dot(other, self)
def __imul__(self, other):
self[:] = self * other
return self
def __pow__(self, other):
return matrix_power(self, other)
def __ipow__(self, other):
self[:] = self ** other
return self
def __rpow__(self, other):
return NotImplemented
def _align(self, axis):
"""A convenience function for operations that need to preserve axis
orientation.
"""
if axis is None:
return self[0, 0]
elif axis==0:
return self
elif axis==1:
return self.transpose()
else:
raise ValueError("unsupported axis")
def _collapse(self, axis):
"""A convenience function for operations that want to collapse
to a scalar like _align, but are using keepdims=True
"""
if axis is None:
return self[0, 0]
else:
return self
# Necessary because base-class tolist expects dimension
# reduction by x[0]
def tolist(self):
"""
Return the matrix as a (possibly nested) list.
See `ndarray.tolist` for full documentation.
See Also
--------
ndarray.tolist
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.tolist()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]
"""
return self.__array__().tolist()
# To preserve orientation of result...
def sum(self, axis=None, dtype=None, out=None):
"""
Returns the sum of the matrix elements, along the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum
Notes
-----
This is the same as `ndarray.sum`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix([[1, 2], [4, 3]])
>>> x.sum()
10
>>> x.sum(axis=1)
matrix([[3],
[7]])
>>> x.sum(axis=1, dtype='float')
matrix([[ 3.],
[ 7.]])
>>> out = np.zeros((1, 2), dtype='float')
>>> x.sum(axis=1, dtype='float', out=out)
matrix([[ 3.],
[ 7.]])
"""
return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis)
# To update docstring from array to matrix...
def squeeze(self, axis=None):
"""
Return a possibly reshaped matrix.
Refer to `numpy.squeeze` for more documentation.
Parameters
----------
axis : None or int or tuple of ints, optional
Selects a subset of the single-dimensional entries in the shape.
If an axis is selected with shape entry greater than one,
an error is raised.
Returns
-------
squeezed : matrix
The matrix, but as a (1, N) matrix if it had shape (N, 1).
See Also
--------
numpy.squeeze : related function
Notes
-----
If `m` has a single column then that column is returned
as the single row of a matrix. Otherwise `m` is returned.
The returned matrix is always either `m` itself or a view into `m`.
Supplying an axis keyword argument will not affect the returned matrix
but it may cause an error to be raised.
Examples
--------
>>> c = np.matrix([[1], [2]])
>>> c
matrix([[1],
[2]])
>>> c.squeeze()
matrix([[1, 2]])
>>> r = c.T
>>> r
matrix([[1, 2]])
>>> r.squeeze()
matrix([[1, 2]])
>>> m = np.matrix([[1, 2], [3, 4]])
>>> m.squeeze()
matrix([[1, 2],
[3, 4]])
"""
return N.ndarray.squeeze(self, axis=axis)
# To update docstring from array to matrix...
def flatten(self, order='C'):
"""
Return a flattened copy of the matrix.
All `N` elements of the matrix are placed into a single row.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order. 'F' means to
flatten in column-major (Fortran-style) order. 'A' means to
flatten in column-major order if `m` is Fortran *contiguous* in
memory, row-major order otherwise. 'K' means to flatten `m` in
the order the elements occur in memory. The default is 'C'.
Returns
-------
y : matrix
A copy of the matrix, flattened to a `(1, N)` matrix where `N`
is the number of elements in the original matrix.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the matrix.
Examples
--------
>>> m = np.matrix([[1,2], [3,4]])
>>> m.flatten()
matrix([[1, 2, 3, 4]])
>>> m.flatten('F')
matrix([[1, 3, 2, 4]])
"""
return N.ndarray.flatten(self, order=order)
def mean(self, axis=None, dtype=None, out=None):
"""
Returns the average of the matrix elements along the given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean
Notes
-----
Same as `ndarray.mean` except that, where that returns an `ndarray`,
this returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.mean()
5.5
>>> x.mean(0)
matrix([[ 4., 5., 6., 7.]])
>>> x.mean(1)
matrix([[ 1.5],
[ 5.5],
[ 9.5]])
"""
return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis)
def std(self, axis=None, dtype=None, out=None, ddof=0):
"""
Return the standard deviation of the array elements along the given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std
Notes
-----
This is the same as `ndarray.std`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.std()
3.4520525295346629
>>> x.std(0)
matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]])
>>> x.std(1)
matrix([[ 1.11803399],
[ 1.11803399],
[ 1.11803399]])
"""
return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
def var(self, axis=None, dtype=None, out=None, ddof=0):
"""
Returns the variance of the matrix elements, along the given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var
Notes
-----
This is the same as `ndarray.var`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.var()
11.916666666666666
>>> x.var(0)
matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]])
>>> x.var(1)
matrix([[ 1.25],
[ 1.25],
[ 1.25]])
"""
return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
def prod(self, axis=None, dtype=None, out=None):
"""
Return the product of the array elements over the given axis.
Refer to `prod` for full documentation.
See Also
--------
prod, ndarray.prod
Notes
-----
Same as `ndarray.prod`, except, where that returns an `ndarray`, this
returns a `matrix` object instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.prod()
0
>>> x.prod(0)
matrix([[ 0, 45, 120, 231]])
>>> x.prod(1)
matrix([[ 0],
[ 840],
[7920]])
"""
return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis)
def any(self, axis=None, out=None):
"""
Test whether any array element along a given axis evaluates to True.
Refer to `numpy.any` for full documentation.
Parameters
----------
axis : int, optional
Axis along which logical OR is performed
out : ndarray, optional
Output to existing array instead of creating new one, must have
same shape as expected output
Returns
-------
any : bool, ndarray
Returns a single bool if `axis` is ``None``; otherwise,
returns `ndarray`
"""
return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis)
def all(self, axis=None, out=None):
"""
Test whether all matrix elements along a given axis evaluate to True.
Parameters
----------
See `numpy.all` for complete descriptions
See Also
--------
numpy.all
Notes
-----
This is the same as `ndarray.all`, but it returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> y = x[0]; y
matrix([[0, 1, 2, 3]])
>>> (x == y)
matrix([[ True, True, True, True],
[False, False, False, False],
[False, False, False, False]])
>>> (x == y).all()
False
>>> (x == y).all(0)
matrix([[False, False, False, False]])
>>> (x == y).all(1)
matrix([[ True],
[False],
[False]])
"""
return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis)
def max(self, axis=None, out=None):
"""
Return the maximum value along an axis.
Parameters
----------
See `amax` for complete descriptions
See Also
--------
amax, ndarray.max
Notes
-----
This is the same as `ndarray.max`, but returns a `matrix` object
where `ndarray.max` would return an ndarray.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.max()
11
>>> x.max(0)
matrix([[ 8, 9, 10, 11]])
>>> x.max(1)
matrix([[ 3],
[ 7],
[11]])
"""
return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis)
def argmax(self, axis=None, out=None):
"""
Indexes of the maximum values along an axis.
Return the indexes of the first occurrences of the maximum values
along the specified axis. If axis is None, the index is for the
flattened matrix.
Parameters
----------
See `numpy.argmax` for complete descriptions
See Also
--------
numpy.argmax
Notes
-----
This is the same as `ndarray.argmax`, but returns a `matrix` object
where `ndarray.argmax` would return an `ndarray`.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.argmax()
11
>>> x.argmax(0)
matrix([[2, 2, 2, 2]])
>>> x.argmax(1)
matrix([[3],
[3],
[3]])
"""
return N.ndarray.argmax(self, axis, out)._align(axis)
def min(self, axis=None, out=None):
"""
Return the minimum value along an axis.
Parameters
----------
See `amin` for complete descriptions.
See Also
--------
amin, ndarray.min
Notes
-----
This is the same as `ndarray.min`, but returns a `matrix` object
where `ndarray.min` would return an ndarray.
Examples
--------
>>> x = -np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, -1, -2, -3],
[ -4, -5, -6, -7],
[ -8, -9, -10, -11]])
>>> x.min()
-11
>>> x.min(0)
matrix([[ -8, -9, -10, -11]])
>>> x.min(1)
matrix([[ -3],
[ -7],
[-11]])
"""
return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis)
def argmin(self, axis=None, out=None):
"""
Indexes of the minimum values along an axis.
Return the indexes of the first occurrences of the minimum values
along the specified axis. If axis is None, the index is for the
flattened matrix.
Parameters
----------
See `numpy.argmin` for complete descriptions.
See Also
--------
numpy.argmin
Notes
-----
This is the same as `ndarray.argmin`, but returns a `matrix` object
where `ndarray.argmin` would return an `ndarray`.
Examples
--------
>>> x = -np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, -1, -2, -3],
[ -4, -5, -6, -7],
[ -8, -9, -10, -11]])
>>> x.argmin()
11
>>> x.argmin(0)
matrix([[2, 2, 2, 2]])
>>> x.argmin(1)
matrix([[3],
[3],
[3]])
"""
return N.ndarray.argmin(self, axis, out)._align(axis)
def ptp(self, axis=None, out=None):
"""
Peak-to-peak (maximum - minimum) value along the given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp
Notes
-----
Same as `ndarray.ptp`, except, where that would return an `ndarray` object,
this returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.ptp()
11
>>> x.ptp(0)
matrix([[8, 8, 8, 8]])
>>> x.ptp(1)
matrix([[3],
[3],
[3]])
"""
return N.ndarray.ptp(self, axis, out)._align(axis)
def getI(self):
"""
Returns the (multiplicative) inverse of invertible `self`.
Parameters
----------
None
Returns
-------
ret : matrix object
If `self` is non-singular, `ret` is such that ``ret * self`` ==
``self * ret`` == ``np.matrix(np.eye(self[0,:].size))`` all return
``True``.
Raises
------
numpy.linalg.LinAlgError: Singular matrix
If `self` is singular.
See Also
--------
linalg.inv
Examples
--------
>>> m = np.matrix('[1, 2; 3, 4]'); m
matrix([[1, 2],
[3, 4]])
>>> m.getI()
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
>>> m.getI() * m
matrix([[ 1., 0.],
[ 0., 1.]])
"""
M, N = self.shape
if M == N:
from numpy.dual import inv as func
else:
from numpy.dual import pinv as func
return asmatrix(func(self))
def getA(self):
"""
Return `self` as an `ndarray` object.
Equivalent to ``np.asarray(self)``.
Parameters
----------
None
Returns
-------
ret : ndarray
`self` as an `ndarray`
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.getA()
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
"""
return self.__array__()
def getA1(self):
"""
Return `self` as a flattened `ndarray`.
Equivalent to ``np.asarray(x).ravel()``
Parameters
----------
None
Returns
-------
ret : ndarray
`self`, 1-D, as an `ndarray`
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.getA1()
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
"""
return self.__array__().ravel()
def ravel(self, order='C'):
"""
Return a flattened matrix.
Refer to `numpy.ravel` for more documentation.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
The elements of `m` are read using this index order. 'C' means to
index the elements in C-like order, with the last axis index
changing fastest, back to the first axis index changing slowest.
'F' means to index the elements in Fortran-like index order, with
the first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of the
memory layout of the underlying array, and only refer to the order
of axis indexing. 'A' means to read the elements in Fortran-like
index order if `m` is Fortran *contiguous* in memory, C-like order
otherwise. 'K' means to read the elements in the order they occur
in memory, except for reversing the data when strides are negative.
By default, 'C' index order is used.
Returns
-------
ret : matrix
Return the matrix flattened to shape `(1, N)` where `N`
is the number of elements in the original matrix.
A copy is made only if necessary.
See Also
--------
matrix.flatten : returns a similar output matrix but always a copy
matrix.flat : a flat iterator on the array.
numpy.ravel : related function which returns an ndarray
"""
return N.ndarray.ravel(self, order=order)
def getT(self):
"""
Returns the transpose of the matrix.
Does *not* conjugate! For the complex conjugate transpose, use ``.H``.
Parameters
----------
None
Returns
-------
ret : matrix object
The (non-conjugated) transpose of the matrix.
See Also
--------
transpose, getH
Examples
--------
>>> m = np.matrix('[1, 2; 3, 4]')
>>> m
matrix([[1, 2],
[3, 4]])
>>> m.getT()
matrix([[1, 3],
[2, 4]])
"""
return self.transpose()
def getH(self):
"""
Returns the (complex) conjugate transpose of `self`.
Equivalent to ``np.transpose(self)`` if `self` is real-valued.
Parameters
----------
None
Returns
-------
ret : matrix object
complex conjugate transpose of `self`
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4)))
>>> z = x - 1j*x; z
matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j],
[ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j],
[ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]])
>>> z.getH()
matrix([[ 0. +0.j, 4. +4.j, 8. +8.j],
[ 1. +1.j, 5. +5.j, 9. +9.j],
[ 2. +2.j, 6. +6.j, 10.+10.j],
[ 3. +3.j, 7. +7.j, 11.+11.j]])
"""
if issubclass(self.dtype.type, N.complexfloating):
return self.transpose().conjugate()
else:
return self.transpose()
T = property(getT, None)
A = property(getA, None)
A1 = property(getA1, None)
H = property(getH, None)
I = property(getI, None)
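# Hedged sketch (not part of the original module): the keepdims/_collapse and
# _align helpers above exist so that reductions on a matrix stay 2-D instead
# of dropping to a 1-D array the way plain ndarray reductions do.
def _example_matrix_reductions_stay_2d():
    m = matrix('1 2; 3 4')
    assert m.sum(axis=1).shape == (2, 1)   # column matrix, not a 1-D array
    assert m.sum(axis=0).shape == (1, 2)   # row matrix
    assert m.sum() == 10                   # axis=None collapses to a scalar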
def _from_string(str, gdict, ldict):
rows = str.split(';')
rowtup = []
for row in rows:
trow = row.split(',')
newrow = []
for x in trow:
newrow.extend(x.split())
trow = newrow
coltup = []
for col in trow:
col = col.strip()
try:
thismat = ldict[col]
except KeyError:
try:
thismat = gdict[col]
except KeyError:
raise KeyError("%s not found" % (col,))
coltup.append(thismat)
rowtup.append(concatenate(coltup, axis=-1))
return concatenate(rowtup, axis=0)
def bmat(obj, ldict=None, gdict=None):
"""
Build a matrix object from a string, nested sequence, or array.
Parameters
----------
obj : str or array_like
Input data. If a string, variables in the current scope may be
referenced by name.
ldict : dict, optional
A dictionary that replaces local operands in current frame.
Ignored if `obj` is not a string or `gdict` is `None`.
gdict : dict, optional
A dictionary that replaces global operands in current frame.
Ignored if `obj` is not a string.
Returns
-------
out : matrix
Returns a matrix object, which is a specialized 2-D array.
See Also
--------
block :
A generalization of this function for N-d arrays, that returns normal
ndarrays.
Examples
--------
>>> A = np.mat('1 1; 1 1')
>>> B = np.mat('2 2; 2 2')
>>> C = np.mat('3 4; 5 6')
>>> D = np.mat('7 8; 9 0')
All the following expressions construct the same block matrix:
>>> np.bmat([[A, B], [C, D]])
matrix([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 4, 7, 8],
[5, 6, 9, 0]])
>>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]])
matrix([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 4, 7, 8],
[5, 6, 9, 0]])
>>> np.bmat('A,B; C,D')
matrix([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 4, 7, 8],
[5, 6, 9, 0]])
"""
if isinstance(obj, str):
if gdict is None:
# get previous frame
frame = sys._getframe().f_back
glob_dict = frame.f_globals
loc_dict = frame.f_locals
else:
glob_dict = gdict
loc_dict = ldict
return matrix(_from_string(obj, glob_dict, loc_dict))
if isinstance(obj, (tuple, list)):
# [[A,B],[C,D]]
arr_rows = []
for row in obj:
if isinstance(row, N.ndarray): # not 2-d
return matrix(concatenate(obj, axis=-1))
else:
arr_rows.append(concatenate(row, axis=-1))
return matrix(concatenate(arr_rows, axis=0))
if isinstance(obj, N.ndarray):
return matrix(obj)
mat = asmatrix
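# Hedged sketch (not part of the original module): when `obj` is a string,
# bmat normally resolves block names from the caller's frame via
# sys._getframe; passing explicit gdict/ldict dictionaries avoids that frame
# introspection, as shown here with two hypothetical blocks.
def _example_bmat_with_dicts():
    blocks = {'A': matrix('1 1; 1 1'), 'B': matrix('2 2; 2 2')}
    return bmat('A, B', ldict=blocks, gdict={})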
|
|
__all__ = ['Counter', 'deque', 'defaultdict', 'namedtuple', 'OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
from _abcoll import *
import _abcoll
__all__ += _abcoll.__all__
from _collections import deque, defaultdict
from operator import itemgetter as _itemgetter, eq as _eq
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap, \
ifilter as _ifilter, imap as _imap
################################################################################
### OrderedDict
################################################################################
class OrderedDict(dict, MutableMapping):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [None, None, None] # sentinel node
PREV = 0
NEXT = 1
root[PREV] = root[NEXT] = root
self.__map = {}
self.update(*args, **kwds)
def __setitem__(self, key, value, PREV=0, NEXT=1, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[PREV]
last[NEXT] = root[PREV] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, PREV=0, NEXT=1, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link = self.__map.pop(key)
link_prev = link[PREV]
link_next = link[NEXT]
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
def __iter__(self, NEXT=1, KEY=2):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
root = self.__root
curr = root[NEXT]
while curr is not root:
yield curr[KEY]
curr = curr[NEXT]
def __reversed__(self, PREV=0, KEY=2):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
root = self.__root
curr = root[PREV]
while curr is not root:
yield curr[KEY]
curr = curr[PREV]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__root
del self.__map, self.__root
inst_dict = vars(self).copy()
self.__map, self.__root = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
self.__root[:] = [self.__root, self.__root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
setdefault = MutableMapping.setdefault
update = MutableMapping.update
pop = MutableMapping.pop
keys = MutableMapping.keys
values = MutableMapping.values
items = MutableMapping.items
iterkeys = MutableMapping.iterkeys
itervalues = MutableMapping.itervalues
iteritems = MutableMapping.iteritems
__ne__ = MutableMapping.__ne__
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
key = next(reversed(self) if last else iter(self))
value = self.pop(key)
return key, value
def __repr__(self):
'od.__repr__() <==> repr(od)'
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and \
all(_imap(_eq, self.iteritems(), other.iteritems()))
return dict.__eq__(self, other)
def __del__(self):
self.clear() # eliminate cyclical references
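# Hedged usage sketch (not part of the original module): the sentinel-based
# doubly linked list described above is what makes popitem() cheap from either
# end and keeps comparisons between two OrderedDicts order-sensitive.
def _example_ordereddict_behaviour():
    od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
    assert od.popitem() == ('c', 3)             # LIFO by default
    assert od.popitem(last=False) == ('a', 1)   # FIFO when last=False
    assert OrderedDict([('x', 1), ('y', 2)]) != OrderedDict([('y', 2), ('x', 1)])
    return od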
################################################################################
### namedtuple
################################################################################
def namedtuple(typename, field_names, verbose=False, rename=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', 'x y')
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = tuple(map(str, field_names))
if rename:
names = list(field_names)
seen = set()
for i, name in enumerate(names):
if (not all(c.isalnum() or c=='_' for c in name) or _iskeyword(name)
or not name or name[0].isdigit() or name.startswith('_')
or name in seen):
names[i] = '_%d' % i
seen.add(name)
field_names = tuple(names)
for name in (typename,) + field_names:
if not all(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(_cls, %(argtxt)s):
'Create new instance of %(typename)s(%(argtxt)s)'
return _tuple.__new__(_cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
'Return a nicely formatted representation string'
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(self):
'Return a new OrderedDict which maps field names to their values'
return OrderedDict(zip(self._fields, self)) \n
def _replace(_self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = _self._make(map(kwds.pop, %(field_names)r, _self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self) \n\n''' % locals()
for i, name in enumerate(field_names):
template += " %s = _property(_itemgetter(%d), doc='Alias for field number %d')\n" % (name, i, i)
if verbose:
print template
# Execute the template string in a temporary namespace and
# support tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
OrderedDict=OrderedDict, _property=property, _tuple=tuple)
try:
exec template in namespace
except SyntaxError, e:
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result
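# Hedged sketch (not part of the original module): with rename=True the
# validation loop above silently replaces keywords, duplicates, and otherwise
# invalid field names with positional '_<index>' names instead of raising
# ValueError.
def _example_namedtuple_rename():
    Row = namedtuple('Row', ['id', 'class', 'id'], rename=True)
    assert Row._fields == ('id', '_1', '_2')
    return Row(1, 2, 3)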
########################################################################
### Counter
########################################################################
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abracadabra') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('r', 2), ('b', 2)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'r']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbcdrr'
>>> sum(c.values()) # total of all counts
11
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['r'] # remove all 'r'
>>> c['r'] # now there are zero 'r'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
self.update(iterable, **kwds)
def __missing__(self, key):
'The count of elements not in the Counter is zero.'
# Needed so that self[missing_item] does not raise KeyError
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abracadabra').most_common(3)
[('a', 5), ('r', 2), ('b', 2)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.iteritems(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.iteritems(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
>>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
>>> product = 1
>>> for factor in prime_factors.elements(): # loop over factors
... product *= factor # and multiply them
>>> product
1836
Note, if an element's count has been set to zero or is a negative
number, elements() will ignore it.
'''
# Emulate Bag.do from Smalltalk and Multiset.begin from C++.
return _chain.from_iterable(_starmap(_repeat, self.iteritems()))
# Override dict methods where necessary
@classmethod
def fromkeys(cls, iterable, v=None):
# There is no equivalent method for counters because setting v=1
# means that no element can have a count greater than one.
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
# The regular dict.update() operation makes no sense here because the
# replace behavior results in some of the original untouched counts
# being mixed in with all of the other counts for a mishmash that
# doesn't have a straightforward interpretation in most counting
# contexts. Instead, we implement straight-addition. Both the inputs
# and outputs are allowed to contain zero and negative counts.
if iterable is not None:
if isinstance(iterable, Mapping):
if self:
self_get = self.get
for elem, count in iterable.iteritems():
self[elem] = self_get(elem, 0) + count
else:
dict.update(self, iterable) # fast path when counter is empty
else:
self_get = self.get
for elem in iterable:
self[elem] = self_get(elem, 0) + 1
if kwds:
self.update(kwds)
def subtract(self, iterable=None, **kwds):
'''Like dict.update() but subtracts counts instead of replacing them.
Counts can be reduced below zero. Both the inputs and outputs are
allowed to contain zero and negative counts.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.subtract('witch') # subtract elements from another iterable
>>> c.subtract(Counter('watch')) # subtract elements from another counter
>>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
0
>>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
-1
'''
if iterable is not None:
self_get = self.get
if isinstance(iterable, Mapping):
for elem, count in iterable.items():
self[elem] = self_get(elem, 0) - count
else:
for elem in iterable:
self[elem] = self_get(elem, 0) - 1
if kwds:
self.subtract(kwds)
def copy(self):
'Like dict.copy() but returns a Counter instance instead of a dict.'
return Counter(self)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
dict.__delitem__(self, elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem in set(self) | set(other):
newcount = self[elem] + other[elem]
if newcount > 0:
result[elem] = newcount
return result
def __sub__(self, other):
'''Subtract counts, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem in set(self) | set(other):
newcount = self[elem] - other[elem]
if newcount > 0:
result[elem] = newcount
return result
def __or__(self, other):
'''Union takes the maximum of the counts in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem in set(self) | set(other):
p, q = self[elem], other[elem]
newcount = q if p < q else p
if newcount > 0:
result[elem] = newcount
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
if len(self) < len(other):
self, other = other, self
for elem in _ifilter(self.__contains__, other):
p, q = self[elem], other[elem]
newcount = p if p < q else q
if newcount > 0:
result[elem] = newcount
return result
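# Hedged sketch (not part of the original module): as noted above, the
# multiset operators only emit positive counts, so adding an empty Counter is
# the idiomatic way to strip zero and negative entries left behind by update()
# or subtract().
def _example_strip_nonpositive_counts():
    c = Counter(a=2, b=0, d=-1)
    c += Counter()              # falls back to __add__, which keeps counts > 0
    assert c == Counter(a=2)
    return c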
if __name__ == '__main__':
# verify that instances can be pickled
from cPickle import loads, dumps
Point = namedtuple('Point', 'x, y', True)
p = Point(x=10, y=20)
assert p == loads(dumps(p))
# test and demonstrate ability to override methods
class Point(namedtuple('Point', 'x y')):
__slots__ = ()
@property
def hypot(self):
return (self.x ** 2 + self.y ** 2) ** 0.5
def __str__(self):
return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
for p in Point(3, 4), Point(14, 5/7.):
print p
class Point(namedtuple('Point', 'x y')):
'Point class with optimized _make() and _replace() without error-checking'
__slots__ = ()
_make = classmethod(tuple.__new__)
def _replace(self, _map=map, **kwds):
return self._make(_map(kwds.get, ('x', 'y'), self))
print Point(11, 22)._replace(x=100)
Point3D = namedtuple('Point3D', Point._fields + ('z',))
print Point3D.__doc__
import doctest
TestResults = namedtuple('TestResults', 'failed attempted')
print TestResults(*doctest.testmod())
|
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A simple template system that compiles templates to Python code.
Basic usage looks like::
t = template.Template("<html>{{ myvalue }}</html>")
print(t.generate(myvalue="XXX"))
`Loader` is a class that loads templates from a root directory and caches
the compiled templates::
loader = template.Loader("/home/btaylor")
print(loader.load("test.html").generate(myvalue="XXX"))
We compile all templates to raw Python. Error-reporting is currently... uh,
interesting. Syntax for the templates::
### base.html
<html>
<head>
<title>{% block title %}Default title{% end %}</title>
</head>
<body>
<ul>
{% for student in students %}
{% block student %}
<li>{{ escape(student.name) }}</li>
{% end %}
{% end %}
</ul>
</body>
</html>
### bold.html
{% extends "base.html" %}
{% block title %}A bolder title{% end %}
{% block student %}
<li><span style="bold">{{ escape(student.name) }}</span></li>
{% end %}
Unlike most other template systems, we do not put any restrictions on the
expressions you can include in your statements. ``if`` and ``for`` blocks get
translated exactly into Python, so you can do complex expressions like::
{% for student in [p for p in people if p.student and p.age > 23] %}
<li>{{ escape(student.name) }}</li>
{% end %}
Translating directly to Python means you can apply functions to expressions
easily, like the ``escape()`` function in the examples above. You can pass
functions in to your template just like any other variable
(In a `.RequestHandler`, override `.RequestHandler.get_template_namespace`)::
### Python code
def add(x, y):
return x + y
template.execute(add=add)
### The template
{{ add(1, 2) }}
We provide the functions `escape() <.xhtml_escape>`, `.url_escape()`,
`.json_encode()`, and `.squeeze()` to all templates by default.
Typical applications do not create `Template` or `Loader` instances by
hand, but instead use the `~.RequestHandler.render` and
`~.RequestHandler.render_string` methods of
`tornado.web.RequestHandler`, which load templates automatically based
on the ``template_path`` `.Application` setting.
Variable names beginning with ``_tt_`` are reserved by the template
system and should not be used by application code.
Syntax Reference
----------------
Template expressions are surrounded by double curly braces: ``{{ ... }}``.
The contents may be any python expression, which will be escaped according
to the current autoescape setting and inserted into the output. Other
template directives use ``{% %}``.
To comment out a section so that it is omitted from the output, surround it
with ``{# ... #}``.
These tags may be escaped as ``{{!``, ``{%!``, and ``{#!``
if you need to include a literal ``{{``, ``{%``, or ``{#`` in the output.
``{% apply *function* %}...{% end %}``
Applies a function to the output of all template code between ``apply``
and ``end``::
{% apply linkify %}{{name}} said: {{message}}{% end %}
Note that as an implementation detail apply blocks are implemented
as nested functions and thus may interact strangely with variables
set via ``{% set %}``, or the use of ``{% break %}`` or ``{% continue %}``
within loops.
``{% autoescape *function* %}``
Sets the autoescape mode for the current file. This does not affect
other files, even those referenced by ``{% include %}``. Note that
autoescaping can also be configured globally, at the `.Application`
or `Loader`.::
{% autoescape xhtml_escape %}
{% autoescape None %}
``{% block *name* %}...{% end %}``
Indicates a named, replaceable block for use with ``{% extends %}``.
Blocks in the parent template will be replaced with the contents of
the same-named block in a child template.::
<!-- base.html -->
<title>{% block title %}Default title{% end %}</title>
<!-- mypage.html -->
{% extends "base.html" %}
{% block title %}My page title{% end %}
``{% comment ... %}``
A comment which will be removed from the template output. Note that
there is no ``{% end %}`` tag; the comment goes from the word ``comment``
to the closing ``%}`` tag.
``{% extends *filename* %}``
Inherit from another template. Templates that use ``extends`` should
contain one or more ``block`` tags to replace content from the parent
template. Anything in the child template not contained in a ``block``
tag will be ignored. For an example, see the ``{% block %}`` tag.
``{% for *var* in *expr* %}...{% end %}``
Same as the python ``for`` statement. ``{% break %}`` and
``{% continue %}`` may be used inside the loop.
``{% from *x* import *y* %}``
Same as the python ``import`` statement.
``{% if *condition* %}...{% elif *condition* %}...{% else %}...{% end %}``
Conditional statement - outputs the first section whose condition is
true. (The ``elif`` and ``else`` sections are optional)
``{% import *module* %}``
Same as the python ``import`` statement.
``{% include *filename* %}``
Includes another template file. The included file can see all the local
variables as if it were copied directly to the point of the ``include``
directive (the ``{% autoescape %}`` directive is an exception).
Alternately, ``{% module Template(filename, **kwargs) %}`` may be used
to include another template with an isolated namespace.
``{% module *expr* %}``
Renders a `~tornado.web.UIModule`. The output of the ``UIModule`` is
not escaped::
{% module Template("foo.html", arg=42) %}
``UIModules`` are a feature of the `tornado.web.RequestHandler`
class (and specifically its ``render`` method) and will not work
when the template system is used on its own in other contexts.
``{% raw *expr* %}``
Outputs the result of the given expression without autoescaping.
``{% set *x* = *y* %}``
Sets a local variable.
``{% try %}...{% except %}...{% else %}...{% finally %}...{% end %}``
Same as the python ``try`` statement.
``{% while *condition* %}... {% end %}``
Same as the python ``while`` statement. ``{% break %}`` and
``{% continue %}`` may be used inside the loop.
``{% whitespace *mode* %}``
Sets the whitespace mode for the remainder of the current file
(or until the next ``{% whitespace %}`` directive). See
`filter_whitespace` for available options. New in Tornado 4.3.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import linecache
import os.path
import posixpath
import re
import threading
from tornado import escape
from tornado.log import app_log
from tornado.util import ObjectDict, exec_in, unicode_type, PY3
if PY3:
from io import StringIO
else:
from cStringIO import StringIO
_DEFAULT_AUTOESCAPE = "xhtml_escape"
_UNSET = object()
def filter_whitespace(mode, text):
"""Transform whitespace in ``text`` according to ``mode``.
Available modes are:
* ``all``: Return all whitespace unmodified.
* ``single``: Collapse consecutive whitespace with a single whitespace
character, preserving newlines.
* ``oneline``: Collapse all runs of whitespace into a single space
character, removing all newlines in the process.
.. versionadded:: 4.3
"""
if mode == 'all':
return text
elif mode == 'single':
text = re.sub(r"([\t ]+)", " ", text)
text = re.sub(r"(\s*\n\s*)", "\n", text)
return text
elif mode == 'oneline':
return re.sub(r"(\s+)", " ", text)
else:
raise Exception("invalid whitespace mode %s" % mode)
class Template(object):
"""A compiled template.
We compile into Python from the given template_string. You can generate
the template from variables with generate().
"""
# note that the constructor's signature is not extracted with
# autodoc because _UNSET looks like garbage. When changing
# this signature update website/sphinx/template.rst too.
def __init__(self, template_string, name="<string>", loader=None,
compress_whitespace=_UNSET, autoescape=_UNSET,
whitespace=None):
"""Construct a Template.
:arg str template_string: the contents of the template file.
:arg str name: the filename from which the template was loaded
(used for error message).
:arg tornado.template.BaseLoader loader: the `~tornado.template.BaseLoader` responsible for this template,
used to resolve ``{% include %}`` and ``{% extends %}``
directives.
:arg bool compress_whitespace: Deprecated since Tornado 4.3.
Equivalent to ``whitespace="single"`` if true and
``whitespace="all"`` if false.
:arg str autoescape: The name of a function in the template
namespace, or ``None`` to disable escaping by default.
:arg str whitespace: A string specifying treatment of whitespace;
see `filter_whitespace` for options.
.. versionchanged:: 4.3
Added ``whitespace`` parameter; deprecated ``compress_whitespace``.
"""
self.name = escape.native_str(name)
if compress_whitespace is not _UNSET:
# Convert deprecated compress_whitespace (bool) to whitespace (str).
if whitespace is not None:
raise Exception("cannot set both whitespace and compress_whitespace")
whitespace = "single" if compress_whitespace else "all"
if whitespace is None:
if loader and loader.whitespace:
whitespace = loader.whitespace
else:
# Whitespace defaults by filename.
if name.endswith(".html") or name.endswith(".js"):
whitespace = "single"
else:
whitespace = "all"
# Validate the whitespace setting.
filter_whitespace(whitespace, '')
if autoescape is not _UNSET:
self.autoescape = autoescape
elif loader:
self.autoescape = loader.autoescape
else:
self.autoescape = _DEFAULT_AUTOESCAPE
self.namespace = loader.namespace if loader else {}
reader = _TemplateReader(name, escape.native_str(template_string),
whitespace)
self.file = _File(self, _parse(reader, self))
self.code = self._generate_python(loader)
self.loader = loader
try:
# Under python2.5, the fake filename used here must match
# the module name used in __name__ below.
# The dont_inherit flag prevents template.py's future imports
# from being applied to the generated code.
self.compiled = compile(
escape.to_unicode(self.code),
"%s.generated.py" % self.name.replace('.', '_'),
"exec", dont_inherit=True)
except Exception:
formatted_code = _format_code(self.code).rstrip()
app_log.error("%s code:\n%s", self.name, formatted_code)
raise
def generate(self, **kwargs):
"""Generate this template with the given arguments."""
namespace = {
"escape": escape.xhtml_escape,
"xhtml_escape": escape.xhtml_escape,
"url_escape": escape.url_escape,
"json_encode": escape.json_encode,
"squeeze": escape.squeeze,
"linkify": escape.linkify,
"datetime": datetime,
"_tt_utf8": escape.utf8, # for internal use
"_tt_string_types": (unicode_type, bytes),
# __name__ and __loader__ allow the traceback mechanism to find
# the generated source code.
"__name__": self.name.replace('.', '_'),
"__loader__": ObjectDict(get_source=lambda name: self.code),
}
namespace.update(self.namespace)
namespace.update(kwargs)
exec_in(self.compiled, namespace)
execute = namespace["_tt_execute"]
# Clear the traceback module's cache of source data now that
# we've generated a new template (mainly for this module's
# unittests, where different tests reuse the same name).
linecache.clearcache()
return execute()
def _generate_python(self, loader):
buffer = StringIO()
try:
# named_blocks maps from names to _NamedBlock objects
named_blocks = {}
ancestors = self._get_ancestors(loader)
ancestors.reverse()
for ancestor in ancestors:
ancestor.find_named_blocks(loader, named_blocks)
writer = _CodeWriter(buffer, named_blocks, loader,
ancestors[0].template)
ancestors[0].generate(writer)
return buffer.getvalue()
finally:
buffer.close()
def _get_ancestors(self, loader):
ancestors = [self.file]
for chunk in self.file.body.chunks:
if isinstance(chunk, _ExtendsBlock):
if not loader:
raise ParseError("{% extends %} block found, but no "
"template loader")
template = loader.load(chunk.name, self.name)
ancestors.extend(template._get_ancestors(loader))
return ancestors
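# Illustrative sketch (not part of the original module): minimal Template usage.
# With the default autoescape ("xhtml_escape") an expression is escaped, while
# {% raw %} emits it unmodified; generate() returns UTF-8 encoded bytes.
def _example_template_usage():
    t = Template("Hello {{ name }}, raw: {% raw name %}")
    return t.generate(name="<b>world</b>")
    # -> b'Hello &lt;b&gt;world&lt;/b&gt;, raw: <b>world</b>'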
class BaseLoader(object):
"""Base class for template loaders.
You must use a template loader to use template constructs like
``{% extends %}`` and ``{% include %}``. The loader caches all
templates after they are loaded the first time.
"""
def __init__(self, autoescape=_DEFAULT_AUTOESCAPE, namespace=None,
whitespace=None):
"""Construct a template loader.
:arg str autoescape: The name of a function in the template
namespace, such as "xhtml_escape", or ``None`` to disable
autoescaping by default.
:arg dict namespace: A dictionary to be added to the default template
namespace, or ``None``.
:arg str whitespace: A string specifying default behavior for
whitespace in templates; see `filter_whitespace` for options.
Default is "single" for files ending in ".html" and ".js" and
"all" for other files.
.. versionchanged:: 4.3
Added ``whitespace`` parameter.
"""
self.autoescape = autoescape
self.namespace = namespace or {}
self.whitespace = whitespace
self.templates = {}
# self.lock protects self.templates. It's a reentrant lock
# because templates may load other templates via `include` or
# `extends`. Note that thanks to the GIL this code would be safe
# even without the lock, but could lead to wasted work as multiple
# threads tried to compile the same template simultaneously.
self.lock = threading.RLock()
def reset(self):
"""Resets the cache of compiled templates."""
with self.lock:
self.templates = {}
def resolve_path(self, name, parent_path=None):
"""Converts a possibly-relative path to absolute (used internally)."""
raise NotImplementedError()
def load(self, name, parent_path=None):
"""Loads a template."""
name = self.resolve_path(name, parent_path=parent_path)
with self.lock:
if name not in self.templates:
self.templates[name] = self._create_template(name)
return self.templates[name]
def _create_template(self, name):
raise NotImplementedError()
class Loader(BaseLoader):
"""A template loader that loads from a single root directory.
"""
def __init__(self, root_directory, **kwargs):
super(Loader, self).__init__(**kwargs)
self.root = os.path.abspath(root_directory)
def resolve_path(self, name, parent_path=None):
if parent_path and not parent_path.startswith("<") and \
not parent_path.startswith("/") and \
not name.startswith("/"):
current_path = os.path.join(self.root, parent_path)
file_dir = os.path.dirname(os.path.abspath(current_path))
relative_path = os.path.abspath(os.path.join(file_dir, name))
if relative_path.startswith(self.root):
name = relative_path[len(self.root) + 1:]
return name
def _create_template(self, name):
path = os.path.join(self.root, name)
with open(path, "rb") as f:
template = Template(f.read(), name=name, loader=self)
return template
class DictLoader(BaseLoader):
"""A template loader that loads from a dictionary."""
def __init__(self, dict, **kwargs):
super(DictLoader, self).__init__(**kwargs)
self.dict = dict
def resolve_path(self, name, parent_path=None):
if parent_path and not parent_path.startswith("<") and \
not parent_path.startswith("/") and \
not name.startswith("/"):
file_dir = posixpath.dirname(parent_path)
name = posixpath.normpath(posixpath.join(file_dir, name))
return name
def _create_template(self, name):
return Template(self.dict[name], name=name, loader=self)
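# Illustrative sketch (not part of the original module): {% extends %} and
# {% block %} require a loader; DictLoader keeps the example self-contained.
def _example_dict_loader():
    loader = DictLoader({
        "base.html": "<title>{% block title %}default{% end %}</title>",
        "page.html": '{% extends "base.html" %}{% block title %}custom{% end %}',
    })
    return loader.load("page.html").generate()
    # -> b'<title>custom</title>'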
class _Node(object):
def each_child(self):
return ()
def generate(self, writer):
raise NotImplementedError()
def find_named_blocks(self, loader, named_blocks):
for child in self.each_child():
child.find_named_blocks(loader, named_blocks)
class _File(_Node):
def __init__(self, template, body):
self.template = template
self.body = body
self.line = 0
def generate(self, writer):
writer.write_line("def _tt_execute():", self.line)
with writer.indent():
writer.write_line("_tt_buffer = []", self.line)
writer.write_line("_tt_append = _tt_buffer.append", self.line)
self.body.generate(writer)
writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
def each_child(self):
return (self.body,)
class _ChunkList(_Node):
def __init__(self, chunks):
self.chunks = chunks
def generate(self, writer):
for chunk in self.chunks:
chunk.generate(writer)
def each_child(self):
return self.chunks
class _NamedBlock(_Node):
def __init__(self, name, body, template, line):
self.name = name
self.body = body
self.template = template
self.line = line
def each_child(self):
return (self.body,)
def generate(self, writer):
block = writer.named_blocks[self.name]
with writer.include(block.template, self.line):
block.body.generate(writer)
def find_named_blocks(self, loader, named_blocks):
named_blocks[self.name] = self
_Node.find_named_blocks(self, loader, named_blocks)
class _ExtendsBlock(_Node):
def __init__(self, name):
self.name = name
class _IncludeBlock(_Node):
def __init__(self, name, reader, line):
self.name = name
self.template_name = reader.name
self.line = line
def find_named_blocks(self, loader, named_blocks):
included = loader.load(self.name, self.template_name)
included.file.find_named_blocks(loader, named_blocks)
def generate(self, writer):
included = writer.loader.load(self.name, self.template_name)
with writer.include(included, self.line):
included.file.body.generate(writer)
class _ApplyBlock(_Node):
def __init__(self, method, line, body=None):
self.method = method
self.line = line
self.body = body
def each_child(self):
return (self.body,)
def generate(self, writer):
method_name = "_tt_apply%d" % writer.apply_counter
writer.apply_counter += 1
writer.write_line("def %s():" % method_name, self.line)
with writer.indent():
writer.write_line("_tt_buffer = []", self.line)
writer.write_line("_tt_append = _tt_buffer.append", self.line)
self.body.generate(writer)
writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
writer.write_line("_tt_append(_tt_utf8(%s(%s())))" % (
self.method, method_name), self.line)
class _ControlBlock(_Node):
def __init__(self, statement, line, body=None):
self.statement = statement
self.line = line
self.body = body
def each_child(self):
return (self.body,)
def generate(self, writer):
writer.write_line("%s:" % self.statement, self.line)
with writer.indent():
self.body.generate(writer)
# Just in case the body was empty
writer.write_line("pass", self.line)
class _IntermediateControlBlock(_Node):
def __init__(self, statement, line):
self.statement = statement
self.line = line
def generate(self, writer):
# In case the previous block was empty
writer.write_line("pass", self.line)
writer.write_line("%s:" % self.statement, self.line, writer.indent_size() - 1)
class _Statement(_Node):
def __init__(self, statement, line):
self.statement = statement
self.line = line
def generate(self, writer):
writer.write_line(self.statement, self.line)
class _Expression(_Node):
def __init__(self, expression, line, raw=False):
self.expression = expression
self.line = line
self.raw = raw
def generate(self, writer):
writer.write_line("_tt_tmp = %s" % self.expression, self.line)
writer.write_line("if isinstance(_tt_tmp, _tt_string_types):"
" _tt_tmp = _tt_utf8(_tt_tmp)", self.line)
writer.write_line("else: _tt_tmp = _tt_utf8(str(_tt_tmp))", self.line)
if not self.raw and writer.current_template.autoescape is not None:
# In python3 functions like xhtml_escape return unicode,
# so we have to convert to utf8 again.
writer.write_line("_tt_tmp = _tt_utf8(%s(_tt_tmp))" %
writer.current_template.autoescape, self.line)
writer.write_line("_tt_append(_tt_tmp)", self.line)
class _Module(_Expression):
def __init__(self, expression, line):
super(_Module, self).__init__("_tt_modules." + expression, line,
raw=True)
class _Text(_Node):
def __init__(self, value, line, whitespace):
self.value = value
self.line = line
self.whitespace = whitespace
def generate(self, writer):
value = self.value
# Compress whitespace if requested, with a crude heuristic to avoid
# altering preformatted whitespace.
if "<pre>" not in value:
value = filter_whitespace(self.whitespace, value)
if value:
writer.write_line('_tt_append(%r)' % escape.utf8(value), self.line)
class ParseError(Exception):
"""Raised for template syntax errors.
``ParseError`` instances have ``filename`` and ``lineno`` attributes
indicating the position of the error.
.. versionchanged:: 4.3
Added ``filename`` and ``lineno`` attributes.
"""
def __init__(self, message, filename=None, lineno=0):
self.message = message
# The names "filename" and "lineno" are chosen for consistency
# with python SyntaxError.
self.filename = filename
self.lineno = lineno
def __str__(self):
return '%s at %s:%d' % (self.message, self.filename, self.lineno)
class _CodeWriter(object):
def __init__(self, file, named_blocks, loader, current_template):
self.file = file
self.named_blocks = named_blocks
self.loader = loader
self.current_template = current_template
self.apply_counter = 0
self.include_stack = []
self._indent = 0
def indent_size(self):
return self._indent
def indent(self):
class Indenter(object):
def __enter__(_):
self._indent += 1
return self
def __exit__(_, *args):
assert self._indent > 0
self._indent -= 1
return Indenter()
def include(self, template, line):
self.include_stack.append((self.current_template, line))
self.current_template = template
class IncludeTemplate(object):
def __enter__(_):
return self
def __exit__(_, *args):
self.current_template = self.include_stack.pop()[0]
return IncludeTemplate()
def write_line(self, line, line_number, indent=None):
if indent is None:
indent = self._indent
line_comment = ' # %s:%d' % (self.current_template.name, line_number)
if self.include_stack:
ancestors = ["%s:%d" % (tmpl.name, lineno)
for (tmpl, lineno) in self.include_stack]
line_comment += ' (via %s)' % ', '.join(reversed(ancestors))
print(" " * indent + line + line_comment, file=self.file)
class _TemplateReader(object):
def __init__(self, name, text, whitespace):
self.name = name
self.text = text
self.whitespace = whitespace
self.line = 1
self.pos = 0
def find(self, needle, start=0, end=None):
assert start >= 0, start
pos = self.pos
start += pos
if end is None:
index = self.text.find(needle, start)
else:
end += pos
assert end >= start
index = self.text.find(needle, start, end)
if index != -1:
index -= pos
return index
def consume(self, count=None):
if count is None:
count = len(self.text) - self.pos
newpos = self.pos + count
self.line += self.text.count("\n", self.pos, newpos)
s = self.text[self.pos:newpos]
self.pos = newpos
return s
def remaining(self):
return len(self.text) - self.pos
def __len__(self):
return self.remaining()
def __getitem__(self, key):
if type(key) is slice:
size = len(self)
start, stop, step = key.indices(size)
if start is None:
start = self.pos
else:
start += self.pos
if stop is not None:
stop += self.pos
return self.text[slice(start, stop, step)]
elif key < 0:
return self.text[key]
else:
return self.text[self.pos + key]
def __str__(self):
return self.text[self.pos:]
def raise_parse_error(self, msg):
raise ParseError(msg, self.name, self.line)
def _format_code(code):
lines = code.splitlines()
format = "%%%dd %%s\n" % len(repr(len(lines) + 1))
return "".join([format % (i + 1, line) for (i, line) in enumerate(lines)])
def _parse(reader, template, in_block=None, in_loop=None):
body = _ChunkList([])
while True:
# Find next template directive
curly = 0
while True:
curly = reader.find("{", curly)
if curly == -1 or curly + 1 == reader.remaining():
# EOF
if in_block:
reader.raise_parse_error(
"Missing {%% end %%} block for %s" % in_block)
body.chunks.append(_Text(reader.consume(), reader.line,
reader.whitespace))
return body
# If the first curly brace is not the start of a special token,
# start searching from the character after it
if reader[curly + 1] not in ("{", "%", "#"):
curly += 1
continue
# When there are more than 2 curlies in a row, use the
# innermost ones. This is useful when generating languages
# like latex where curlies are also meaningful
if (curly + 2 < reader.remaining() and
reader[curly + 1] == '{' and reader[curly + 2] == '{'):
curly += 1
continue
break
# Append any text before the special token
if curly > 0:
cons = reader.consume(curly)
body.chunks.append(_Text(cons, reader.line,
reader.whitespace))
start_brace = reader.consume(2)
line = reader.line
# Template directives may be escaped as "{{!" or "{%!".
# In this case output the braces and consume the "!".
# This is especially useful in conjunction with jquery templates,
# which also use double braces.
if reader.remaining() and reader[0] == "!":
reader.consume(1)
body.chunks.append(_Text(start_brace, line,
reader.whitespace))
continue
# Comment
if start_brace == "{#":
end = reader.find("#}")
if end == -1:
reader.raise_parse_error("Missing end comment #}")
contents = reader.consume(end).strip()
reader.consume(2)
continue
# Expression
if start_brace == "{{":
end = reader.find("}}")
if end == -1:
reader.raise_parse_error("Missing end expression }}")
contents = reader.consume(end).strip()
reader.consume(2)
if not contents:
reader.raise_parse_error("Empty expression")
body.chunks.append(_Expression(contents, line))
continue
# Block
assert start_brace == "{%", start_brace
end = reader.find("%}")
if end == -1:
reader.raise_parse_error("Missing end block %}")
contents = reader.consume(end).strip()
reader.consume(2)
if not contents:
reader.raise_parse_error("Empty block tag ({% %})")
operator, space, suffix = contents.partition(" ")
suffix = suffix.strip()
# Intermediate ("else", "elif", etc) blocks
intermediate_blocks = {
"else": set(["if", "for", "while", "try"]),
"elif": set(["if"]),
"except": set(["try"]),
"finally": set(["try"]),
}
allowed_parents = intermediate_blocks.get(operator)
if allowed_parents is not None:
if not in_block:
reader.raise_parse_error("%s outside %s block" %
(operator, allowed_parents))
if in_block not in allowed_parents:
reader.raise_parse_error(
"%s block cannot be attached to %s block" %
(operator, in_block))
body.chunks.append(_IntermediateControlBlock(contents, line))
continue
# End tag
elif operator == "end":
if not in_block:
reader.raise_parse_error("Extra {% end %} block")
return body
elif operator in ("extends", "include", "set", "import", "from",
"comment", "autoescape", "whitespace", "raw",
"module"):
if operator == "comment":
continue
if operator == "extends":
suffix = suffix.strip('"').strip("'")
if not suffix:
reader.raise_parse_error("extends missing file path")
block = _ExtendsBlock(suffix)
elif operator in ("import", "from"):
if not suffix:
reader.raise_parse_error("import missing statement")
block = _Statement(contents, line)
elif operator == "include":
suffix = suffix.strip('"').strip("'")
if not suffix:
reader.raise_parse_error("include missing file path")
block = _IncludeBlock(suffix, reader, line)
elif operator == "set":
if not suffix:
reader.raise_parse_error("set missing statement")
block = _Statement(suffix, line)
elif operator == "autoescape":
fn = suffix.strip()
if fn == "None":
fn = None
template.autoescape = fn
continue
elif operator == "whitespace":
mode = suffix.strip()
# Validate the selected mode
filter_whitespace(mode, '')
reader.whitespace = mode
continue
elif operator == "raw":
block = _Expression(suffix, line, raw=True)
elif operator == "module":
block = _Module(suffix, line)
body.chunks.append(block)
continue
elif operator in ("apply", "block", "try", "if", "for", "while"):
# parse inner body recursively
if operator in ("for", "while"):
block_body = _parse(reader, template, operator, operator)
elif operator == "apply":
# apply creates a nested function so syntactically it's not
# in the loop.
block_body = _parse(reader, template, operator, None)
else:
block_body = _parse(reader, template, operator, in_loop)
if operator == "apply":
if not suffix:
reader.raise_parse_error("apply missing method name")
block = _ApplyBlock(suffix, line, block_body)
elif operator == "block":
if not suffix:
reader.raise_parse_error("block missing name")
block = _NamedBlock(suffix, block_body, template, line)
else:
block = _ControlBlock(contents, line, block_body)
body.chunks.append(block)
continue
elif operator in ("break", "continue"):
if not in_loop:
reader.raise_parse_error("%s outside %s block" %
(operator, set(["for", "while"])))
body.chunks.append(_Statement(contents, line))
continue
else:
reader.raise_parse_error("unknown operator: %r" % operator)
|
|
# encoding: utf8
from django.test import TestCase
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.state import ProjectState, ModelState
from django.db.migrations.graph import MigrationGraph
from django.db import models
class AutodetectorTests(TestCase):
"""
Tests the migration autodetector.
"""
author_empty = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))])
author_name = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200))])
author_name_longer = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=400))])
author_name_renamed = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("names", models.CharField(max_length=200))])
author_with_book = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book"))])
author_with_publisher = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("publisher", models.ForeignKey("testapp.Publisher"))])
author_proxy = ModelState("testapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author", ))
author_proxy_notproxy = ModelState("testapp", "AuthorProxy", [], {}, ("testapp.author", ))
publisher = ModelState("testapp", "Publisher", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=100))])
publisher_with_author = ModelState("testapp", "Publisher", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("name", models.CharField(max_length=100))])
other_pony = ModelState("otherapp", "Pony", [("id", models.AutoField(primary_key=True))])
other_stable = ModelState("otherapp", "Stable", [("id", models.AutoField(primary_key=True))])
third_thing = ModelState("thirdapp", "Thing", [("id", models.AutoField(primary_key=True))])
book = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))])
book_unique = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))], {"unique_together": [("author", "title")]})
book_unique_2 = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))], {"unique_together": [("title", "author")]})
edition = ModelState("thirdapp", "Edition", [("id", models.AutoField(primary_key=True)), ("book", models.ForeignKey("otherapp.Book"))])
def make_project_state(self, model_states):
"Shortcut to make ProjectStates from lists of predefined models"
project_state = ProjectState()
for model_state in model_states:
project_state.add_model_state(model_state.clone())
return project_state
def test_arrange_for_graph(self):
"Tests auto-naming of migrations for graph matching."
# Make a fake graph
graph = MigrationGraph()
graph.add_node(("testapp", "0001_initial"), None)
graph.add_node(("testapp", "0002_foobar"), None)
graph.add_node(("otherapp", "0001_initial"), None)
graph.add_dependency(("testapp", "0002_foobar"), ("testapp", "0001_initial"))
graph.add_dependency(("testapp", "0002_foobar"), ("otherapp", "0001_initial"))
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
changes = autodetector._arrange_for_graph(changes, graph)
# Make sure there's a new name, deps match, etc.
self.assertEqual(changes["testapp"][0].name, "0003_author")
self.assertEqual(changes["testapp"][0].dependencies, [("testapp", "0002_foobar")])
self.assertEqual(changes["otherapp"][0].name, "0002_pony_stable")
self.assertEqual(changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")])
def test_trim_apps(self):
"Tests that trim does not remove dependencies but does remove unwanted apps"
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable, self.third_thing])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner(defaults={"ask_initial": True}))
changes = autodetector._detect_changes()
# Run through arrange_for_graph
graph = MigrationGraph()
changes = autodetector._arrange_for_graph(changes, graph)
changes["testapp"][0].dependencies.append(("otherapp", "0001_initial"))
changes = autodetector._trim_to_apps(changes, set(["testapp"]))
# Make sure there's the right set of migrations
self.assertEqual(changes["testapp"][0].name, "0001_initial")
self.assertEqual(changes["otherapp"][0].name, "0001_initial")
self.assertNotIn("thirdapp", changes)
def test_new_model(self):
"Tests autodetection of new models"
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
self.assertEqual(action.name, "Author")
def test_old_model(self):
"Tests deletion of old models"
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "DeleteModel")
self.assertEqual(action.name, "Author")
def test_add_field(self):
"Tests autodetection of new fields"
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AddField")
self.assertEqual(action.name, "name")
def test_remove_field(self):
"Tests autodetection of removed fields"
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "RemoveField")
self.assertEqual(action.name, "name")
def test_alter_field(self):
"Tests autodetection of new fields"
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_name_longer])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterField")
self.assertEqual(action.name, "name")
def test_rename_field(self):
"Tests autodetection of renamed fields"
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_name_renamed])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename": True}))
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "RenameField")
self.assertEqual(action.old_name, "name")
self.assertEqual(action.new_name, "names")
def test_fk_dependency(self):
"Tests that having a ForeignKey automatically adds a dependency"
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_name, self.book, self.edition])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
self.assertEqual(len(changes['otherapp']), 1)
self.assertEqual(len(changes['thirdapp']), 1)
# Right number of actions?
migration1 = changes['testapp'][0]
self.assertEqual(len(migration1.operations), 1)
migration2 = changes['otherapp'][0]
self.assertEqual(len(migration2.operations), 1)
migration3 = changes['thirdapp'][0]
self.assertEqual(len(migration3.operations), 1)
# Right actions?
action = migration1.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
action = migration2.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
action = migration3.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
# Right dependencies?
self.assertEqual(migration1.dependencies, [])
self.assertEqual(migration2.dependencies, [("testapp", "auto_1")])
self.assertEqual(migration3.dependencies, [("otherapp", "auto_1")])
def test_same_app_no_fk_dependency(self):
"""
Tests that a migration with a FK between two models of the same app
does not have a dependency to itself.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_with_publisher, self.publisher])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 2)
# Right actions?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
action = migration.operations[1]
self.assertEqual(action.__class__.__name__, "CreateModel")
# Right dependencies?
self.assertEqual(migration.dependencies, [])
def test_circular_fk_dependency(self):
"""
Tests that having a circular ForeignKey dependency automatically
resolves the situation into 2 migrations on one side and 1 on the other.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_with_book, self.book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
self.assertEqual(len(changes['otherapp']), 2)
# Right number of actions?
migration1 = changes['testapp'][0]
self.assertEqual(len(migration1.operations), 1)
migration2 = changes['otherapp'][0]
self.assertEqual(len(migration2.operations), 1)
migration3 = changes['otherapp'][1]
        self.assertEqual(len(migration3.operations), 1)
# Right actions?
action = migration1.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
action = migration2.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
self.assertEqual(len(action.fields), 2)
action = migration3.operations[0]
self.assertEqual(action.__class__.__name__, "AddField")
self.assertEqual(action.name, "author")
# Right dependencies?
self.assertEqual(migration1.dependencies, [("otherapp", "auto_1")])
self.assertEqual(migration2.dependencies, [])
self.assertEqual(set(migration3.dependencies), set([("otherapp", "auto_1"), ("testapp", "auto_1")]))
def test_same_app_circular_fk_dependency(self):
"""
        Tests that a circular ForeignKey between two models of the same app
        is resolved into two migrations within that app rather than a
        dependency on itself.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_with_publisher, self.publisher_with_author])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 2)
# Right number of actions?
migration1 = changes['testapp'][0]
self.assertEqual(len(migration1.operations), 2)
migration2 = changes['testapp'][1]
self.assertEqual(len(migration2.operations), 1)
# Right actions?
action = migration1.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
action = migration1.operations[1]
self.assertEqual(action.__class__.__name__, "CreateModel")
action = migration2.operations[0]
self.assertEqual(action.__class__.__name__, "AddField")
self.assertEqual(action.name, "publisher")
# Right dependencies?
self.assertEqual(migration1.dependencies, [])
self.assertEqual(migration2.dependencies, [("testapp", "auto_1")])
def test_unique_together(self):
"Tests unique_together detection"
# Make state
before = self.make_project_state([self.author_empty, self.book])
after = self.make_project_state([self.author_empty, self.book_unique])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['otherapp']), 1)
# Right number of actions?
migration = changes['otherapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterUniqueTogether")
self.assertEqual(action.name, "book")
self.assertEqual(action.unique_together, set([("author", "title")]))
def test_unique_together_ordering(self):
"Tests that unique_together also triggers on ordering changes"
# Make state
before = self.make_project_state([self.author_empty, self.book_unique])
after = self.make_project_state([self.author_empty, self.book_unique_2])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['otherapp']), 1)
# Right number of actions?
migration = changes['otherapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterUniqueTogether")
self.assertEqual(action.name, "book")
self.assertEqual(action.unique_together, set([("title", "author")]))
def test_proxy_ignorance(self):
"Tests that the autodetector correctly ignores proxy models"
# First, we test adding a proxy model
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_empty, self.author_proxy])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 0)
# Now, we test turning a proxy model into a non-proxy model
before = self.make_project_state([self.author_empty, self.author_proxy])
after = self.make_project_state([self.author_empty, self.author_proxy_notproxy])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
self.assertEqual(action.name, "AuthorProxy")
|
|
#! /usr/bin/env phenix.python
# Implementation of the EMRinger method, with plots, for use with the
# EMRinger workflow.
# References:
# Lang PT, Ng HL, Fraser JS, Corn JE, Echols N, Sales M, Holton JM, Alber T.
# Automated electron-density sampling reveals widespread conformational
# polymorphism in proteins. Protein Sci. 2010 Jul;19(7):1420-31. PubMed PMID:
# 20499387
#
# Barad BA, Echols N, Wang RY-R, Cheng YC, DiMaio F, Adams PD, Fraser JS.
# EMRinger: side-chain-directed model and map validation for 3D electron
# cryomicroscopy. Nature Methods published online 17 August 2015;
# doi:10.1038/nmeth.3541.
# Any software that wants to use the pkl output of this tool
# should import ringer_residue and ringer_chi from it.
import libtbx.phil
import numpy
from libtbx import easy_pickle
from libtbx import easy_mp
from libtbx.str_utils import make_header
from libtbx.utils import Sorry, Usage
from libtbx import adopt_init_args, Auto
from cStringIO import StringIO
import time
import os
import sys
import sqlite3
master_phil = libtbx.phil.parse("""
pdb_file = None
.type = path
cif_file = None
.type = path
.multiple = True
map_coeffs = None
.type = path
map_label = None
.type = str
map_file = None
.type = path
sampling_method = linear *spline direct
.type = choice(multi=False)
sampling_angle = 5
.type = int
grid_spacing = 1./5
.type = float
scaling = *sigma volume
.type = choice(multi=False)
skip_alt_confs = True
.type = bool
nproc = 1
.type = int
show_gui = False
.type = bool
output_base = None
.type = str
""")
class ringer_chi (object) :
def __init__ (self, id, angle_current, densities, sampling) :
adopt_init_args(self, locals())
if (angle_current < 0) :
self.angle_current = 360 + angle_current
self.peakchi, self.peakrho = self.find_peaks(densities)
self.deviation = self.deviate(self.peakchi)
self.meanrho = numpy.mean(densities)
# Add a tiny number to avoid dividing by 0 (which shouldn't happen anyway)
self.relrho = self.peakrho/(self.meanrho+.000000000000000001)
def format_csv (self) :
return "chi%d,%.1f,%s" % (self.id, self.angle_current, ",".join(
[ "%.3f" % x for x in self.densities ]))
def find_peaks (self, densities) :
for i, j in enumerate(densities):
if j == max(densities):
        # Convert the index to degrees; assumes the default 5-degree sampling.
        i = i * 5
return i, j
# This should never happen, but just in case, dump this in
# place of throwing an error.
return 0,0
def deviate(self, chi):
return min(abs(chi-i) for i in [60, 180, 300])
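# Illustrative sketch (not part of the original script): a ringer_chi built from
# a synthetic density trace sampled every 5 degrees. find_peaks() reports the
# peak position in degrees and its height; deviate() measures the distance to
# the nearest rotameric chi value (60, 180 or 300 degrees).
def _example_ringer_chi () :
  densities = [0.1] * 72
  densities[13] = 2.5   # peak at 13 * 5 = 65 degrees
  chi = ringer_chi(id=1, angle_current=-170, densities=densities, sampling=5)
  return chi.angle_current, chi.peakchi, chi.deviation   # -> (190, 65, 5)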
class ringer_residue (object) :
def __init__ (self, resname, chain_id, resid, altloc, n_chi) :
adopt_init_args(self, locals())
self._angles = {}
def format (self) :
if (self.altloc == "") :
return "%s%2s%s" % (self.resname, self.chain_id, self.resid)
else :
return "%s%2s%s (conformer %s)" % (self.resname, self.chain_id,
self.resid, self.altloc)
def format_csv (self) :
if (self.altloc == "") :
prefix = "%s%2s%s," % (self.resname, self.chain_id, self.resid)
else :
prefix = "%s%2s%s %s," % (self.resname, self.chain_id, self.resid,
self.altloc)
lines = []
for i in range(1, self.n_chi+1) :
chi = self.get_angle(i)
if (chi is not None) :
lines.append(prefix + chi.format_csv())
return "\n".join(lines)
def add_angle (self, **kwds) :
chi = ringer_chi(**kwds)
self._angles[chi.id] = chi
def get_angle (self, id) :
return self._angles.get(id, None)
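# Illustrative sketch (not part of the original script): consuming the .pkl file
# written by run() below, as suggested in the header comment. The file holds a
# list of ringer_residue objects; "model_emringer.pkl" is a hypothetical name.
def _example_read_pkl (file_name="model_emringer.pkl") :
  results = easy_pickle.load(file_name)
  for residue in results :
    chi1 = residue.get_angle(1)
    if (chi1 is not None) :
      print residue.format(), chi1.peakchi, chi1.peakrho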
def sample_angle (
i_seqs,
sites_cart,
map_coeffs,
real_map,
sigma,
angle_start,
params,
unit_cell=None) :
frac_matrix = None
if (unit_cell is None) :
assert (map_coeffs is not None)
unit_cell = map_coeffs.unit_cell()
frac_matrix = unit_cell.fractionalization_matrix()
assert (params.sampling_method != "direct") or (map_coeffs is not None)
from cctbx import maptbx
from scitbx.matrix import rotate_point_around_axis
point = rotate_point_around_axis(
axis_point_1=sites_cart[1],
axis_point_2=sites_cart[2],
point=sites_cart[3],
angle=-angle_start,
deg=True)
# TODO: present option to have point (sites_cart[3]) be generated based on idealized geometry.
n_degrees = 0
densities = []
while (n_degrees < 360) :
point = rotate_point_around_axis(
axis_point_1=sites_cart[1],
axis_point_2=sites_cart[2],
point=point,
angle=params.sampling_angle,
deg=True)
point_frac = unit_cell.fractionalize(site_cart=point)
if (params.sampling_method == "spline") and (map_coeffs is not None) :
rho = real_map.tricubic_interpolation(point_frac)
elif (params.sampling_method == "linear") or (map_coeffs is None) :
if (map_coeffs is None) :
rho = maptbx.non_crystallographic_eight_point_interpolation(
map=real_map,
gridding_matrix=frac_matrix,
site_cart=point)
#allow_out_of_bounds=True)
else :
rho = real_map.eight_point_interpolation(point_frac)
else :
rho = map_coeffs.direct_summation_at_point(
site_frac=point_frac,
sigma=sigma).real
densities.append(rho)
n_degrees += params.sampling_angle
#print densities
return densities
class iterate_over_residues (object) :
def __init__ (self,
pdb_hierarchy,
params,
map_coeffs=None,
ccp4_map=None,
unit_cell=None,
log=None) :
if (log is None) : log = sys.stdout
adopt_init_args(self, locals())
models = pdb_hierarchy.models()
if (len(models) > 1) :
raise Sorry("Multi-model PDB files not supported.")
self.sigma = self.real_map = None
if (map_coeffs is not None) :
self.unit_cell = map_coeffs.unit_cell()
if (params.sampling_method == "direct") :
self.map_coeffs = self.map_coeffs.expand_to_p1()
if (not map_coeffs.anomalous_flag()) :
self.map_coeffs = self.map_coeffs.generate_bijvoet_mates()
if (params.sampling_method != "direct") or (params.scaling == "sigma") :
fft_map = self.map_coeffs.fft_map(resolution_factor=params.grid_spacing)
if (params.scaling == "sigma") :
self.sigma = fft_map.statistics().sigma()
fft_map.apply_sigma_scaling()
else :
fft_map.apply_volume_scaling()
self.real_map = fft_map.real_map_unpadded()
else :
assert (ccp4_map is not None)
print >> self.log, "CCP4 map statistics:"
ccp4_map.show_summary(out=self.log, prefix=" ")
self.real_map = ccp4_map.data.as_double()
# XXX assume that the map is already scaled properly (in the original
# unit cell)
self.sigma = 1 #ccp4_map.statistics().sigma()
# XXX the unit cell that we need for the non-crystallographic
      # interpolation is not what comes out of the map - it's the cell of a
      # single grid step (the map's cell dimensions divided by the grid size).
from cctbx import uctbx
unit_cell = ccp4_map.unit_cell()
a = unit_cell.parameters()[0] / ccp4_map.unit_cell_grid[0]
b = unit_cell.parameters()[1] / ccp4_map.unit_cell_grid[1]
c = unit_cell.parameters()[2] / ccp4_map.unit_cell_grid[2]
alpha,beta,gamma = unit_cell.parameters()[3:6]
self.unit_cell = uctbx.unit_cell((a,b,c,alpha,beta,gamma))
# FIXME should use this instead (once it's available)
#self.unit_cell = ccp4_map.grid_unit_cell()
results = []
from mmtbx.rotamer import sidechain_angles
self.angle_lookup = sidechain_angles.SidechainAngles(False)
self.sites_cart = pdb_hierarchy.atoms().extract_xyz()
self.residue_groups = []
for chain in models[0].chains() :
self.residue_groups.extend(chain.residue_groups())
if (params.nproc in [None,Auto]) or (params.nproc > 1) :
# this will be a list of lists
results_ = easy_mp.pool_map(
processes=params.nproc,
fixed_func=self.sample_density,
args=range(len(self.residue_groups)))
# now flatten it out
self.results = []
for result_list in results_ : self.results.extend(result_list)
else :
self.results = []
for i_res in range(len(self.residue_groups)) :
self.results.extend(self.sample_density(i_res, verbose=True))
def sample_density (self, i_res, verbose=False) :
import iotbx.pdb
get_class = iotbx.pdb.common_residue_names_get_class
residue_group = self.residue_groups[i_res]
conformers = residue_group.conformers()
results = []
for i_conf, conformer in enumerate(residue_group.conformers()) :
if (i_conf > 0) and (self.params.skip_alt_confs) :
continue
residue = conformer.only_residue()
if (get_class(residue.resname) == "common_amino_acid") :
n_chi = int(self.angle_lookup.chisPerAA.get(residue.resname.lower(),0))
if (n_chi == 0) : continue
res_out = ringer_residue(
#residue_id_str=residue.id_str(),
resname=residue.resname,
chain_id=residue_group.parent().id,
resid=residue.resid(),
altloc=conformer.altloc,
n_chi=n_chi)
if (verbose) :
print >> self.log, " %s:" % residue.id_str()
for i in range(1, n_chi+1) :
try :
atoms = self.angle_lookup.extract_chi_atoms("chi%d" % i, residue)
except AttributeError :
print >> "AttributeError"
pass
else :
# Skip a chi angle if it doesn't work.
try:
i_seqs = [ atom.i_seq for atom in atoms ]
sites_chi = [ self.sites_cart[i_seq] for i_seq in i_seqs ]
from cctbx.geometry_restraints import dihedral
chi = dihedral(
sites=sites_chi,
angle_ideal=0,
weight=0)
if (verbose) :
print >> self.log, " chi%d = %.1f" % (i, chi.angle_model)
densities = sample_angle(
i_seqs=i_seqs,
sites_cart=sites_chi,
map_coeffs=self.map_coeffs,
real_map=self.real_map,
unit_cell=self.unit_cell,
angle_start=chi.angle_model,
sigma=self.sigma,
params=self.params)
if (verbose) : pass
res_out.add_angle(
id=i,
angle_current=chi.angle_model,
densities=densities,
sampling=self.params.sampling_angle)
# This is a pretty bad way to deal with it but I don't want to stop
# the whole program because of a problem such as a missing atom...
except: print "Problem with ringing"
results.append(res_out)
return results
def run (args, out=None, verbose=True) :
t0 = time.time()
if (out is None) : out = sys.stdout
if (len(args) == 0) :
phil_out = StringIO()
master_phil.show(out=phil_out, prefix=" ")
raise Usage("ringer.py [model.pdb] [map.mtz] [cif_file ...] [options]\n"+
" References:\n"+
" Lang PT, Ng HL, Fraser JS, Corn JE, Echols N, Sales M, Holton\n"+
" JM, Alber T. Automated electron-density sampling reveals\n"+
" widespread conformational polymorphism in proteins. Protein Sci.\n"+
" 2010 Jul;19(7):1420-31. PubMed PMID: 20499387\n"+
" \n"+
" Barad BA, Echols N, Wang RYR, Cheng YC, DiMaio F, Adams PD,\n"+
" Fraser JS. Side-chain-directed model and map validation for 3D \n"+
" Electron Cryomicroscopy. Manuscript in preparation.\n"+
" Full parameters:\n%s" % phil_out.getvalue())
from iotbx import file_reader
import iotbx.phil
cmdline = iotbx.phil.process_command_line_with_files(
args=args,
master_phil=master_phil,
pdb_file_def="pdb_file",
reflection_file_def="map_coeffs",
map_file_def="map_file",
cif_file_def="cif_file")
params = cmdline.work.extract()
validate_params(params)
pdb_in = file_reader.any_file(params.pdb_file, force_type="pdb")
pdb_in.check_file_type("pdb")
hierarchy = pdb_in.file_object.construct_hierarchy()
hierarchy.atoms().reset_i_seq()
map_coeffs = ccp4_map = None
if (params.map_coeffs is not None) :
mtz_in = file_reader.any_file(params.map_coeffs, force_type="hkl")
mtz_in.check_file_type("hkl")
best_guess = None
best_labels = []
all_labels = []
for array in mtz_in.file_server.miller_arrays :
if (array.info().label_string() == params.map_label) :
map_coeffs = array
break
elif (params.map_label is None) :
if (array.is_complex_array()) :
labels = array.info().label_string()
all_labels.append(labels)
if (labels.startswith("2FOFCWT") or labels.startswith("2mFoDFc") or
labels.startswith("FWT")) :
best_guess = array
best_labels.append(labels)
if (map_coeffs is None) :
if (len(all_labels) == 0) :
raise Sorry("No valid (pre-weighted) map coefficients found in file.")
elif (best_guess is None) :
raise Sorry("Couldn't automatically determine appropriate map labels. "+
"Choices:\n %s" % " \n".join(all_labels))
elif (len(best_labels) > 1) :
raise Sorry("Multiple appropriate map coefficients found in file. "+
"Choices:\n %s" % "\n ".join(best_labels))
map_coeffs = best_guess
print >> out, " Guessing %s for input map coefficients" % best_labels[0]
else :
ccp4_map_in = file_reader.any_file(params.map_file, force_type="ccp4_map")
ccp4_map_in.check_file_type("ccp4_map")
ccp4_map = ccp4_map_in.file_object
make_header("Iterating over residues", out=out)
t1 = time.time()
results = iterate_over_residues(
pdb_hierarchy=hierarchy,
map_coeffs=map_coeffs,
ccp4_map=ccp4_map,
params=params,
log=out).results
t2 = time.time()
if (verbose) :
print >> out, "Time excluding I/O: %8.1fs" % (t2 - t1)
print >> out, "Overall runtime: %8.1fs" % (t2 - t0)
if (params.output_base is None) :
pdb_base = os.path.basename(params.pdb_file)
params.output_base = os.path.splitext(pdb_base)[0] + "_emringer"
easy_pickle.dump("%s.pkl" % params.output_base, results)
print >> out, "Wrote %s.pkl" % params.output_base
csv = "\n".join([ r.format_csv() for r in results ])
open("%s.csv" % params.output_base, "w").write(csv)
print >> out, "Wrote %s.csv" % params.output_base
print >> out, "\nReferences:"
print >> out, """\
Lang PT, Ng HL, Fraser JS, Corn JE, Echols N, Sales M, Holton JM, Alber T.
Automated electron-density sampling reveals widespread conformational
polymorphism in proteins. Protein Sci. 2010 Jul;19(7):1420-31. PubMed PMID:
20499387"""
if (params.show_gui) :
run_app(results)
else :
return results
def validate_params (params) :
if (params.pdb_file is None) :
raise Sorry("No PDB file supplied (parameter: pdb_file)")
if (params.map_coeffs is None) and (params.map_file is None) :
raise Sorry("No map coefficients supplied (parameter: map_coeffs)")
return True
########################################################################
# GUI
from wxtbx import plots
import wx
def run_app (results) :
app = wx.App(0)
frame = RingerFrame(None, -1, "Ringer results")
frame.show_results(results)
frame.Show()
app.MainLoop()
class RingerFrame (plots.plot_frame) :
def create_plot_panel (self) :
plot = RingerPlot(self, figure_size=(6,8))
plot.canvas.Bind(wx.EVT_CHAR, self.OnChar)
return plot
def draw_top_panel (self) :
self.top_panel = wx.Panel(self, style=wx.SUNKEN_BORDER)
panel_szr = wx.BoxSizer(wx.VERTICAL)
self.top_panel.SetSizer(panel_szr)
szr2 = wx.BoxSizer(wx.HORIZONTAL)
panel_szr.Add(szr2)
txt1 = wx.StaticText(self.top_panel, -1, "Residue to display:")
szr2.Add(txt1, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.chooser = wx.Choice(self.top_panel, -1, size=(200,-1))
szr2.Add(self.chooser, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(wx.EVT_CHOICE, self.OnSelect, self.chooser)
self.Bind(wx.EVT_CHAR, self.OnChar)
self.chooser.Bind(wx.EVT_CHAR, self.OnChar)
return self.top_panel
def OnSelect (self, event) :
selection = event.GetEventObject().GetSelection()
self.plot_panel.show_residue(self.results[selection])
def show_results (self, results) :
self.results = results
choices = [ result.format() for result in results ]
self.chooser.SetItems(choices)
self.chooser.SetSelection(0)
self.plot_panel.show_residue(self.results[0])
def OnChar (self, event) :
key = event.GetKeyCode()
if (len(self.results) == 0) : return
selection = self.chooser.GetSelection()
if (key in [wx.WXK_TAB, wx.WXK_RETURN, wx.WXK_SPACE]) :
if (selection < (len(self.results) - 1)) :
selection += 1
elif (len(self.results) > 0) :
selection = 0
elif (key in [wx.WXK_DELETE, wx.WXK_BACK]) :
if (selection > 0) :
selection -= 1
else :
selection = len(results) - 1
self.chooser.SetSelection(selection)
self.plot_panel.show_residue(self.results[selection])
class RingerPlot (plots.plot_container) :
def show_residue (self, residue) :
if (self.disabled) : return
self.figure.clear()
subplots = []
for i in range(1, residue.n_chi + 1) :
chi = residue.get_angle(i)
if (chi is None) : continue
if (len(subplots) > 0) :
p = self.figure.add_subplot(4, 1, i, sharex=subplots[0])
else :
p = self.figure.add_subplot(4, 1, i)
p.set_title(residue.format())
p.set_position([0.15, 0.725 - 0.225*(i-1), 0.8, 0.225])
x = [ k*chi.sampling for k in range(len(chi.densities)) ]
p.plot(x, chi.densities, 'r-', linewidth=1)
p.axvline(chi.angle_current, color='b', linewidth=2, linestyle='--')
p.axvline(chi.peakchi, color='g', linewidth=2, linestyle = '--')
p.axhline(0, color=(0.4,0.4,0.4), linestyle='--', linewidth=1)
p.axhspan(0.3,1,facecolor="green",alpha=0.5)
p.axhspan(-1,0.3,facecolor="grey",alpha=0.5)
p.set_xlim(0,360)
ax = p.get_axes()
ax.set_ylabel("Rho")
ax.set_xlabel("Chi%d" % i)
subplots.append(p)
for p in subplots[:-1] :
for label in p.get_axes().get_xticklabels() :
label.set_visible(False)
p.text(0,-0.5,'Green = Peak, Blue = Modelled',
transform=ax.transAxes)
self.canvas.draw()
self.canvas.Fit()
self.Layout()
self.parent.Refresh()
if (__name__ == "__main__") :
run(sys.argv[1:])
|
|
import os
import struct
from io import BytesIO
from PIL import Image
def resize(image, size, format=None):
    """Scale ``image`` to the target ``size`` (enlarging small images,
    thumbnailing large ones), centre it on a transparent RGBA canvas of
    that size and return the encoded image bytes."""
output = BytesIO()
back = Image.new('RGBA', size, (0,0,0,0))
if image.size[0] < size[0] or image.size[1] < size[1]:
if image.height > image.width:
factor = size[0] / image.height
else:
factor = size[1] / image.width
image = image.resize((int(image.width * factor), int(image.height * factor)), Image.ANTIALIAS)
else:
image.thumbnail(size, Image.ANTIALIAS)
offset = [0, 0]
if image.size[0] > image.size[1]:
offset[1] = int(back.size[1]/2-image.size[1]/2)
elif image.size[0] < image.size[1]:
offset[0] = int(back.size[0]/2-image.size[0]/2)
else:
offset[0] = int(back.size[0]/2-image.size[0]/2)
offset[1] = int(back.size[1]/2-image.size[1]/2)
back.paste(image, tuple(offset))
format = format or image.format
back.save(output, format, sizes=[size])
contents = output.getvalue()
output.close()
return contents
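# Illustrative sketch (not part of the original module): pad a small in-memory
# image onto a transparent 64x64 canvas and get back the encoded bytes. The red
# test image and the PNG output format are arbitrary choices for the example.
def _example_resize():
    source = Image.new('RGBA', (20, 10), (255, 0, 0, 255))
    png_bytes = resize(source, (64, 64), format='PNG')
    return len(png_bytes)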
struct_symbols = {1:'B',#byte
2:'H',#word
4:'L',#long word
8:'Q' #double long word
}
endian_symbols = {'little':'<',
'big':'>'}
name_dictionary = {'PEHeader_Machine': {
0:'IMAGE_FILE_MACHINE_UNKNOWN',
0x014c:'IMAGE_FILE_MACHINE_I386',
0x0162:'IMAGE_FILE_MACHINE_R3000',
0x0166:'IMAGE_FILE_MACHINE_R4000',
0x0168:'IMAGE_FILE_MACHINE_R10000',
0x0169:'IMAGE_FILE_MACHINE_WCEMIPSV2',
0x0184:'IMAGE_FILE_MACHINE_ALPHA',
0x01a2:'IMAGE_FILE_MACHINE_SH3',
0x01a3:'IMAGE_FILE_MACHINE_SH3DSP',
0x01a4:'IMAGE_FILE_MACHINE_SH3E',
0x01a6:'IMAGE_FILE_MACHINE_SH4',
0x01a8:'IMAGE_FILE_MACHINE_SH5',
0x01c0:'IMAGE_FILE_MACHINE_ARM',
0x01c2:'IMAGE_FILE_MACHINE_THUMB',
0x01c4:'IMAGE_FILE_MACHINE_ARMNT',
0x01d3:'IMAGE_FILE_MACHINE_AM33',
0x01f0:'IMAGE_FILE_MACHINE_POWERPC',
0x01f1:'IMAGE_FILE_MACHINE_POWERPCFP',
0x0200:'IMAGE_FILE_MACHINE_IA64',
0x0266:'IMAGE_FILE_MACHINE_MIPS16',
0x0284:'IMAGE_FILE_MACHINE_ALPHA64',
0x0284:'IMAGE_FILE_MACHINE_AXP64', # same
0x0366:'IMAGE_FILE_MACHINE_MIPSFPU',
0x0466:'IMAGE_FILE_MACHINE_MIPSFPU16',
0x0520:'IMAGE_FILE_MACHINE_TRICORE',
0x0cef:'IMAGE_FILE_MACHINE_CEF',
0x0ebc:'IMAGE_FILE_MACHINE_EBC',
0x8664:'IMAGE_FILE_MACHINE_AMD64',
0x9041:'IMAGE_FILE_MACHINE_M32R',
0xc0ee:'IMAGE_FILE_MACHINE_CEE'
},
'PEHeader_Characteristics':{
0x0001:'IMAGE_FILE_RELOCS_STRIPPED',
0x0002:'IMAGE_FILE_EXECUTABLE_IMAGE',
0x0004:'IMAGE_FILE_LINE_NUMS_STRIPPED',
0x0008:'IMAGE_FILE_LOCAL_SYMS_STRIPPED',
0x0010:'IMAGE_FILE_AGGRESIVE_WS_TRIM',
0x0020:'IMAGE_FILE_LARGE_ADDRESS_AWARE',
0x0040:'IMAGE_FILE_16BIT_MACHINE',
0x0080:'IMAGE_FILE_BYTES_REVERSED_LO',
0x0100:'IMAGE_FILE_32BIT_MACHINE',
0x0200:'IMAGE_FILE_DEBUG_STRIPPED',
0x0400:'IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP',
0x0800:'IMAGE_FILE_NET_RUN_FROM_SWAP',
0x1000:'IMAGE_FILE_SYSTEM',
0x2000:'IMAGE_FILE_DLL',
0x4000:'IMAGE_FILE_UP_SYSTEM_ONLY',
0x8000:'IMAGE_FILE_BYTES_REVERSED_HI'
},
'OptionalHeader_Subsystem':{
0:'IMAGE_SUBSYSTEM_UNKNOWN',
1:'IMAGE_SUBSYSTEM_NATIVE',
2:'IMAGE_SUBSYSTEM_WINDOWS_GUI',
3:'IMAGE_SUBSYSTEM_WINDOWS_CUI',
5:'IMAGE_SUBSYSTEM_OS2_CUI',
7:'IMAGE_SUBSYSTEM_POSIX_CUI',
8:'IMAGE_SUBSYSTEM_NATIVE_WINDOWS',
9:'IMAGE_SUBSYSTEM_WINDOWS_CE_GUI',
10:'IMAGE_SUBSYSTEM_EFI_APPLICATION',
11:'IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER',
12:'IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER',
13:'IMAGE_SUBSYSTEM_EFI_ROM',
14:'IMAGE_SUBSYSTEM_XBOX',
16:'IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION'
},
'OptionalHeader_DLL_Characteristics':{
0x0001:'IMAGE_LIBRARY_PROCESS_INIT', # reserved
0x0002:'IMAGE_LIBRARY_PROCESS_TERM', # reserved
0x0004:'IMAGE_LIBRARY_THREAD_INIT', # reserved
0x0008:'IMAGE_LIBRARY_THREAD_TERM', # reserved
0x0020:'IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA',
0x0040:'IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE',
0x0080:'IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY',
0x0100:'IMAGE_DLLCHARACTERISTICS_NX_COMPAT',
0x0200:'IMAGE_DLLCHARACTERISTICS_NO_ISOLATION',
0x0400:'IMAGE_DLLCHARACTERISTICS_NO_SEH',
0x0800:'IMAGE_DLLCHARACTERISTICS_NO_BIND',
0x1000:'IMAGE_DLLCHARACTERISTICS_APPCONTAINER',
0x2000:'IMAGE_DLLCHARACTERISTICS_WDM_DRIVER',
0x4000:'IMAGE_DLLCHARACTERISTICS_GUARD_CF',
0x8000:'IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE'
},
'SectionHeader_Characteristics':{
0x00000000:'IMAGE_SCN_TYPE_REG', # reserved
0x00000001:'IMAGE_SCN_TYPE_DSECT', # reserved
0x00000002:'IMAGE_SCN_TYPE_NOLOAD', # reserved
0x00000004:'IMAGE_SCN_TYPE_GROUP', # reserved
0x00000008:'IMAGE_SCN_TYPE_NO_PAD', # reserved
0x00000010:'IMAGE_SCN_TYPE_COPY', # reserved
0x00000020:'IMAGE_SCN_CNT_CODE',
0x00000040:'IMAGE_SCN_CNT_INITIALIZED_DATA',
0x00000080:'IMAGE_SCN_CNT_UNINITIALIZED_DATA',
0x00000100:'IMAGE_SCN_LNK_OTHER',
0x00000200:'IMAGE_SCN_LNK_INFO',
0x00000400:'IMAGE_SCN_LNK_OVER', # reserved
0x00000800:'IMAGE_SCN_LNK_REMOVE',
0x00001000:'IMAGE_SCN_LNK_COMDAT',
0x00004000:'IMAGE_SCN_MEM_PROTECTED', # obsolete
0x00004000:'IMAGE_SCN_NO_DEFER_SPEC_EXC',
0x00008000:'IMAGE_SCN_GPREL',
0x00008000:'IMAGE_SCN_MEM_FARDATA',
0x00010000:'IMAGE_SCN_MEM_SYSHEAP', # obsolete
0x00020000:'IMAGE_SCN_MEM_PURGEABLE',
0x00020000:'IMAGE_SCN_MEM_16BIT',
0x00040000:'IMAGE_SCN_MEM_LOCKED',
0x00080000:'IMAGE_SCN_MEM_PRELOAD',
0x00100000:'IMAGE_SCN_ALIGN_1BYTES',
0x00200000:'IMAGE_SCN_ALIGN_2BYTES',
0x00300000:'IMAGE_SCN_ALIGN_4BYTES',
0x00400000:'IMAGE_SCN_ALIGN_8BYTES',
0x00500000:'IMAGE_SCN_ALIGN_16BYTES', # default alignment
0x00600000:'IMAGE_SCN_ALIGN_32BYTES',
0x00700000:'IMAGE_SCN_ALIGN_64BYTES',
0x00800000:'IMAGE_SCN_ALIGN_128BYTES',
0x00900000:'IMAGE_SCN_ALIGN_256BYTES',
0x00A00000:'IMAGE_SCN_ALIGN_512BYTES',
0x00B00000:'IMAGE_SCN_ALIGN_1024BYTES',
0x00C00000:'IMAGE_SCN_ALIGN_2048BYTES',
0x00D00000:'IMAGE_SCN_ALIGN_4096BYTES',
0x00E00000:'IMAGE_SCN_ALIGN_8192BYTES',
0x00F00000:'IMAGE_SCN_ALIGN_MASK',
0x01000000:'IMAGE_SCN_LNK_NRELOC_OVFL',
0x02000000:'IMAGE_SCN_MEM_DISCARDABLE',
0x04000000:'IMAGE_SCN_MEM_NOT_CACHED',
0x08000000:'IMAGE_SCN_MEM_NOT_PAGED',
0x10000000:'IMAGE_SCN_MEM_SHARED',
0x20000000:'IMAGE_SCN_MEM_EXECUTE',
0x40000000:'IMAGE_SCN_MEM_READ',
0x80000000:'IMAGE_SCN_MEM_WRITE',
},
}
DEFAULT_ENDIAN = 'little'
def read_data(file_data, offset, number_of_bytes, string_data=None):
"""Just reads the straight data with no endianness."""
if number_of_bytes > 0:
data = file_data[offset:offset+number_of_bytes]
#if len(data) != number_of_bytes:
#print 'data out of bounds:', 'offset', hex(offset), 'data', data, 'data_len', len(data), 'num_bytes', number_of_bytes, 'total', hex(len(file_data))
return data
else:
        return bytearray(b'')
def read_bytes(file_data, offset, number_of_bytes, endian=None, string_data=None):
"""Returns a tuple of the data value and string representation.
Will read 1,2,4,8 bytes with little endian as the default
(value, string)
"""
if number_of_bytes > 0:
endian = endian or DEFAULT_ENDIAN
endian = endian_symbols[endian]
data = bytes(file_data[offset:offset+number_of_bytes])
if len(data) != number_of_bytes:
return 0, u''
return struct.unpack(endian+struct_symbols[number_of_bytes], data)[0], data
else:
return 0, u''
def value_to_byte_string(value, number_of_bytes, endian=None):
endian = endian or DEFAULT_ENDIAN
endian = endian_symbols[endian]
return struct.pack(endian+struct_symbols[number_of_bytes], value)
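# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows the read_bytes / value_to_byte_string round trip defined above, assuming the
# module-level endian_symbols/struct_symbols tables map 'little' -> '<' and 2 -> 'H'
# as the struct calls above imply.
def _example_read_write_round_trip():
    data = bytearray(b'\x4d\x5a\x90\x00')        # first bytes of a DOS header ('MZ', ...)
    value, raw = read_bytes(data, 0, 2)          # little-endian by default
    assert value == 0x5a4d and raw == b'MZ'
    assert value_to_byte_string(0x5a4d, 2) == b'MZ'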
class ResourceTypes(object):
Cursor = 1
Bitmap = 2
Icon = 3
Menu = 4
Dialog = 5
String = 6
Font_Directory = 7
Font = 8
Accelerator = 9
RC_Data = 10
Message_Table = 11
Group_Cursor = 12
Group_Icon = 14
Version_Info = 16
DLG_Include = 17
Plug_Play = 19
VXD = 20
Animated_Cursor = 21
Animated_Icon = 22
HTML = 23
Manifest = 24
resource_types = {1: 'Cursor',
2: 'Bitmap',
3: 'Icon',
4: 'Menu',
5: 'Dialog',
6: 'String',
7: 'Font Directory',
8: 'Font',
9: 'Accelerator',
10: 'RC Data',
11: 'Message Table',
12: 'Group Cursor',
14: 'Group Icon',
16: 'Version Info',
17: 'DLG Include',
19: 'Plug and Play',
20: 'VXD',
21: 'Animated Cursor',
22: 'Animated Icon',
23: 'HTML',
24: 'Manifest'}
_32BIT_PLUS_MAGIC = 0x20b
_32BIT_MAGIC = 0x10b
_ROM_MAGIC = 0x107
def read_from_name_dict(obj, field_name):
dict_field = '{}_{}'.format(obj.__class__.__name__, field_name)
return name_dictionary.get(dict_field, {})
def test_bit(value, index):
mask = 1 << index
return (value & mask)
def set_bit(value, index):
mask = 1 << index
return (value | mask)
def clear_bit(value, index):
mask = ~(1 << index)
return (value & mask)
def toggle_bit(value, index):
mask = 1 << index
return (value ^ mask)
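# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# The resource directory entries defined below keep an RVA in the low 31 bits of
# DataOrSubdirectoryEntryRVA and use bit 31 as a "points to a subdirectory" flag;
# this shows how the bit helpers above implement that convention.
def _example_high_bit_flag():
    rva_with_flag = set_bit(0x00001A2B, 31)            # mark as a subdirectory pointer
    assert test_bit(rva_with_flag, 31) != 0            # flag is set
    assert clear_bit(rva_with_flag, 31) == 0x00001A2B  # plain RVA recovered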
class PEFormatError(Exception):
pass
class Printable(object):
def _attrs(self):
a = []
for attr in dir(self):
if not attr.startswith('_') and not callable(getattr(self, attr)):
a.append(attr)
return a
def _dict_items(self):
for a in reversed(self._attrs()):
yield a, getattr(self,a)
def _dict_string(self):
vals = []
for key, val in self._dict_items():
try:
vals.append(u'{}={}'.format(key, val))
except UnicodeDecodeError:
vals.append(u'{}=<not printable>'.format(key))
return u', '.join(vals)
def __repr__(self):
return str(self)
def __str__(self):
return u'{} [{}]'.format(self.__class__.__name__, self._dict_string())
class Structure(Printable):
_fields = {}
def __init__(self, size=0, value=None, data=None, absolute_offset=0,
name='', friendly_name='', *args, **kwargs):
self._value = value
self.size = size
self.data = data
self.name = name
self.friendly_name = friendly_name
self._absolute_offset = absolute_offset
self._file_data = None
for k, v in kwargs.items():
setattr(self, k, v)
@property
def absolute_offset(self):
return self._absolute_offset
@absolute_offset.setter
def absolute_offset(self, abs_offset):
self._absolute_offset = abs_offset
for k, v in self._fields.items():
field = getattr(self, k)
field.absolute_offset = self.absolute_offset + field.offset
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if self._file_data is not None:
self.data = value_to_byte_string(value, self.size)
self._file_data[self.absolute_offset:self.absolute_offset+self.size] = bytearray(self.data)
self._value = value
def process_field(self, file_data, field_name, field_info):
if hasattr(self, 'process_'+field_name) and callable(getattr(self, 'process_'+field_name)):
getattr(self, 'process_'+field_name)(file_data, field_name, field_info)
else:
absolute_offset = field_info['offset'] + self.absolute_offset
size = field_info['size']
self.size += size
int_value, data = read_bytes(file_data, absolute_offset, size)
field_name_dict = read_from_name_dict(self, field_name)
name = field_name_dict.get(int_value, '')
friendly_name = name.replace('_', ' ').capitalize()
setattr(self, field_name, Structure(offset=field_info['offset'],
size=size,
value=int_value, data=data,
absolute_offset=absolute_offset,
name=name, friendly_name=friendly_name))
getattr(self, field_name)._file_data = file_data
def process_Characteristics(self, file_data, field_name, field_info):
absolute_offset = field_info['offset'] + self.absolute_offset
size = field_info['size']
self.size += size
int_value, data = read_bytes(file_data, absolute_offset, size)
field_name_dict = read_from_name_dict(self, field_name)
bit_length = len(bin(int_value))-2
characteristics = {}
for i in range(bit_length):
            bit_value = test_bit(int_value, i)  # renamed to avoid shadowing the module-level set_bit()
            char_name = field_name_dict.get(bit_value, '')
            if bit_value != 0 and char_name:
                characteristics[char_name] = bit_value
setattr(self, field_name, Structure(offset=field_info['offset'],
size=size,
value=int_value, data=data,
absolute_offset=absolute_offset,
values=characteristics,
))
getattr(self, field_name)._file_data = file_data
@classmethod
def parse_from_data(cls, file_data, **cls_args):
"""Parses the Structure from the file data."""
self = cls(**cls_args)
self._file_data = file_data
for field_name, field_info in self._fields.items():
self.process_field(file_data, field_name, field_info)
return self
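# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Minimal illustration of the _fields-driven parsing above: each entry maps a field
# name to an offset/size pair, and parse_from_data materializes one child Structure
# per field. The byte values here are made up.
def _example_structure_parsing():
    class TinyHeader(Structure):
        _fields = {'Magic': {'offset': 0, 'size': 2},
                   'Flags': {'offset': 2, 'size': 2}}
    hdr = TinyHeader.parse_from_data(bytearray(b'\x4d\x5a\x03\x00'), absolute_offset=0)
    assert hdr.Magic.value == 0x5a4d
    assert hdr.Flags.value == 3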
class DOSHeader(Structure):
"""The dos header of the PE file"""
_fields = {'Signature':{'offset':0,
'size':2},
'PEHeaderOffset':{'offset':0x3c,
'size':4}
}
class PEHeader(Structure):
"""PE signature plus the COFF header"""
_fields = {'Signature':{'offset':0,
'size':4},
'Machine':{'offset':4,
'size':2},
'NumberOfSections':{'offset':6,
'size':2},
'TimeDateStamp':{'offset':8,
'size':4},
'PointerToSymbolTable':{'offset':12,
'size':4},
'NumberOfSymbols':{'offset':16,
'size':4},
'SizeOfOptionalHeader':{'offset':20,
'size':2},
'Characteristics':{'offset':22,
'size':2}
}
class OptionalHeader(Structure):
_fields_32_plus = {'Magic':{'offset':0,
'size':2},
'MajorLinkerVersion':{'offset':2,
'size':1},
'MinorLinkerVersion':{'offset':3,
'size':1},
'SizeOfCode':{'offset':4,
'size':4},
'SizeOfInitializedData':{'offset':8,
'size':4},
'SizeOfUninitializedData':{'offset':12,
'size':4},
'AddressOfEntryPoint':{'offset':16,
'size':4},
'BaseOfCode':{'offset':20,
'size':4},
'ImageBase':{'offset':24,
'size':8},
'SectionAlignment':{'offset':32,
'size':4},
'FileAlignment':{'offset':36,
'size':4},
'MajorOperatingSystemVersion':{'offset':40,
'size':2},
'MinorOperatingSystemVersion':{'offset':42,
'size':2},
'MajorImageVersion':{'offset':44,
'size':2},
'MinorImageVersion':{'offset':46,
'size':2},
'MajorSubsystemVersion':{'offset':48,
'size':2},
'MinorSubsystemVersion':{'offset':50,
'size':2},
'Reserved':{'offset':52,
'size':4},
'SizeOfImage':{'offset':56,
'size':4},
'SizeOfHeaders':{'offset':60,
'size':4},
'CheckSum': {'offset': 64,
'size':4},
'Subsystem':{'offset':68,
'size':2},
'DLL_Characteristics':{'offset':70,
'size':2},
'SizeOfStackReserve':{'offset':72,
'size':8},
'SizeOfStackCommit':{'offset':80,
'size':8},
'SizeOfHeapReserve':{'offset':88,
'size':8},
'SizeOfHeapCommit':{'offset':96,
'size':8},
'LoaderFlags':{'offset':104,
'size':4},
'NumberOfRvaAndSizes':{'offset':108,
'size':4},
'ExportTableAddress':{'offset':112,
'size':4},
'ExportTableSize':{'offset':116,
'size':4},
'ImportTableAddress':{'offset':120,
'size':4},
'ImportTableSize':{'offset':124,
'size':4},
'ResourceTableAddress':{'offset':128,
'size':4},
'ResourceTableSize':{'offset':132,
'size':4},
'ExceptionTableAddress':{'offset':136,
'size':4},
'ExceptionTableSize':{'offset':140,
'size':4},
'CertificateTableAddress':{'offset':144,
'size':4},
'CertificateTableSize':{'offset':148,
'size':4},
'BaseRelocationTableAddress':{'offset':152,
'size':4},
'BaseRelocationTableSize':{'offset':156,
'size':4},
'DebugAddress':{'offset':160,
'size':4},
'DebugSize':{'offset':164,
'size':4},
'ArchitectureAddress':{'offset':168,
'size':4},
'ArchitectureSize':{'offset':172,
'size':4},
'GlobalPtrAddress':{'offset':176,
'size':8},
'GlobalPtrSize':{'offset':184,
'size':0},
'ThreadLocalStorageTableAddress':{'offset':184,
'size':4},
'ThreadLocalStorageTableSize':{'offset':188,
'size':4},
'LoadConfigTableAddress':{'offset':192,
'size':4},
'LoadConfigTableSize':{'offset':196,
'size':4},
'BoundImportAddress':{'offset':200,
'size':4},
'BoundImportSize':{'offset':204,
'size':4},
'ImportAddressTableAddress':{'offset':208,
'size':4},
'ImportAddressTableSize':{'offset':212,
'size':4},
'DelayImportDescriptorAddress':{'offset':216,
'size':4},
'DelayImportDescriptorSize':{'offset':220,
'size':4},
'COMRuntimeHeaderAddress':{'offset':224,
'size':4},
'COMRuntimeHeaderSize':{'offset':228,
'size':4},
'Reserved2':{'offset':232,
'size':8}
}
_fields_32 = {'Magic':{'offset':0,
'size':2},
'MajorLinkerVersion':{'offset':2,
'size':1},
'MinorLinkerVersion':{'offset':3,
'size':1},
'SizeOfCode':{'offset':4,
'size':4},
'SizeOfInitializedData':{'offset':8,#
'size':4},
'SizeOfUninitializedData':{'offset':12,
'size':4},
'AddressOfEntryPoint':{'offset':16,
'size':4},
'BaseOfCode':{'offset':20,
'size':4},
'BaseOfData':{'offset':24,
'size':4},
'ImageBase':{'offset':28,
'size':4},
'SectionAlignment':{'offset':32,
'size':4},
'FileAlignment':{'offset':36,
'size':4},
'MajorOperatingSystemVersion':{'offset':40,
'size':2},
'MinorOperatingSystemVersion':{'offset':42,
'size':2},
'MajorImageVersion':{'offset':44,
'size':2},
'MinorImageVersion':{'offset':46,
'size':2},
'MajorSubsystemVersion':{'offset':48,
'size':2},
'MinorSubsystemVersion':{'offset':50,
'size':2},
'Reserved':{'offset':52,
'size':4},
'SizeOfImage':{'offset':56,#
'size':4},
'SizeOfHeaders':{'offset':60,
'size':4},
'CheckSum': {'offset': 64,
'size':4},
'Subsystem':{'offset':68,
'size':2},
'DLL_Characteristics':{'offset':70,
'size':2},
'SizeOfStackReserve':{'offset':72,
'size':4},
'SizeOfStackCommit':{'offset':76,
'size':4},
'SizeOfHeapReserve':{'offset':80,
'size':4},
'SizeOfHeapCommit':{'offset':84,
'size':4},
'LoaderFlags':{'offset':88,
'size':4},
'NumberOfRvaAndSizes':{'offset':92,
'size':4},
'ExportTableAddress':{'offset':96,
'size':4},
'ExportTableSize':{'offset':100,
'size':4},
'ImportTableAddress':{'offset':104,
'size':4},
'ImportTableSize':{'offset':108,
'size':4},
'ResourceTableAddress':{'offset':112,
'size':4},
'ResourceTableSize':{'offset':116,#
'size':4},
'ExceptionTableAddress':{'offset':120,
'size':4},
'ExceptionTableSize':{'offset':124,
'size':4},
'CertificateTableAddress':{'offset':128,
'size':4},
'CertificateTableSize':{'offset':132,
'size':4},
'BaseRelocationTableAddress':{'offset':136,#
'size':4},
'BaseRelocationTableSize':{'offset':140,
'size':4},
'DebugAddress':{'offset':144,
'size':4},
'DebugSize':{'offset':148,
'size':4},
'ArchitectureAddress':{'offset':152,
'size':4},
'ArchitectureSize':{'offset':156,
'size':4},
'GlobalPtrAddress':{'offset':160,
'size':8},
'GlobalPtrSize':{'offset':168,
'size':0},
'ThreadLocalStorageTableAddress':{'offset':168,
'size':4},
'ThreadLocalStorageTableSize':{'offset':172,
'size':4},
'LoadConfigTableAddress':{'offset':176,
'size':4},
'LoadConfigTableSize':{'offset':180,
'size':4},
'BoundImportAddress':{'offset':184,
'size':4},
'BoundImportSize':{'offset':188,
'size':4},
'ImportAddressTableAddress':{'offset':192,
'size':4},
'ImportAddressTableSize':{'offset':196,
'size':4},
'DelayImportDescriptorAddress':{'offset':200,
'size':4},
'DelayImportDescriptorSize':{'offset':204,
'size':4},
'COMRuntimeHeaderAddress':{'offset':208,
'size':4},
'COMRuntimeHeaderSize':{'offset':212,
'size':4},
'Reserved2':{'offset':216,
'size':8},
}
def process_DLL_Characteristics(self, file_data, field_name, field_info):
self.process_Characteristics(file_data, field_name, field_info)
def process_field(self, file_data, field_name, field_info):
if hasattr(self, 'process_'+field_name) and callable(getattr(self, 'process_'+field_name)):
getattr(self, 'process_'+field_name)(file_data, field_name, field_info)
else:
absolute_offset = field_info['offset'] + self.absolute_offset
size = field_info['size']
self.size += size
int_value, data = read_bytes(file_data, absolute_offset, size)
field_name_dict = read_from_name_dict(self, field_name)
name = field_name_dict.get(int_value, '')
friendly_name = name.replace('_', ' ').capitalize()
setattr(self, field_name, Structure(offset=field_info['offset'],
size=size,
value=int_value, data=data,
absolute_offset=absolute_offset,
name=name, friendly_name=friendly_name))
getattr(self, field_name)._file_data = file_data
@classmethod
def parse_from_data(cls, file_data, **cls_args):
"""Parses the Structure from the file data."""
self = cls(**cls_args)
self._file_data = file_data
magic, x = read_bytes(file_data, self.absolute_offset, 2)
if magic == _32BIT_MAGIC:
self._fields = self._fields_32
elif magic == _32BIT_PLUS_MAGIC:
self._fields = self._fields_32_plus
else:
            raise PEFormatError('Magic value 0x%x in the Optional Header is invalid '
                                '(expected 0x%x or 0x%x).' % (magic, _32BIT_MAGIC, _32BIT_PLUS_MAGIC))
for field_name, field_info in self._fields.items():
self.process_field(file_data, field_name, field_info)
return self
class SectionHeader(Structure):
"""Section Header. Each section header is a row in the section table"""
_fields = {'Name':{'offset':0,
'size':8},
'VirtualSize':{'offset':8, #.rsrc
'size':4},
'VirtualAddress':{'offset':12,#.reloc
'size':4},
'SizeOfRawData':{'offset':16,#.rsrc
'size':4},
'PointerToRawData':{'offset':20,#.reloc
'size':4},
'PointerToRelocations':{'offset':24,
'size':4},
'PointerToLineNumbers':{'offset':28,
'size':4},
'NumberOfRelocations':{'offset':32,
'size':2},
'NumberOfLineNumbers':{'offset':34,
'size':2},
'Characteristics':{'offset':36,
'size':4}
}
class ResourceDirectoryTable(Structure):
_fields = {'Characteristics':{'offset':0,
'size':4},
'TimeDateStamp':{'offset':4,
'size':4},
'MajorVersion':{'offset':8,
'size':2},
'MinorVersion':{'offset':10,
'size':2},
'NumberOfNameEntries':{'offset':12,
'size':2},
'NumberOfIDEntries':{'offset':14,
'size':2}
}
def __init__(self, *args, **kwargs):
self.name_entries = []
self.id_entries = []
self.subdirectory_tables = []
self.data_entries = []
super(ResourceDirectoryTable, self).__init__(*args, **kwargs)
class ResourceDirectoryEntryName(Structure):
_fields = {'NameRVA':{'offset':0,
'size':4},
'DataOrSubdirectoryEntryRVA':{'offset':4, #high bit 1 for subdir RVA
'size':4}
}
directory_string = None
def is_data_entry(self):
return (test_bit(self.DataOrSubdirectoryEntryRVA.value, 31) == 0)
def data_rva_empty(self):
return self.get_data_or_subdirectory_rva() == 0
def get_data_or_subdirectory_rva(self, virtual_to_physical=0):
return clear_bit(self.DataOrSubdirectoryEntryRVA.value-virtual_to_physical, 31)
def get_data_or_subdirectory_absolute_offset(self):
return self.get_data_or_subdirectory_rva() + self._section_header.PointerToRawData.value
def get_name_absolute_offset(self):
return clear_bit(self.NameRVA.value, 31) + self._section_header.PointerToRawData.value
class ResourceDirectoryEntryID(Structure):
_fields = {'IntegerID':{'offset':0,
'size':4},
'DataOrSubdirectoryEntryRVA':{'offset':4,#high bit 1 for Subdir RVA
'size':4}
}
def is_data_entry(self):
return (test_bit(self.DataOrSubdirectoryEntryRVA.value, 31) == 0)
def data_rva_empty(self):
return self.get_data_or_subdirectory_rva() == 0
def get_data_or_subdirectory_rva(self, virtual_to_physical=0):
return clear_bit(self.DataOrSubdirectoryEntryRVA.value-virtual_to_physical, 31)
def get_data_or_subdirectory_absolute_offset(self, vtp=0):
return self.get_data_or_subdirectory_rva(vtp) + self._section_header.PointerToRawData.value
class ResourceDirectoryString(Structure):
_fields = {'Length':{'offset':0,
'size':2},
#String : offset=2, len=Length
}
@classmethod
def parse_from_data(cls, file_data, **cls_args):
"""Parses the Structure from the file data."""
self = cls(**cls_args)
self._file_data = file_data
str_len, _ = read_bytes(file_data, self.absolute_offset, 2)
self._fields['String'] = {'offset':2, 'size':str_len}
for field_name, field_info in self._fields.items():
self.process_field(file_data, field_name, field_info)
return self
def process_String(self, file_data, field_name, field_info):
absolute_offset = field_info['offset'] + self.absolute_offset
size = field_info['size']
self.size += size
        data = u''
        for i in range(size):
            val, dat = read_bytes(file_data, absolute_offset+i*2, 2)
            data += dat.decode('utf-16-le')  # resource name strings are stored as UTF-16LE code units
setattr(self, field_name, Structure(offset=field_info['offset'],
size=size,
data=data,
absolute_offset=absolute_offset,
))
class ResourceDataEntry(Structure):
_fields = {'DataRVA':{'offset':0,
'size':4},
'Size':{'offset':4,
'size':4},
'Codepage':{'offset':8,
'size':4},
'Reserved':{'offset':12,
'size':4},
}
def get_data_absolute_offset(self):
return self._section_header.PointerToRawData.value - self._section_header.VirtualAddress.value + self.DataRVA.value
def process_field(self, file_data, field_name, field_info):
if hasattr(self, 'process_'+field_name) and callable(getattr(self, 'process_'+field_name)):
getattr(self, 'process_'+field_name)(file_data, field_name, field_info)
else:
absolute_offset = field_info['offset'] + self.absolute_offset
size = field_info['size']
self.size += size
int_value, data = read_bytes(file_data, absolute_offset, size)
field_name_dict = read_from_name_dict(self, field_name)
name = field_name_dict.get(int_value, '')
friendly_name = name.replace('_', ' ').capitalize()
setattr(self, field_name, Structure(offset=field_info['offset'],
size=size,
value=int_value, data=data,
absolute_offset=absolute_offset,
name=name, friendly_name=friendly_name))
getattr(self, field_name)._file_data = file_data
@classmethod
def parse_from_data(cls, file_data, **cls_args):
"""Parses the Structure from the file data."""
self = cls(**cls_args)
self._file_data = file_data
for field_name, field_info in self._fields.items():
self.process_field(file_data, field_name, field_info)
self.data = read_data(file_data, self.get_data_absolute_offset(), self.Size.value)
return self
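# --- Hedged worked example (added for illustration; not part of the original module) ---
# get_data_absolute_offset above converts a resource's RVA into a file offset with
#   file_offset = PointerToRawData - VirtualAddress + DataRVA.
# The numbers below are illustrative, not taken from any real binary.
def _example_rva_to_file_offset():
    pointer_to_raw_data = 0x0001F600   # where .rsrc starts in the file
    virtual_address = 0x00021000       # where .rsrc is mapped in memory (RVA)
    data_rva = 0x00021058              # RVA of a resource blob inside .rsrc
    assert pointer_to_raw_data - virtual_address + data_rva == 0x0001F658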
class ResourceHeader(Structure):
_fields = {'DataSize':{'offset':0,
'size':4},
'HeaderSize':{'offset':4,
'size':4},
'Type':{'offset':8,
'size':4},
'Name':{'offset':12,
'size':4},
'DataVersion':{'offset':16,
'size':4},
'MemoryFlags':{'offset':20,
'size':2},
'LanguageID':{'offset':22,
'size':2},
'Version':{'offset':24,
'size':4},
'Characteristics':{'offset':28,
'size':4},
}
def get_name(self):
return resource_types[self.Type.value]
def set_name(self, value):
for k,v in resource_types.items():
if v == value:
self.Type.value = k
return
class IconHeader(Structure):
_fields = {'Reserved':{'offset':0,
'size':2},
'ImageType':{'offset':2,#1 for ICO, 2 for CUR, others invalid
'size':2},
'ImageCount':{'offset':4,
'size':2},
}
def copy_from(self, group_header):
self.Reserved.value = group_header.Reserved.value
self.ImageType.value = group_header.ResourceType.value
self.ImageCount.value = group_header.ResourceCount.value
self.entries = []
entry_offset = 0
self.total_size = self.size
for group_entry in group_header.entries:
            icon_entry = IconEntry.parse_from_data(bytearray(b''), absolute_offset=self.absolute_offset+self.size+entry_offset, offset=entry_offset)
            icon_entry._file_data = self._file_data
            icon_entry.copy_from(group_entry, group_header.entries)
icon_entry.number = group_entry.number
self.entries.append(icon_entry)
entry_offset += icon_entry.size
self.total_size += icon_entry.size
@classmethod
def parse_from_data(cls, file_data, **cls_args):
"""Parses the Structure from the file data."""
self = cls(**cls_args)
self._file_data = file_data
for field_name, field_info in self._fields.items():
self.process_field(file_data, field_name, field_info)
self.entries = []
entry_offset = 0
self.total_size = self.size
for i in range(self.ImageCount.value):
entry = IconEntry.parse_from_data(file_data, absolute_offset=self.absolute_offset+self.size+entry_offset, offset=entry_offset)
entry.number = i + 1
self.entries.append(entry)
entry_offset += entry.size
self.total_size += entry.size
return self
class GroupHeader(Structure):
_fields = {'Reserved':{'offset':0,
'size':2},
'ResourceType':{'offset':2,#1 for ICO, 2 for CUR, others invalid
'size':2},
'ResourceCount':{'offset':4,
'size':2},
}
def copy_from(self, icon_header):
self.Reserved._file_data = self._file_data
self.ResourceType._file_data = self._file_data
self.ResourceCount._file_data = self._file_data
self.Reserved.value = icon_header.Reserved.value
self.ResourceType.value = icon_header.ImageType.value
self.ResourceCount.value = icon_header.ImageCount.value
self.entries = []
entry_offset = 0
self.total_size = self.size
for icon_entry in icon_header.entries:
group_entry = GroupEntry.parse_from_data(bytearray(b''), absolute_offset=self.absolute_offset+self.size+entry_offset, offset=entry_offset)
group_entry._file_data = self._file_data
group_entry.copy_from(icon_entry)
group_entry.number = icon_entry.number
self.entries.append(group_entry)
entry_offset += group_entry.size
self.total_size += group_entry.size
@classmethod
def parse_from_data(cls, file_data, **cls_args):
"""Parses the Structure from the file data."""
self = cls(**cls_args)
self._file_data = file_data
for field_name, field_info in self._fields.items():
self.process_field(file_data, field_name, field_info)
self.entries = []
entry_offset = 0
self.total_size = self.size
for i in range(self.ResourceCount.value):
entry = GroupEntry.parse_from_data(file_data, absolute_offset=self.absolute_offset+self.size+entry_offset, offset=entry_offset)
entry.number = i + 1
self.entries.append(entry)
entry_offset += entry.size
self.total_size += entry.size
return self
class IconEntry(Structure):
_fields = {'Width':{'offset':0,
'size':1},
'Height':{'offset':1,
'size':1},
'ColorCount':{'offset':2,
'size':1},
'Reserved':{'offset':3,
'size':1},
'ColorPlanes':{'offset':4,
'size':2},
'BitCount':{'offset':6, #bits per pixel
'size':2},
'DataSize':{'offset':8,
'size': 4},
'OffsetToData':{'offset':12, #from start of file
'size':4},
}
def copy_from(self, group_entry, entries):
self.Width.value = group_entry.Width.value
self.Height.value = group_entry.Height.value
self.ColorCount.value = group_entry.ColorCount.value
self.Reserved.value = group_entry.Reserved.value
self.ColorPlanes.value = group_entry.ColorPlanes.value
self.BitCount.value = group_entry.BitCount.value
self.DataSize.value = group_entry.DataSize.value
self.OffsetToData.value = self._get_entry_offset(group_entry, entries)
def _get_entry_offset(self, group_entry, group_entries):
offset = 6 #Default icon header size
offset += self.size * len(group_entries)
for i in range(group_entry.number-1):
offset += group_entries[i].DataSize.value
return offset
@classmethod
def parse_from_data(cls, file_data, **cls_args):
"""Parses the Structure from the file data."""
self = cls(**cls_args)
self._file_data = file_data
for field_name, field_info in self._fields.items():
self.process_field(file_data, field_name, field_info)
self.data = read_data(file_data, self.OffsetToData.value, self.DataSize.value)
return self
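# --- Hedged worked example (added for illustration; not part of the original module) ---
# _get_entry_offset above reproduces the .ico file layout: a 6-byte ICONDIR header,
# then one 16-byte directory entry per image, then the image blobs back to back.
# The sizes below are made up.
def _example_ico_entry_offset():
    header_size, entry_size, image_count = 6, 16, 3
    previous_image_sizes = [1128, 2440]    # data sizes of images 1 and 2
    third_image_offset = header_size + entry_size * image_count + sum(previous_image_sizes)
    assert third_image_offset == 3622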
class GroupEntry(Structure):
_fields = {'Width':{'offset':0,
'size':1},
'Height':{'offset':1,
'size':1},
'ColorCount':{'offset':2,
'size':1},
'Reserved':{'offset':3,
'size':1},
'ColorPlanes':{'offset':4,
'size':2},
'BitCount':{'offset':6,
'size':2},
'DataSize':{'offset':8,
'size': 4},
'IconCursorId':{'offset':12,
'size':2},
}
def copy_from(self, icon_entry):
self.Width._file_data = self._file_data
self.Height._file_data = self._file_data
self.ColorCount._file_data = self._file_data
self.Reserved._file_data = self._file_data
self.ColorPlanes._file_data = self._file_data
self.BitCount._file_data = self._file_data
self.DataSize._file_data = self._file_data
self.IconCursorId._file_data = self._file_data
self.Width.value = icon_entry.Width.value
self.Height.value = icon_entry.Height.value
self.ColorCount.value = icon_entry.ColorCount.value
self.Reserved.value = icon_entry.Reserved.value
self.ColorPlanes.value = icon_entry.ColorPlanes.value
self.BitCount.value = icon_entry.BitCount.value
self.DataSize.value = icon_entry.DataSize.value
self.IconCursorId.value = icon_entry.number
class PEFile(Printable):
"""Reads a portable exe file in either big or little endian.
Right now this only reads the .rsrc section.
"""
signature = b'MZ'
dos_header = None
def __init__(self, file_path, endian='little'):
self.file_path = os.path.abspath(os.path.expanduser(file_path))
self.endian = endian
if not self.is_PEFile():
raise PEFormatError('File is not a proper portable executable formatted file!')
        with open(self.file_path, 'rb') as f:
            self.pe_file_data = bytearray(f.read())
self.dos_header = DOSHeader.parse_from_data(self.pe_file_data)
self.pe_header = PEHeader.parse_from_data(self.pe_file_data, absolute_offset=self.dos_header.PEHeaderOffset.value)
self.optional_header = OptionalHeader.parse_from_data(self.pe_file_data, absolute_offset=self.pe_header.size+self.pe_header.absolute_offset)
number_of_sections = self.pe_header.NumberOfSections.value
section_size = 40
section_offset = self.pe_header.size+self.pe_header.absolute_offset+self.pe_header.SizeOfOptionalHeader.value
self.sections = {}
for section_number in range(number_of_sections):
section_header = SectionHeader.parse_from_data(self.pe_file_data, absolute_offset=section_offset)
section_offset += section_size
header_name = str(section_header.Name.data, 'utf-8').strip('\x00')
self.sections[header_name] = section_header
if section_header.PointerToLineNumbers.value != 0:
                print('{} section contains a line number COFF table, which is not implemented yet.'.format(header_name))
if section_header.PointerToRelocations.value != 0:
                print('{} section contains a relocation table, which is not implemented yet.'.format(header_name))
if section_header.Name.data == b'.rsrc\x00\x00\x00':
current_table_pointer = section_header.PointerToRawData.value
current_resource_directory_table = ResourceDirectoryTable.parse_from_data(self.pe_file_data, absolute_offset=current_table_pointer, _section_header=section_header, type=None)
self.resource_directory_table = current_resource_directory_table
cur_level = 0
stack = [(current_resource_directory_table, cur_level)]
delta = section_header.VirtualAddress.value - section_header.PointerToRawData.value
while stack:
resource_directory_table, level = stack.pop()
num_name_entries = resource_directory_table.NumberOfNameEntries.value
num_id_entries = resource_directory_table.NumberOfIDEntries.value
current_offset = resource_directory_table.absolute_offset + resource_directory_table.size
for i in range(num_name_entries):
name_entry = ResourceDirectoryEntryName.parse_from_data(self.pe_file_data, absolute_offset=current_offset, _section_header=section_header)
current_offset += name_entry.size
string_offset = name_entry.get_name_absolute_offset()
name_entry.directory_string = ResourceDirectoryString.parse_from_data(self.pe_file_data, absolute_offset=string_offset, _section_header=section_header)
offset = name_entry.get_data_or_subdirectory_absolute_offset()
if not name_entry.data_rva_empty():
if name_entry.is_data_entry():
rd = ResourceDataEntry.parse_from_data(self.pe_file_data, absolute_offset=offset, _section_header=section_header)
resource_directory_table.data_entries.append(rd)
else:
rd = ResourceDirectoryTable.parse_from_data(self.pe_file_data, absolute_offset=offset, _section_header=section_header, type=None)
resource_directory_table.subdirectory_tables.append(rd)
stack.append((rd, level+1))
resource_directory_table.name_entries.append(name_entry)
for i in range(num_id_entries):
id_entry = ResourceDirectoryEntryID.parse_from_data(self.pe_file_data, absolute_offset=current_offset, _section_header=section_header)
current_offset += id_entry.size
offset = id_entry.get_data_or_subdirectory_absolute_offset()
if id_entry.is_data_entry():
rd = ResourceDataEntry.parse_from_data(self.pe_file_data, absolute_offset=offset, _section_header=section_header)
resource_directory_table.data_entries.append(rd)
else:
id_entry.name = str(id_entry.IntegerID.value)
if level+1 == 1:
id_entry.name = resource_types[id_entry.IntegerID.value]
rd = ResourceDirectoryTable.parse_from_data(self.pe_file_data, absolute_offset=offset, _section_header=section_header, type=id_entry.IntegerID.value)
resource_directory_table.subdirectory_tables.append(rd)
stack.append((rd, level+1))
resource_directory_table.id_entries.append(id_entry)
def replace_icon(self, icon_path):
"""Replaces an icon in the pe file with the one specified.
This only replaces the largest icon and resizes the input
image to match so that the data is undisturbed. I tried to
update the pointers automatically by moving the data to the end
of the file, but that did not work. Comments were left as history
to what I attempted.
"""
icon_path = os.path.expanduser(icon_path) #this needs to be a string and not unicode
if not os.path.exists(icon_path):
raise Exception('Icon {} does not exist'.format(icon_path))
resource_section = self.sections['.rsrc']
g_icon_dir = self.get_directory_by_type(ResourceTypes.Group_Icon)
g_icon_data_entry = g_icon_dir.subdirectory_tables[0].data_entries[0]
icon_dir = self.get_directory_by_type(ResourceTypes.Icon)
icon_data_entry = icon_dir.subdirectory_tables[0].data_entries[0]
group_header = GroupHeader.parse_from_data(self.pe_file_data, absolute_offset=g_icon_data_entry.get_data_absolute_offset())
g_entry = group_header.entries[0]
icon = Image.open(icon_path)
width = g_entry.Width.value
height = g_entry.Height.value
if width == 0:
width = 256
if height == 0:
height = 256
i_data = resize(icon, (width, height), format='ico')
new_icon_size = len(i_data)
icon_file_size = g_entry.DataSize.value+group_header.size+g_entry.size+2
#9662 is the exact length of the icon in nw.exe
extra_size = icon_file_size-new_icon_size
if extra_size < 0:
extra_size = 0
icon_data = bytearray(i_data) + bytearray(extra_size)
icon_header = IconHeader.parse_from_data(icon_data, absolute_offset=0)
#group_header.absolute_offset = len(self.pe_file_data)
#g_icon_data_entry.DataRVA.value = len(self.pe_file_data) - resource_section.PointerToRawData.value + resource_section.VirtualAddress.value
#padding = 6+14*len(icon_header.entries)
#g_icon_data_entry.Size.value = padding
#self.pe_file_data += bytearray(padding)
group_header._file_data = self.pe_file_data
group_header.copy_from(icon_header)
#icon_data_entry.DataRVA.value = len(self.pe_file_data) - resource_section.PointerToRawData.value + resource_section.VirtualAddress.value
#print hex(icon_data_entry.DataRVA.value), hex(len(self.pe_file_data)), hex(icon_data_entry.get_data_absolute_offset())
#print hex(read_bytes(self.pe_file_data[icon_data_entry.DataRVA.absolute_offset:icon_data_entry.DataRVA.absolute_offset+icon_data_entry.DataRVA.size],0, icon_data_entry.DataRVA.size)[0])
#data = bytearray()
#for entry in icon_header.entries:
#data += entry.data
#self.pe_file_data += entry.data
#icon_data_entry.Size.value = len(data)
#self.optional_header.SizeOfImage.value = self.optional_header.SizeOfImage.value + len(data) + padding
#self.optional_header.ResourceTableSize.value = self.optional_header.ResourceTableSize.value + len(data) + padding
#self.optional_header.SizeOfInitializedData.value = self.optional_header.SizeOfInitializedData.value + len(data) + padding
#resource_section.SizeOfRawData.value = resource_section.SizeOfRawData.value + len(data) + padding
#resource_section.VirtualSize.value = resource_section.VirtualSize.value + len(data) + padding
#print icon_header.total_size
data_address = icon_data_entry.get_data_absolute_offset()
data_size = icon_data_entry.Size.value
self.pe_file_data = self.pe_file_data[:data_address] + icon_data[icon_header.total_size:] + self.pe_file_data[data_address+data_size:]
def write(self, file_name):
with open(file_name, 'wb+') as f:
f.write(self.pe_file_data)
def get_directory_by_type(self, type):
"""Gets the directory by resource type."""
for d in self.resource_directory_table.subdirectory_tables:
if d.type == type:
return d
    def is_PEFile(self):
        """Checks if the file is a proper PE file"""
        try:
            with open(self.file_path, 'rb') as f:
                signature = f.read(2)
        except IOError:
            return False
        return signature == self.signature
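# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Typical use of the PEFile class above: parse an executable, inspect its resource
# section, swap the largest icon and write a patched copy. The file names are
# placeholders, not files shipped with this code.
def _example_patch_icon():
    pe = PEFile('app.exe')                      # hypothetical input binary
    rsrc = pe.sections['.rsrc']                 # SectionHeader objects keyed by name
    print(hex(rsrc.VirtualAddress.value), hex(rsrc.SizeOfRawData.value))
    pe.replace_icon('new_icon.ico')             # resized to fit the slot it replaces
    pe.write('app_patched.exe')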
|
|
# -*- coding: utf-8 -*-
"""
tipfy.i18n
~~~~~~~~~~
Internationalization extension.
This extension provides internationalization utilities: a translations
store, hooks to set locale for the current request, functions to manipulate
dates according to timezones or translate and localize strings and dates.
It uses `Babel <http://babel.edgewall.org/>`_ to manage translations of
strings and localization of dates and times, and
`gae-pytz <http://code.google.com/p/gae-pytz/>`_ to handle timezones.
Several ideas and code were borrowed from
`Flask-Babel <http://pypi.python.org/pypi/Flask-Babel/>`_ and
`Kay <http://code.google.com/p/kay-framework/>`_.
:copyright: 2011 by tipfy.org.
:license: BSD, see LICENSE.txt for more details.
"""
from datetime import datetime
import os
from babel import Locale, dates, numbers, support
try:
from pytz.gae import pytz
except ImportError:
try:
import pytz
except ImportError:
raise RuntimeError('gaepytz or pytz are required.')
from tipfy.local import get_request
#: Default configuration values for this module. Keys are:
#:
#: locale
#: The application default locale code. Default is ``en_US``.
#:
#: timezone
#: The application default timezone according to the Olson
#: database. Default is ``America/Chicago``.
#:
#: locale_session_key
#: Session key used to save requested locale, if sessions are used.
#:
#: timezone_session_key
#: Session key used to save requested timezone, if sessions are used.
#:
#: locale_request_lookup
#: A list of tuples (method, key) to search
#: for the locale to be loaded for the current request. The methods are
#: searched in order until a locale is found. Available methods are:
#:
#: - args: gets the locale code from ``GET`` arguments.
#: - form: gets the locale code from ``POST`` arguments.
#: - session: gets the locale code from the current session.
#: - cookies: gets the locale code from a cookie.
#: - rule_args: gets the locale code from the keywords in the current
#: URL rule.
#:
#: If none of the methods find a locale code, uses the default locale.
#: Default is ``[('session', '_locale')]``: gets the locale from the
#: session key ``_locale``.
#:
#: timezone_request_lookup
#: Same as `locale_request_lookup`, but for the timezone.
#:
#: date_formats
#: Default date formats for datetime, date and time.
default_config = {
'locale': 'en_US',
'timezone': 'America/Chicago',
'locale_session_key': '_locale',
'timezone_session_key': '_timezone',
'locale_request_lookup': [('session', '_locale')],
'timezone_request_lookup': [('session', '_timezone')],
'date_formats': {
'time': 'medium',
'date': 'medium',
'datetime': 'medium',
'time.short': None,
'time.medium': None,
'time.full': None,
'time.long': None,
'time.iso': "HH':'mm':'ss",
'date.short': None,
'date.medium': None,
'date.full': None,
'date.long': None,
'date.iso': "yyyy'-'MM'-'dd",
'datetime.short': None,
'datetime.medium': None,
'datetime.full': None,
'datetime.long': None,
'datetime.iso': "yyyy'-'MM'-'dd'T'HH':'mm':'ssZ",
},
}
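# --- Hedged configuration sketch (added for illustration; not part of the original module) ---
# Example of overriding the defaults above in a tipfy app config; the section key is
# this module's __name__ ('tipfy.i18n'), which is how I18nStore looks it up below.
# Here the locale is read from a ?lang= GET argument before falling back to the session.
def _example_config():
    return {
        'tipfy.i18n': {
            'locale': 'pt_BR',
            'timezone': 'America/Sao_Paulo',
            'locale_request_lookup': [('args', 'lang'), ('session', '_locale')],
        },
    }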
class I18nMiddleware(object):
"""Saves the current locale in the session at the end of request, if it
differs from the current value stored in the session.
"""
def after_dispatch(self, handler, response):
"""Called after the class:`tipfy.RequestHandler` method was executed.
:param handler:
A class:`tipfy.RequestHandler` instance.
:param response:
A class:`tipfy.Response` instance.
:returns:
A class:`tipfy.Response` instance.
"""
session = handler.session
i18n = handler.i18n
locale_session_key = i18n.config['locale_session_key']
timezone_session_key = i18n.config['timezone_session_key']
# Only save if it differs from original session value.
if i18n.locale != session.get(locale_session_key):
session[locale_session_key] = i18n.locale
if i18n.timezone != session.get(timezone_session_key):
session[timezone_session_key] = i18n.timezone
return response
class I18nStore(object):
#: Loaded translations.
loaded_translations = None
#: Current locale.
locale = None
#: Current translations.
translations = None
#: Current timezone.
timezone = None
#: Current tzinfo.
tzinfo = None
def __init__(self, request):
self.config = request.app.config[__name__]
self.loaded_translations = request.app.registry.setdefault(
'i18n.translations', {})
self.set_locale_for_request(request)
self.set_timezone_for_request(request)
def set_locale_for_request(self, request):
locale = _get_request_value(request,
self.config['locale_request_lookup'], self.config['locale'])
self.set_locale(locale)
def set_timezone_for_request(self, request):
timezone = _get_request_value(request,
self.config['timezone_request_lookup'], self.config['timezone'])
self.set_timezone(timezone)
def set_locale(self, locale):
"""Sets the current locale and translations.
:param locale:
A locale code, e.g., ``pt_BR``.
"""
self.locale = locale
if locale not in self.loaded_translations:
locales = [locale]
if locale != self.config['locale']:
locales.append(self.config['locale'])
self.loaded_translations[locale] = self.load_translations(locales)
self.translations = self.loaded_translations[locale]
def set_timezone(self, timezone):
"""Sets the current timezone and tzinfo.
:param timezone:
The timezone name from the Olson database, e.g.:
``America/Chicago``.
"""
self.timezone = timezone
self.tzinfo = pytz.timezone(timezone)
def load_translations(self, locales, dirname='locale', domain='messages'):
return support.Translations.load(dirname, locales, domain)
def gettext(self, string, **variables):
"""Translates a given string according to the current locale.
:param string:
The string to be translated.
:param variables:
Variables to format the returned string.
:returns:
The translated string.
"""
if variables:
return self.translations.ugettext(string) % variables
return self.translations.ugettext(string)
def ngettext(self, singular, plural, n, **variables):
"""Translates a possible pluralized string according to the current
locale.
:param singular:
            The singular form of the string to be translated.
:param plural:
            The plural form of the string to be translated.
:param n:
An integer indicating if this is a singular or plural. If greater
than 1, it is a plural.
:param variables:
Variables to format the returned string.
:returns:
The translated string.
"""
if variables:
return self.translations.ungettext(singular, plural, n) % variables
return self.translations.ungettext(singular, plural, n)
def to_local_timezone(self, datetime):
"""Returns a datetime object converted to the local timezone.
:param datetime:
A ``datetime`` object.
:returns:
A ``datetime`` object normalized to a timezone.
"""
if datetime.tzinfo is None:
datetime = datetime.replace(tzinfo=pytz.UTC)
return self.tzinfo.normalize(datetime.astimezone(self.tzinfo))
def to_utc(self, datetime):
"""Returns a datetime object converted to UTC and without tzinfo.
:param datetime:
A ``datetime`` object.
:returns:
A naive ``datetime`` object (no timezone), converted to UTC.
"""
if datetime.tzinfo is None:
datetime = self.tzinfo.localize(datetime)
return datetime.astimezone(pytz.UTC).replace(tzinfo=None)
def _get_format(self, key, format):
"""A helper for the datetime formatting functions. Returns a format
name or pattern to be used by Babel date format functions.
:param key:
A format key to be get from config. Valid values are "date",
"datetime" or "time".
:param format:
The format to be returned. Valid values are "short", "medium",
"long", "full" or a custom date/time pattern.
:returns:
A format name or pattern to be used by Babel date format functions.
"""
if format is None:
format = self.config['date_formats'].get(key)
if format in ('short', 'medium', 'full', 'long', 'iso'):
rv = self.config['date_formats'].get('%s.%s' % (key, format))
if rv is not None:
format = rv
return format
def format_date(self, date=None, format=None, rebase=True):
"""Returns a date formatted according to the given pattern and
following the current locale.
:param date:
A ``date`` or ``datetime`` object. If None, the current date in
UTC is used.
:param format:
The format to be returned. Valid values are "short", "medium",
"long", "full" or a custom date/time pattern. Example outputs:
- short: 11/10/09
- medium: Nov 10, 2009
- long: November 10, 2009
- full: Tuesday, November 10, 2009
:param rebase:
If True, converts the date to the current :attr:`timezone`.
:returns:
A formatted date in unicode.
"""
format = self._get_format('date', format)
if rebase and isinstance(date, datetime):
date = self.to_local_timezone(date)
return dates.format_date(date, format, locale=self.locale)
def format_datetime(self, datetime=None, format=None, rebase=True):
"""Returns a date and time formatted according to the given pattern
and following the current locale and timezone.
:param datetime:
A ``datetime`` object. If None, the current date and time in UTC
is used.
:param format:
The format to be returned. Valid values are "short", "medium",
"long", "full" or a custom date/time pattern. Example outputs:
- short: 11/10/09 4:36 PM
- medium: Nov 10, 2009 4:36:05 PM
- long: November 10, 2009 4:36:05 PM +0000
- full: Tuesday, November 10, 2009 4:36:05 PM World (GMT) Time
:param rebase:
If True, converts the datetime to the current :attr:`timezone`.
:returns:
A formatted date and time in unicode.
"""
format = self._get_format('datetime', format)
kwargs = {}
if rebase:
kwargs['tzinfo'] = self.tzinfo
return dates.format_datetime(datetime, format, locale=self.locale,
**kwargs)
def format_time(self, time=None, format=None, rebase=True):
"""Returns a time formatted according to the given pattern and
following the current locale and timezone.
:param time:
A ``time`` or ``datetime`` object. If None, the current
time in UTC is used.
:param format:
The format to be returned. Valid values are "short", "medium",
"long", "full" or a custom date/time pattern. Example outputs:
- short: 4:36 PM
- medium: 4:36:05 PM
- long: 4:36:05 PM +0000
- full: 4:36:05 PM World (GMT) Time
:param rebase:
If True, converts the time to the current :attr:`timezone`.
:returns:
A formatted time in unicode.
"""
format = self._get_format('time', format)
kwargs = {}
if rebase:
kwargs['tzinfo'] = self.tzinfo
return dates.format_time(time, format, locale=self.locale, **kwargs)
def format_timedelta(self, datetime_or_timedelta, granularity='second',
threshold=.85):
"""Formats the elapsed time from the given date to now or the given
timedelta. This currently requires an unreleased development version
of Babel.
:param datetime_or_timedelta:
A ``timedelta`` object representing the time difference to format,
or a ``datetime`` object in UTC.
:param granularity:
Determines the smallest unit that should be displayed, the value
can be one of "year", "month", "week", "day", "hour", "minute" or
"second".
:param threshold:
Factor that determines at which point the presentation switches to
the next higher unit.
:returns:
A string with the elapsed time.
"""
if isinstance(datetime_or_timedelta, datetime):
datetime_or_timedelta = datetime.utcnow() - datetime_or_timedelta
return dates.format_timedelta(datetime_or_timedelta, granularity,
threshold=threshold, locale=self.locale)
def format_number(self, number):
"""Returns the given number formatted for the current locale. Example::
>>> format_number(1099, locale='en_US')
u'1,099'
:param number:
The number to format.
:returns:
The formatted number.
"""
return numbers.format_number(number, locale=self.locale)
def format_decimal(self, number, format=None):
"""Returns the given decimal number formatted for the current locale.
Example::
>>> format_decimal(1.2345, locale='en_US')
u'1.234'
>>> format_decimal(1.2346, locale='en_US')
u'1.235'
>>> format_decimal(-1.2346, locale='en_US')
u'-1.235'
>>> format_decimal(1.2345, locale='sv_SE')
u'1,234'
>>> format_decimal(12345, locale='de')
u'12.345'
The appropriate thousands grouping and the decimal separator are used
for each locale::
>>> format_decimal(12345.5, locale='en_US')
u'12,345.5'
:param number:
The number to format.
:param format:
Notation format.
:returns:
The formatted decimal number.
"""
return numbers.format_decimal(number, format=format,
locale=self.locale)
def format_currency(self, number, currency, format=None):
"""Returns a formatted currency value. Example::
>>> format_currency(1099.98, 'USD', locale='en_US')
u'$1,099.98'
>>> format_currency(1099.98, 'USD', locale='es_CO')
u'US$\\xa01.099,98'
>>> format_currency(1099.98, 'EUR', locale='de_DE')
u'1.099,98\\xa0\\u20ac'
The pattern can also be specified explicitly::
>>> format_currency(1099.98, 'EUR', u'\\xa4\\xa4 #,##0.00', locale='en_US')
u'EUR 1,099.98'
:param number:
The number to format.
:param currency:
The currency code.
:param format:
Notation format.
:returns:
The formatted currency value.
"""
return numbers.format_currency(number, currency, format=format,
locale=self.locale)
def format_percent(self, number, format=None):
"""Returns formatted percent value for the current locale. Example::
>>> format_percent(0.34, locale='en_US')
u'34%'
>>> format_percent(25.1234, locale='en_US')
u'2,512%'
>>> format_percent(25.1234, locale='sv_SE')
u'2\\xa0512\\xa0%'
The format pattern can also be specified explicitly::
>>> format_percent(25.1234, u'#,##0\u2030', locale='en_US')
u'25,123\u2030'
:param number:
The percent number to format
:param format:
Notation format.
:returns:
The formatted percent number.
"""
return numbers.format_percent(number, format=format,
locale=self.locale)
def format_scientific(self, number, format=None):
"""Returns value formatted in scientific notation for the current
locale. Example::
>>> format_scientific(10000, locale='en_US')
u'1E4'
The format pattern can also be specified explicitly::
>>> format_scientific(1234567, u'##0E00', locale='en_US')
u'1.23E06'
:param number:
The number to format.
:param format:
Notation format.
:returns:
Value formatted in scientific notation.
"""
return numbers.format_scientific(number, format=format,
locale=self.locale)
def parse_date(self, string):
"""Parses a date from a string.
This function uses the date format for the locale as a hint to
determine the order in which the date fields appear in the string.
Example::
>>> parse_date('4/1/04', locale='en_US')
datetime.date(2004, 4, 1)
>>> parse_date('01.04.2004', locale='de_DE')
datetime.date(2004, 4, 1)
:param string:
The string containing the date.
:returns:
The parsed date object.
"""
return dates.parse_date(string, locale=self.locale)
def parse_datetime(self, string):
"""Parses a date and time from a string.
This function uses the date and time formats for the locale as a hint
to determine the order in which the time fields appear in the string.
:param string:
The string containing the date and time.
:returns:
The parsed datetime object.
"""
return dates.parse_datetime(string, locale=self.locale)
def parse_time(self, string):
"""Parses a time from a string.
This function uses the time format for the locale as a hint to
determine the order in which the time fields appear in the string.
Example::
>>> parse_time('15:30:00', locale='en_US')
datetime.time(15, 30)
:param string:
The string containing the time.
:returns:
The parsed time object.
"""
return dates.parse_time(string, locale=self.locale)
def parse_number(self, string):
"""Parses localized number string into a long integer. Example::
>>> parse_number('1,099', locale='en_US')
1099L
>>> parse_number('1.099', locale='de_DE')
1099L
When the given string cannot be parsed, an exception is raised::
>>> parse_number('1.099,98', locale='de')
Traceback (most recent call last):
...
NumberFormatError: '1.099,98' is not a valid number
:param string:
The string to parse.
:returns:
The parsed number.
:raises:
``NumberFormatError`` if the string can not be converted to a
number.
"""
return numbers.parse_number(string, locale=self.locale)
def parse_decimal(self, string):
"""Parses localized decimal string into a float. Example::
>>> parse_decimal('1,099.98', locale='en_US')
1099.98
>>> parse_decimal('1.099,98', locale='de')
1099.98
When the given string cannot be parsed, an exception is raised::
>>> parse_decimal('2,109,998', locale='de')
Traceback (most recent call last):
...
NumberFormatError: '2,109,998' is not a valid decimal number
:param string:
The string to parse.
:returns:
The parsed decimal number.
:raises:
``NumberFormatError`` if the string can not be converted to a
decimal number.
"""
return numbers.parse_decimal(string, locale=self.locale)
def get_timezone_location(self, dt_or_tzinfo):
"""Returns a representation of the given timezone using "location
format".
The result depends on both the local display name of the country and
        the city associated with the time zone::
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> get_timezone_location(tz, locale='de_DE')
u"Kanada (St. John's)"
>>> tz = timezone('America/Mexico_City')
>>> get_timezone_location(tz, locale='de_DE')
u'Mexiko (Mexiko-Stadt)'
If the timezone is associated with a country that uses only a single
timezone, just the localized country name is returned::
>>> tz = timezone('Europe/Berlin')
        >>> get_timezone_location(tz, locale='de_DE')
u'Deutschland'
:param dt_or_tzinfo:
The ``datetime`` or ``tzinfo`` object that determines
the timezone; if None, the current date and time in UTC is assumed.
:returns:
The localized timezone name using location format.
"""
        return dates.get_timezone_location(dt_or_tzinfo, locale=self.locale)
def set_locale(locale):
"""See :meth:`I18nStore.set_locale`."""
return get_request().i18n.set_locale(locale)
def set_timezone(timezone):
"""See :meth:`I18nStore.set_timezone`."""
return get_request().i18n.set_timezone(timezone)
def gettext(string, **variables):
"""See :meth:`I18nStore.gettext`."""
return get_request().i18n.gettext(string, **variables)
def ngettext(singular, plural, n, **variables):
"""See :meth:`I18nStore.ngettext`."""
return get_request().i18n.ngettext(singular, plural, n, **variables)
def to_local_timezone(datetime):
"""See :meth:`I18nStore.to_local_timezone`."""
return get_request().i18n.to_local_timezone(datetime)
def to_utc(datetime):
"""See :meth:`I18nStore.to_utc`."""
return get_request().i18n.to_utc(datetime)
def format_date(date=None, format=None, rebase=True):
"""See :meth:`I18nStore.format_date`."""
return get_request().i18n.format_date(date, format, rebase)
def format_datetime(datetime=None, format=None, rebase=True):
"""See :meth:`I18nStore.format_datetime`."""
return get_request().i18n.format_datetime(datetime, format, rebase)
def format_time(time=None, format=None, rebase=True):
"""See :meth:`I18nStore.format_time`."""
return get_request().i18n.format_time(time, format, rebase)
def format_timedelta(datetime_or_timedelta, granularity='second',
threshold=.85):
"""See :meth:`I18nStore.format_timedelta`."""
return get_request().i18n.format_timedelta(datetime_or_timedelta,
granularity, threshold)
def format_number(number):
"""See :meth:`I18nStore.format_number`."""
return get_request().i18n.format_number(number)
def format_decimal(number, format=None):
"""See :meth:`I18nStore.format_decimal`."""
return get_request().i18n.format_decimal(number, format)
def format_currency(number, currency, format=None):
"""See :meth:`I18nStore.format_currency`."""
return get_request().i18n.format_currency(number, currency, format)
def format_percent(number, format=None):
"""See :meth:`I18nStore.format_percent`."""
return get_request().i18n.format_percent(number, format)
def format_scientific(number, format=None):
"""See :meth:`I18nStore.format_scientific`."""
return get_request().i18n.format_scientific(number, format)
def parse_date(string):
"""See :meth:`I18nStore.parse_date`"""
return get_request().i18n.parse_date(string)
def parse_datetime(string):
"""See :meth:`I18nStore.parse_datetime`."""
return get_request().i18n.parse_datetime(string)
def parse_time(string):
"""See :meth:`I18nStore.parse_time`."""
return get_request().i18n.parse_time(string)
def parse_number(string):
"""See :meth:`I18nStore.parse_number`."""
return get_request().i18n.parse_number(string)
def parse_decimal(string):
"""See :meth:`I18nStore.parse_decimal`."""
return get_request().i18n.parse_decimal(string)
def get_timezone_location(dt_or_tzinfo):
"""See :meth:`I18nStore.get_timezone_location`."""
return get_request().i18n.get_timezone_location(dt_or_tzinfo)
def list_translations(dirname='locale'):
"""Returns a list of all the existing translations. The list returned
will be filled with actual locale objects and not just strings.
:param dirname:
Path to the translations directory.
:returns:
A list of ``babel.Locale`` objects.
"""
if not os.path.isdir(dirname):
return []
result = []
for folder in sorted(os.listdir(dirname)):
if os.path.isdir(os.path.join(dirname, folder, 'LC_MESSAGES')):
result.append(Locale.parse(folder))
return result
def lazy_gettext(string, **variables):
"""A lazy version of :func:`gettext`.
:param string:
The string to be translated.
:param variables:
Variables to format the returned string.
:returns:
A ``babel.support.LazyProxy`` object that when accessed translates
the string.
"""
return support.LazyProxy(gettext, string, **variables)
def lazy_ngettext(singular, plural, n, **variables):
"""A lazy version of :func:`ngettext`.
:param singular:
        The singular form of the string to be translated.
:param plural:
        The plural form of the string to be translated.
:param n:
An integer indicating if this is a singular or plural. If greater
than 1, it is a plural.
:param variables:
Variables to format the returned string.
:returns:
A ``babel.support.LazyProxy`` object that when accessed translates
the string.
"""
return support.LazyProxy(ngettext, singular, plural, n, **variables)
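# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# lazy_gettext is meant for strings created at import time (form labels, constants),
# before any request -- and therefore any locale -- exists; the LazyProxy only calls
# gettext() when the value is actually used inside a request.
def _example_lazy_label():
    label = lazy_gettext(u'Welcome, %(name)s!', name=u'World')
    return label   # still untranslated here; resolves once a request sets the locale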
def _get_request_value(request, lookup_list, default=None):
"""Returns a locale code or timezone for the current request.
It will use the configuration for ``locale_request_lookup`` or
``timezone_request_lookup`` to search for a key in ``GET``, ``POST``,
session, cookie or keywords in the current URL rule. If no value is
found, returns the default value.
:param request:
A :class:`tipfy.app.Request` instance.
:param lookup_list:
A list of `(attribute, key)` tuples to search in request, e.g.,
``[('args', 'lang'), ('session', 'locale')]``.
:default:
Default value to return in case none is found.
:returns:
A locale code or timezone setting.
"""
value = None
attrs = ('args', 'form', 'cookies', 'session', 'rule_args')
for method, key in lookup_list:
if method in attrs:
# Get from GET, POST, cookies or rule_args.
obj = getattr(request, method)
else:
obj = None
if obj:
value = obj.get(key, None)
if value:
break
else:
value = default
return value
# Alias to gettext.
_ = gettext
|
|
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from PyQt4 import QtCore, QtGui
import glob
from itertools import chain
import os
from datetime import datetime
from time import strptime
from core.thumbnails import ThumbnailCache
from core import debug
from core.collection import Collection, MashupEntity, ThumbnailEntity, \
VistrailEntity, WorkflowEntity, WorkflowExecEntity
from core.collection.search import SearchCompiler, SearchParseError
from core.db.locator import FileLocator
from gui.common_widgets import QToolWindowInterface, QToolWindow, QSearchBox
from gui.vistrails_palette import QVistrailsPaletteInterface
from gui.theme import CurrentTheme
from gui.module_palette import QModuleTreeWidgetItemDelegate
from gui.vis_diff import QDiffView
from core.collection.entity import Entity
class QCollectionWidget(QtGui.QTreeWidget):
""" This is an abstract class that contains functions for handling
    a core.collection.Collection object.
    A subclass should provide a view of the collection.
"""
def __init__(self, collection, parent=None):
QtGui.QTreeWidget.__init__(self, parent)
self.collection = collection
self.collection.add_listener(self)
self.setExpandsOnDoubleClick(False)
self.connect(self,
QtCore.SIGNAL('itemDoubleClicked(QTreeWidgetItem *, int)'),
self.item_selected)
self.setIconSize(QtCore.QSize(16,16))
def setup_widget(self, workspace=None):
""" Adds the items from the current workspace """
pass
def updated(self):
""" Called from the collection when committed """
self.setup_widget()
def run_search(self, search, items=None):
# FIXME only uses top level items
if items is None:
items = [self.topLevelItem(i)
for i in xrange(self.topLevelItemCount())]
for item in items:
if search.match(item.entity):
item.setHidden(False)
parent = item.parent()
while parent is not None:
if parent.isHidden():
parent.setHidden(False)
parent = parent.parent()
else:
item.setHidden(True)
self.run_search(search, [item.child(i)
for i in xrange(item.childCount())])
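    # Editorial note: run_search hides every item whose entity does not match
    # and re-exposes the ancestors of each match so that matching descendants
    # remain visible; reset_search below simply unhides everything again.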
def reset_search(self, items=None):
if items is None:
items = [self.topLevelItem(i)
for i in xrange(self.topLevelItemCount())]
for item in items:
item.setHidden(False)
self.reset_search([item.child(i)
for i in xrange(item.childCount())])
def item_selected(self, widget_item, column):
#print 'item_selected'
locator = widget_item.entity.locator()
#print "locator", locator
import gui.application
# if not locator.is_valid():
# debug.critical("Locator is not valid:" % locator.to_url())
# return
app = gui.application.get_vistrails_application()
open_vistrail = app.builderWindow.open_vistrail_without_prompt
args = {}
args['version'] = locator.kwargs.get('version_node', None) or \
locator.kwargs.get('version_tag', None)
if args['version']:
# set vistrail name
locator = widget_item.entity.parent.locator()
pass
#locator._name = widget_item.entity.parent.name
workflow_exec = locator.kwargs.get('workflow_exec', None)
if workflow_exec:
args['workflow_exec'] = workflow_exec
locator = widget_item.entity.parent.parent.locator()
locator.update_from_gui(self)
# set vistrail name
#locator._name = widget_item.entity.parent.parent.name
locator.update_from_gui(self)
# print '*** opening'
# print locator.to_url()
# print locator.name
# print '***'
open_vistrail(locator, **args)
def contextMenuEvent(self, event):
item = self.itemAt(event.pos())
menu = QtGui.QMenu(self)
if item:
# find top level
p = item
while p.parent():
p = p.parent()
act = QtGui.QAction("&Update", self)
act.setStatusTip("Update this object")
QtCore.QObject.connect(act,
QtCore.SIGNAL("triggered()"),
p.refresh_object)
menu.addAction(act)
act = QtGui.QAction("&Remove", self)
act.setStatusTip("Remove from this list")
QtCore.QObject.connect(act,
QtCore.SIGNAL("triggered()"),
p.remove_object)
menu.addAction(act)
act = QtGui.QAction("", self)
act.setSeparator(True)
menu.addAction(act)
act = QtGui.QAction("Check &All", self)
act.setStatusTip("Removes deleted files")
QtCore.QObject.connect(act,
QtCore.SIGNAL("triggered()"),
self.check_objects)
menu.addAction(act)
act = QtGui.QAction("Remove All", self)
act.setStatusTip("Removes all files")
QtCore.QObject.connect(act,
QtCore.SIGNAL("triggered()"),
self.remove_all)
menu.addAction(act)
act = QtGui.QAction("Add &File", self)
act.setStatusTip("Add specified vistrail file")
QtCore.QObject.connect(act,
QtCore.SIGNAL("triggered()"),
self.add_file)
menu.addAction(act)
act = QtGui.QAction("Add from &Directory", self)
act.setStatusTip("Add all vistrail files in a directory")
QtCore.QObject.connect(act,
QtCore.SIGNAL("triggered()"),
self.add_dir)
menu.addAction(act)
act = QtGui.QAction("", self)
act.setSeparator(True)
menu.addAction(act)
act = QtGui.QAction("Add a new Workspace", self)
act.setStatusTip("Create a new workspace")
QtCore.QObject.connect(act,
QtCore.SIGNAL("triggered()"),
self.add_workspace)
menu.addAction(act)
if self.collection.currentWorkspace != 'Default':
act = QtGui.QAction("Delete Workspace", self)
act.setStatusTip("Remove current workspace")
QtCore.QObject.connect(act,
QtCore.SIGNAL("triggered()"),
self.delete_workspace)
menu.addAction(act)
menu.exec_(event.globalPos())
def check_objects(self):
items = [self.topLevelItem(i)
for i in xrange(self.topLevelItemCount())]
for item in items:
item.entity.locator().update_from_gui(self)
if not self.collection.urlExists(item.entity.url):
self.collection.delete_entity(item.entity)
self.collection.commit()
def remove_all(self):
items = [self.topLevelItem(i)
for i in xrange(self.topLevelItemCount())]
for item in items:
self.collection.del_from_workspace(item.entity)
self.collection.commit()
def add_file(self):
s = QtGui.QFileDialog.getOpenFileName(
self, "Choose a file",
"", "Vistrail files (*.vt *.xml)");
if str(s):
locator = FileLocator(str(s))
url = locator.to_url()
entity = self.collection.updateVistrail(url)
# add to relevant workspace categories
self.collection.add_to_workspace(entity)
self.collection.commit()
def add_dir(self):
s = QtGui.QFileDialog.getExistingDirectory(
self, "Choose a directory",
"", QtGui.QFileDialog.ShowDirsOnly);
if str(s):
self.update_from_directory(str(s))
def update_from_directory(self, s):
        filenames = (glob.glob(os.path.join(s, '*.vt')) +
                     glob.glob(os.path.join(s, '*.xml')))
progress = QtGui.QProgressDialog('', '', 0, len(filenames))
progress.setWindowTitle('Adding files')
progress.setMinimumDuration(500)
progress.setWindowModality(QtCore.Qt.WindowModal)
i = 0
for filename in filenames:
progress.setValue(i)
progress.setLabelText(filename)
i += 1
try:
locator = FileLocator(filename)
url = locator.to_url()
entity = self.collection.updateVistrail(url)
self.collection.add_to_workspace(entity)
except:
debug.critical("Failed to add file '%s'" % filename)
progress.setValue(len(filenames))
self.collection.commit()
def add_workspace(self):
text, ok = QtGui.QInputDialog.getText(self, 'Create workspace',
'Enter new workspace name:')
workspace = str(text).strip()
if ok and workspace != '':
self.collection.currentWorkspace = workspace
if workspace not in self.collection.workspaces:
self.collection.add_workspace(workspace)
self.collection.commit()
self.emit(QtCore.SIGNAL("workspaceListUpdated()"))
def delete_workspace(self):
if self.collection.currentWorkspace != 'Default':
self.collection.delete_workspace(self.collection.currentWorkspace)
self.collection.currentWorkspace = 'Default'
self.collection.commit()
self.emit(QtCore.SIGNAL("workspaceListUpdated()"))
class QWorkspaceWidget(QCollectionWidget):
""" This class implements QCollectionWidget as a side bar browser widget
"""
def __init__(self, collection, parent=None):
QCollectionWidget.__init__(self, collection, parent)
self.setColumnCount(1)
self.setHeaderHidden(True)
def setup_widget(self, workspace=None):
""" Adds the items from the current workspace """
while self.topLevelItemCount():
self.takeTopLevelItem(0)
if workspace:
self.collection.currentWorkspace = workspace
for entity in self.collection.workspaces[self.collection.currentWorkspace]:
item = QBrowserWidgetItem(entity, self)
self.addTopLevelItem(item)
# if self.collection.currentWorkspace != 'Default':
self.setSortingEnabled(True)
self.sortItems(0, QtCore.Qt.AscendingOrder)
class QWorkflowsItem(QtGui.QTreeWidgetItem):
def __init__(self, parent=None):
QtGui.QTreeWidgetItem.__init__(self, parent, ['Workflows'])
class QMashupsItem(QtGui.QTreeWidgetItem):
def __init__(self, parent=None):
QtGui.QTreeWidgetItem.__init__(self, parent, ['Mashups'])
class QBrowserWidgetItem(QtGui.QTreeWidgetItem):
def __init__(self, entity, parent=None):
if not entity:
# assuming an unsaved item
QtGui.QTreeWidgetItem.__init__(self, parent)
self.tag_to_item = {}
self.workflowsItem = QWorkflowsItem()
self.addChild(self.workflowsItem)
self.mshp_to_item = {}
self.mashupsItem = QMashupsItem()
self.addChild(self.mashupsItem)
self.setIcon(0, CurrentTheme.HISTORY_ICON)
return
# # old, esoteric code
#
# l = list(str(x) for x in entity.save())
# l.pop(0) # remove identifier
# type = l.pop(0)
# desc = l[5]
# if len(desc) > 20:
# l[5] = desc[:20] + '...'
klass = self.__class__
self.entity = entity
QtGui.QTreeWidgetItem.__init__(self, parent, [entity.name])
if entity.type_id == VistrailEntity.type_id:
# vistrail - create Workflows and Mashups item
self.workflowsItem = QWorkflowsItem()
self.addChild(self.workflowsItem)
self.mashupsItem = QMashupsItem()
self.addChild(self.mashupsItem)
# self.mashupsItem.setHidden(True)
self.setIcon(0, CurrentTheme.HISTORY_ICON)
self.tag_to_item = {}
self.mshp_to_item = {}
elif entity.type_id == WorkflowEntity.type_id:
self.setIcon(0, CurrentTheme.PIPELINE_ICON)
self.executionList = []
elif entity.type_id == WorkflowExecEntity.type_id:
self.setIcon(0, CurrentTheme.EXECUTE_PIPELINE_ICON)
tooltip = '<html>%s' % entity.url
for child in entity.children:
# l = child.save()
if child.type_id == ThumbnailEntity.type_id:
# is a thumbnail
# add to parent workflow item
cache = ThumbnailCache.getInstance()
path = cache.get_abs_name_entry(child.name)
if path:
pixmap = QtGui.QPixmap(path)
if pixmap and not pixmap.isNull():
self.setIcon(0, QtGui.QIcon(pixmap.scaled(16, 16)))
tooltip += """<br/><img border=0 src='%(path)s'/>
""" % {'path':path}
elif child.type_id == WorkflowEntity.type_id:
# is a pipeline
# only show tagged items
# Add to 'Workflows' item
if not child.name.startswith('Version #'):
childItem = QWorkflowEntityItem(child)
self.workflowsItem.addChild(childItem)
# keep list of tagged workflows
self.tag_to_item[child.name] = childItem
elif child.type_id == WorkflowExecEntity.type_id:
# is an execution
childItem = QWorkflowExecEntityItem(child)
# hidden by default
self.executionList.append(childItem)
self.addChild(childItem)
childItem.setHidden(True)
elif child.type_id == MashupEntity.type_id:
# is a mashup
if not child.name.startswith('Version #'):
childItem = QMashupEntityItem(child)
self.mashupsItem.addChild(childItem)
# keep list of tagged workflows
self.mshp_to_item[child.name] = childItem
else:
self.addChild(QBrowserWidgetItem(child))
if entity.description:
tooltip += '<br/>%s' % entity.description
tooltip += '</html>'
self.setToolTip(0, tooltip)
#def __lt__(self, other):
# sort_col = self.treeWidget().sortColumn()
# if sort_col in set([4]):
# return int(self.text(sort_col)) < int(other.text(sort_col))
# elif sort_col in set([2,3]):
# return datetime(*strptime(str(self.text(sort_col)), '%d %b %Y %H:%M:%S')[0:6]) < datetime(*strptime(str(other.text(sort_col)), '%d %b %Y %H:%M:%S')[0:6])
# return QtGui.QTreeWidgetItem.__lt__(self, other)
def refresh_object(self):
Collection.getInstance().updateVistrail(self.entity.url)
Collection.getInstance().commit()
def remove_object(self):
Collection.getInstance().del_from_workspace(self.entity)
Collection.getInstance().commit()
class QWorkflowEntityItem(QBrowserWidgetItem):
pass
class QWorkflowExecEntityItem(QBrowserWidgetItem):
pass
class QMashupEntityItem(QBrowserWidgetItem):
pass
class QExplorerWidget(QCollectionWidget):
""" This class implements QCollectionWidget as a full-screen explorer widget
"""
def __init__(self, collection, parent=None):
QCollectionWidget.__init__(self, collection, parent)
self.setColumnCount(6)
self.setHeaderLabels(['name', 'user', 'mod_date', 'create_date', 'size', 'url'])
def setup_widget(self, workspace=None):
""" Adds the items from the current workspace """
self.clear()
if workspace:
self.collection.currentWorkspace = workspace
for entity in self.collection.workspaces[self.collection.currentWorkspace]:
item = QExplorerWidgetItem(entity)
self.addTopLevelItem(item)
# if self.collection.currentWorkspace != 'Default':
self.setSortingEnabled(True)
self.sortItems(0, QtCore.Qt.AscendingOrder)
class QExplorerWidgetItem(QtGui.QTreeWidgetItem):
def __init__(self, entity, parent=None):
l = list(str(x) for x in entity.save())
l.pop(0) # remove identifier
type = l.pop(0)
desc = l.pop(5)
# l.pop(7)
# if len(desc) > 20:
# l[5] = desc[:20] + '...'
QtGui.QTreeWidgetItem.__init__(self, parent, l)
self.entity = entity
if type == '1':
self.setIcon(0, CurrentTheme.HISTORY_ICON)
elif type == '2':
self.setIcon(0, CurrentTheme.PIPELINE_ICON)
elif type == '3':
self.setIcon(0, CurrentTheme.EXECUTE_PIPELINE_ICON)
self.setToolTip(0, entity.url)
for child in entity.children:
l = child.save()
if l[1] == 4:
cache = ThumbnailCache.getInstance()
path = cache.get_abs_name_entry(l[2])
if path:
self.setIcon(0, QtGui.QIcon(path))
continue
else:
self.addChild(QExplorerWidgetItem(child))
def __lt__(self, other):
sort_col = self.treeWidget().sortColumn()
if sort_col in set([4]):
return int(self.text(sort_col)) < int(other.text(sort_col))
elif sort_col in set([2,3]):
return datetime(*strptime(str(self.text(sort_col)), '%d %b %Y %H:%M:%S')[0:6]) < datetime(*strptime(str(other.text(sort_col)), '%d %b %Y %H:%M:%S')[0:6])
return QtGui.QTreeWidgetItem.__lt__(self, other)
def refresh_object(self):
Collection.getInstance().updateVistrail(self.entity.url)
Collection.getInstance().commit()
def remove_object(self):
Collection.getInstance().del_from_workspace(self.entity)
Collection.getInstance().commit()
class QWorkspaceWindow(QtGui.QWidget, QVistrailsPaletteInterface):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
# self.workspace_list = QtGui.QComboBox()
# self.titleWidget = QtGui.QWidget()
# self.titleLayout = QtGui.QHBoxLayout()
# self.titleLayout.setMargin(0)
# self.titleLayout.setSpacing(5)
# self.titleLayout.addWidget(QtGui.QLabel('Project:'), 0)
# self.titleLayout.addWidget(self.workspace_list, 1)
# self.titleWidget.setLayout(self.titleLayout)
# self.setTitleBarWidget(self.titleWidget)
self.setWindowTitle('Workspace')
# make it possible to ignore updates during updating of workspace list
self.updatingWorkspaceList = False
# self.connect(self.workspace_list,
# QtCore.SIGNAL("currentIndexChanged(QString)"),
# self.workspace_changed)
layout = QtGui.QVBoxLayout()
layout.setMargin(0)
layout.setSpacing(5)
# self.search_box = QSearchBox(True, False, self)
# layout.addWidget(self.search_box)
self.collection = Collection.getInstance()
self.open_list = QVistrailList()
self.open_list.collection = self.collection
layout.addWidget(self.open_list)
# layout.addWidget(self.titleWidget)
# self.browser = QWorkspaceWidget(self.collection)
# layout.addWidget(self.browser)
# self.browser.setup_widget('Default')
# self.connect(self.search_box, QtCore.SIGNAL('resetSearch()'),
# self.reset_search)
# self.connect(self.search_box, QtCore.SIGNAL('executeSearch(QString)'),
# self.execute_search)
# self.connect(self.search_box, QtCore.SIGNAL('refineMode(bool)'),
# self.refine_mode)
# self.connect(self.browser, QtCore.SIGNAL('workspaceListUpdated()'),
# self.update_workspace_list)
self.setLayout(layout)
# self.update_workspace_list()
self.addButtonsToToolbar()
def addButtonsToToolbar(self):
# button for toggling executions
self.execAction = QtGui.QAction(CurrentTheme.EXECUTE_PIPELINE_ICON,
"Show/hide workflow executions",
None,
triggered=self.showWorkflowExecutions)
self.execAction.setCheckable(True)
self.toolWindow().toolbar.insertAction(self.toolWindow().pinAction,
self.execAction)
# buttons for toggling list/tree views of workflows
self.listAction = QtGui.QAction(CurrentTheme.LIST_VIEW_ICON,
"View workflows in a list",
None,
triggered=self.viewAsList)
self.listAction.setCheckable(True)
self.listAction.setChecked(True)
self.treeAction = QtGui.QAction(CurrentTheme.TREE_VIEW_ICON,
"View workflows in a tree",
None,
triggered=self.viewAsTree)
self.treeAction.setCheckable(True)
self.workflowDisplayGroup = QtGui.QActionGroup(self)
self.workflowDisplayGroup.setExclusive(True)
self.workflowDisplayGroup.addAction(self.listAction)
self.workflowDisplayGroup.addAction(self.treeAction)
self.toolWindow().toolbar.insertAction(self.toolWindow().pinAction,
self.listAction)
self.toolWindow().toolbar.insertAction(self.toolWindow().pinAction,
self.treeAction)
# buttons for going to the search view to search all vistrails
self.searchAction = QtGui.QAction("Search", self.toolWindow().toolbar,
triggered=self.gotoSearch)
self.searchAction.searchMode = False
self.toolWindow().toolbar.insertAction(self.toolWindow().pinAction,
self.searchAction)
def state_changed(self, view):
self.open_list.state_changed(view)
def gotoSearch(self):
if self.searchAction.searchMode:
self.open_list.hide_search_results()
self.searchAction.searchMode = False
self.open_list.searchMode = False
self.searchAction.setText("Search")
from gui.vistrails_window import _app
_app.notify('query_changed', None)
else:
from gui.vistrails_window import _app
_app.qactions['search'].trigger()
def updateSearchResults(self, search=None, result_list=None):
if search is None:
self.gotoSearch()
elif not self.searchAction.searchMode:
self.open_list.show_search_results()
self.searchAction.searchMode = True
self.open_list.searchMode = True
self.searchAction.setText("Clear Search")
self.open_list.update_search_results(search, result_list)
def execution_updated(self):
self.open_list.execution_updated()
def showWorkflowExecutions(self, state):
""" toggle show executions on/off """
self.open_list.hideExecutions(not state)
def viewAsList(self):
""" Order workflow items as a flat list """
self.open_list.isTreeView = False
for i in xrange(self.open_list.openFilesItem.childCount()):
item = self.open_list.openFilesItem.child(i)
self.open_list.make_list(item)
def viewAsTree(self):
""" Order workflow items as a history tree """
self.open_list.isTreeView = True
for i in xrange(self.open_list.openFilesItem.childCount()):
item = self.open_list.openFilesItem.child(i)
self.open_list.make_tree(item)
def update_workspace_list(self):
""" Updates workspace list and highlights currentWorkspace
        Keeps 'Default' on top
"""
self.updatingWorkspaceList = True
self.workspace_list.clear()
self.workspace_list.addItem('Default')
if 'Default' == self.browser.collection.currentWorkspace:
self.workspace_list.setCurrentIndex(self.workspace_list.count()-1)
locations = self.browser.collection.workspaces.keys()
workspaces = [ l for l in locations \
if not l.startswith('file') and \
not l.startswith('db') and \
not l == 'Default']
workspaces.sort()
for w in workspaces:
self.workspace_list.addItem(w)
if w == self.browser.collection.currentWorkspace:
self.workspace_list.setCurrentIndex(self.workspace_list.count()-1)
self.updatingWorkspaceList = False
def workspace_changed(self, workspace):
if not self.updatingWorkspaceList:
self.browser.setup_widget(str(workspace))
def reset_search(self):
self.browser.reset_search()
def set_results(self, results):
pass
def execute_search(self, text):
s = str(text)
try:
search = SearchCompiler(s).searchStmt
except SearchParseError, e:
debug.warning("Search Parse Error", str(e))
search = None
self.browser.run_search(search)
def refine_mode(self, on):
pass
def change_vt_window(self, vistrail_window):
self.open_list.change_vt_window(vistrail_window)
def add_vt_window(self, vistrail_window):
self.open_list.add_vt_window(vistrail_window)
def remove_vt_window(self, vistrail_window):
self.open_list.remove_vt_window(vistrail_window)
class QExplorerDialog(QToolWindow, QToolWindowInterface):
def __init__(self, parent=None):
QToolWindow.__init__(self, parent=parent)
self.widget = QtGui.QWidget()
self.setWidget(self.widget)
self.workspace_list = QtGui.QComboBox()
self.setTitleBarWidget(self.workspace_list)
# make it possible to ignore updates during updating of workspace list
self.updatingWorkspaceList = False
self.connect(self.workspace_list,
QtCore.SIGNAL("currentIndexChanged(QString)"),
self.workspace_changed)
layout = QtGui.QVBoxLayout()
# layout.setMargin(0)
# layout.setSpacing(5)
self.search_box = QSearchBox(True, False, self)
layout.addWidget(self.search_box)
self.collection = Collection.getInstance()
self.browser = QExplorerWidget(self.collection, self)
layout.addWidget(self.browser)
self.browser.setup_widget('Recent files')
self.connect(self.search_box, QtCore.SIGNAL('resetSearch()'),
self.reset_search)
self.connect(self.search_box, QtCore.SIGNAL('executeSearch(QString)'),
self.execute_search)
self.connect(self.search_box, QtCore.SIGNAL('refineMode(bool)'),
self.refine_mode)
self.connect(self.browser, QtCore.SIGNAL('workspaceListUpdated()'),
self.update_workspace_list)
self.widget.setLayout(layout)
self.update_workspace_list()
def update_workspace_list(self):
""" Updates workspace list and highlights currentWorkspace
Keeps 'Default' on top
"""
self.updatingWorkspaceList = True
self.workspace_list.clear()
self.workspace_list.addItem('Default')
if 'Default' == self.browser.collection.currentWorkspace:
self.workspace_list.setCurrentIndex(self.workspace_list.count()-1)
sorted_workspaces = self.browser.collection.workspaces.keys()
if 'Default' in sorted_workspaces:
sorted_workspaces.remove('Default')
sorted_workspaces.sort()
for p in sorted_workspaces:
self.workspace_list.addItem(p)
if p == self.browser.collection.currentWorkspace:
self.workspace_list.setCurrentIndex(self.workspace_list.count()-1)
self.updatingWorkspaceList = False
def workspace_changed(self, workspace):
if not self.updatingWorkspaceList:
self.browser.setup_widget(str(workspace))
def reset_search(self):
self.browser.reset_search()
def set_results(self, results):
pass
def execute_search(self, text):
s = str(text)
try:
search = SearchCompiler(s).searchStmt
except SearchParseError, e:
debug.warning("Search Parse Error", str(e))
search = None
self.browser.run_search(search)
def refine_mode(self, on):
pass
class QVistrailEntityItem(QBrowserWidgetItem):
def __init__(self, entity, window=None):
QBrowserWidgetItem.__init__(self, entity)
if window:
self.window = window
self.entity = entity
if not entity:
self.setText(0, self.window.get_name())
# make them draggable
self.setFlags(self.flags() | QtCore.Qt.ItemIsDragEnabled
| QtCore.Qt.ItemIsDropEnabled
# | QtCore.Qt.ItemIsSelectable
)
def open_in_new_window(self):
if hasattr(self, "window"):
self.treeWidget().setSelected(self.window)
self.treeWidget().parent().emit(QtCore.SIGNAL("detachVistrail"),
self.window)
def open_workflow(self):
self.treeWidget().item_selected(self, 0)
def open_workflow_in_new_tab(self):
self.parent().parent().window.add_pipeline_view()
self.open_workflow()
def open_workflow_in_new_window(self):
self.open_workflow_in_new_tab()
self.parent().parent().window.detach_view(
self.parent().parent().window.tabs.currentIndex())
def open_mashup(self):
self.treeWidget().open_mashup(self.entity)
def edit_mashup(self):
self.treeWidget().edit_mashup(self.entity)
class QVistrailListLatestItem(QtGui.QTreeWidgetItem):
def __init__(self):
QtGui.QTreeWidgetItem.__init__(self)
self.setIcon(0, CurrentTheme.PIPELINE_ICON)
self.setText(0, '(latest)')
def open_workflow(self):
self.treeWidget().item_selected(self, 0)
def open_workflow_in_new_tab(self):
self.parent().parent().window.add_pipeline_view()
self.open_workflow()
def open_workflow_in_new_window(self):
self.open_workflow_in_new_tab()
self.parent().parent().window.detach_view(
self.parent().parent().window.tabs.currentIndex())
class QVistrailList(QtGui.QTreeWidget):
def __init__(self, parent=None):
QtGui.QTreeWidget.__init__(self, parent)
self.searchMode = False
self.search = None
self.setColumnCount(1)
self.setHeaderHidden(True)
self.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.setDragEnabled(True)
self.setAcceptDrops(True)
self.setDropIndicatorShown(True)
self.setExpandsOnDoubleClick(False)
self.setRootIsDecorated(False)
self.isTreeView = False
self.executionsHidden = True
self.collection = Collection.getInstance()
self.items = {}
self.delegate = QModuleTreeWidgetItemDelegate(self, self)
self.setItemDelegate(self.delegate)
self.openFilesItem = QtGui.QTreeWidgetItem(['Current Vistrails'])
self.addTopLevelItem(self.openFilesItem)
self.setup_closed_files()
self.openFilesItem.setExpanded(True)
self.closedFilesItem.setExpanded(True)
self.setSortingEnabled(True)
self.sortItems(0, QtCore.Qt.AscendingOrder)
self.connect(self,
QtCore.SIGNAL("currentItemChanged(QTreeWidgetItem*,"
"QTreeWidgetItem*)"),
self.item_changed)
self.connect(self,
QtCore.SIGNAL('itemDoubleClicked(QTreeWidgetItem *, int)'),
self.item_selected)
self.setIconSize(QtCore.QSize(16,16))
self.connect(self,
QtCore.SIGNAL('itemPressed(QTreeWidgetItem *,int)'),
self.onItemPressed)
self.updateHideExecutions()
def setup_closed_files(self):
self.closedFilesItem = QtGui.QTreeWidgetItem(['My Vistrails'])
self.addTopLevelItem(self.closedFilesItem)
closed_entities = self.collection.workspaces['Default']
for entity in closed_entities:
if entity.url.startswith('file://'):
if not entity.locator().is_valid():
self.collection.del_from_workspace(entity)
self.collection.delete_entity(entity)
continue
self.closedFilesItem.addChild(QVistrailEntityItem(entity))
def show_search_results(self):
self.searchResultsItem = QtGui.QTreeWidgetItem(['Search Results'])
self.addTopLevelItem(self.searchResultsItem)
self.openFilesItem.setHidden(True)
self.closedFilesItem.setHidden(True)
def hide_search_results(self):
self.takeTopLevelItem(self.indexOfTopLevelItem(self.searchResultsItem))
self.openFilesItem.setHidden(False)
self.closedFilesItem.setHidden(False)
def update_search_results(self, search=None, result_list=None):
self.search = search
self.searchResultsItem.takeChildren()
if result_list is not None:
for entity in result_list:
item = QVistrailEntityItem(entity)
self.searchResultsItem.addChild(item)
item.setExpanded(True)
self.searchResultsItem.setExpanded(True)
def onItemPressed(self, item, column):
""" onItemPressed(item: QTreeWidgetItem, column: int) -> None
Expand/Collapse top-level item when the mouse is pressed
"""
if item and item.parent() == None:
self.setItemExpanded(item, not self.isItemExpanded(item))
def search_result_selected(self, view, version):
# need to signal the query view to change its version and vistrail
from gui.vistrails_window import _app
_app.change_view(view)
view.query_version_selected(self.search, version)
def item_selected(self, widget_item, column):
""" opens or displays the selected item if possible """
locator = None
entity = None
if hasattr(widget_item, 'entity') and widget_item.entity is not None:
entity = widget_item.entity
locator = entity.locator()
elif type(widget_item) == QVistrailListLatestItem and \
hasattr(widget_item.parent().parent(), 'entity') and \
widget_item.parent().parent().entity is not None:
entity = widget_item.parent().parent().entity
locator = entity.locator()
elif not type(widget_item) == QVistrailListLatestItem:
# no valid item selected
return
from gui.vistrails_window import _app
open_vistrail = _app.open_vistrail_without_prompt
set_current_locator = _app.set_current_locator
if not locator:
# assuming an unsaved vistrail - need to use view
vistrail_widget = widget_item
view = None
while view is None and vistrail_widget is not None:
if hasattr(vistrail_widget, 'window'):
view = vistrail_widget.window
break
elif (hasattr(vistrail_widget, 'entity') and
hasattr(vistrail_widget.entity, '_window')):
view = vistrail_widget.entity._window
break
vistrail_widget = vistrail_widget.parent()
if vistrail_widget == widget_item:
# do nothing - view is already selected
return
is_execution = False
version = None
if type(widget_item) == QVistrailListLatestItem:
version = view.controller.vistrail.get_latest_version()
elif hasattr(widget_item, 'entity'):
if hasattr(widget_item, 'executionList'):
version = widget_item.entity.name
else:
is_execution = True
version = widget_item.parent().entity.name
if not version:
# assume execution
version = str(widget_item.parent().text(0))
if type(version) == str:
try:
version = \
view.controller.vistrail.get_version_number(version)
except:
version = None
if self.searchMode:
self.search_result_selected(view, version)
else:
# _app.view_changed(view)
_app.change_view(view)
if version:
view.version_selected(version, True, double_click=True)
if is_execution:
_app.qactions['provenance'].trigger()
workflow_exec = widget_item.entity.name
view.log_view.set_exec_by_id(workflow_exec) or \
view.log_view.set_exec_by_date(workflow_exec)
return
args = {}
args['version'] = locator.kwargs.get('version_node', None) or \
locator.kwargs.get('version_tag', None)
vistrail_widget = widget_item
vistrail_entity = entity
version = None
if args['version']:
vistrail_widget = widget_item.parent()
vistrail_entity = entity.parent
locator = vistrail_entity.locator()
version = args['version']
workflow_exec = locator.kwargs.get('workflow_exec', None)
if workflow_exec:
args['workflow_exec'] = workflow_exec
vistrail_widget = widget_item.parent().parent()
vistrail_entity = entity.parent.parent
locator = vistrail_entity.locator()
locator.update_from_gui(self)
# set vistrail name
#locator._name = widget_item.entity.parent.parent.name
if type(widget_item) == QVistrailListLatestItem:
# find the latest item (max action id)
vistrail = widget_item.parent().parent().window.controller.vistrail
args['version'] = vistrail.get_latest_version()
version = vistrail.get_latest_version()
locator.update_from_gui(self)
if not locator.is_valid():
debug.critical("File not found: '%s'. Entry will be deleted." % locator.to_url())
vistrail_widget.parent().removeChild(vistrail_widget)
self.collection.delete_entity(vistrail_entity)
self.collection.commit()
view = _app.ensureVistrail(locator)
if self.searchMode:
self.search_result_selected(view, version)
else:
if view:
self.ensureNotDiffView()
open_vistrail(locator, **args)
if view is None or not view.is_abstraction:
set_current_locator(locator)
if view and isinstance(entity, MashupEntity):
            # Assume that double-clicking a mashup means the user wants to
            # run it. If it is double-clicked without the vistrail being
            # open, we should open the vistrail first.
self.open_mashup(entity)
def ensureNotDiffView(self):
""" If current tab is a diff, create a new tab """
from gui.vistrails_window import _app
view = _app.get_current_view()
tab = view.get_current_tab()
if type(tab) == QDiffView:
view.add_pipeline_view()
def open_mashup(self, entity):
"""open_mashup(entity:MashupEntity) -> None
It will ask the Vistrail view to execute the mashup
"""
self.ensureNotDiffView()
from gui.vistrails_window import _app
view = _app.get_current_view()
view.open_mashup(entity.mashup)
def edit_mashup(self, entity):
"""open_mashup(entity:MashupEntity) -> None
It will ask the Vistrail view to execute the mashup
"""
from gui.vistrails_window import _app
view = _app.get_current_view()
view.edit_mashup(entity.mashup)
def mimeData(self, itemList):
""" mimeData(itemList) -> None
Setup the mime data to contain itemList because Qt 4.2.2
implementation doesn't instantiate QTreeWidgetMimeData
anywhere as it's supposed to. It must have been a bug...
"""
data = QtGui.QTreeWidget.mimeData(self, itemList)
data.items = itemList
return data
def dropEvent( self, event):
event.accept()
destination = self.itemAt(event.pos())
if not destination:
return
if type(event.source())==QVistrailList:
data = event.mimeData()
if hasattr(data, 'items'):
assert len(data.items) == 1
source = data.items[0]
if not source or source == destination:
return
if hasattr(source, 'window') and hasattr(destination, 'window'):
# both are vistrails
self.merge_vistrails(source, destination)
elif (type(source) == QVistrailListLatestItem or
hasattr(source, 'executionList')) and \
(type(destination) == QVistrailListLatestItem or
hasattr(destination, 'executionList')):
# workflows can be from diff vistrails
self.visual_diff(source, destination)
def merge_vistrails(self, source, destination):
if source.window.controller.changed or destination.window.controller.changed:
text = ('Both Vistrails need to be saved before they can be merged.')
QtGui.QMessageBox.information(None, 'Cannot perform merge',
text, '&OK')
return
res = QtGui.QMessageBox.question(None, 'Merge the histories of these 2 vistrails into a new vistrail?',
source.window.get_name() + '\n' + destination.window.get_name(),
                    buttons=QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
defaultButton=QtGui.QMessageBox.No)
if res == QtGui.QMessageBox.Yes:
from gui.vistrails_window import _app
_app.merge_vistrails(destination.window.controller, source.window.controller)
def visual_diff(self, source, destination):
source_parent = source.parent()
while not hasattr(source_parent, 'window'):
source_parent = source_parent.parent()
destination_parent = destination.parent()
while not hasattr(destination_parent, 'window'):
destination_parent = destination_parent.parent()
vistrail_1 = source_parent.window.controller.vistrail
vistrail_2 = destination_parent.window.controller.vistrail
if hasattr(source, 'entity'):
v1 = source.entity.locator().kwargs.get('version_node', None)
else:
v1 = vistrail_1.get_latest_version()
if hasattr(destination, 'entity'):
v2 = destination.entity.locator().kwargs.get('version_node', None)
else:
v2 = vistrail_2.get_latest_version()
# if we don't have the same vistrail, pass the second vistrail
if id(vistrail_1) == id(vistrail_2):
source_parent.window.diff_requested(v1, v2)
else:
source_parent.window.diff_requested(v1, v2, vistrail_2)
def hideExecutions(self, hidden):
self.executionsHidden = hidden
self.updateHideExecutions()
def updateHideExecutions(self):
for i in xrange(self.openFilesItem.childCount()):
vt = self.openFilesItem.child(i)
if not hasattr(vt, 'tag_to_item'):
continue
for item in vt.tag_to_item.itervalues():
if not hasattr(item, 'executionList'):
continue
for exec_item in item.executionList:
exec_item.setHidden(self.executionsHidden)
for i in xrange(self.closedFilesItem.childCount()):
vt = self.closedFilesItem.child(i)
if not hasattr(vt, 'tag_to_item'):
continue
for item in vt.tag_to_item.itervalues():
if not hasattr(item, 'executionList'):
continue
for exec_item in item.executionList:
exec_item.setHidden(self.executionsHidden)
def make_list(self, item):
""" construct a list from the tagged workflows in a loaded vistrail
"""
self.setSortingEnabled(False)
if not (hasattr(item, 'tag_to_item') or hasattr(item, 'mshp_to_item')):
return
for tag, wf in item.tag_to_item.iteritems():
index = wf.parent().indexOfChild(wf)
wf = wf.parent().takeChild(index)
item.workflowsItem.addChild(wf)
for tag, mshp in item.mshp_to_item.iteritems():
index = mshp.parent().indexOfChild(mshp)
mshp = mshp.parent().takeChild(index)
item.mashupsItem.addChild(mshp)
self.updateHideExecutions()
self.setSortingEnabled(True)
def make_tree(self, item):
""" construct a tree from the tagged workflows in a loaded vistrail
"""
self.setSortingEnabled(False)
if not hasattr(item, 'window'):
return
am = item.window.controller.vistrail.actionMap
tm = item.window.controller.vistrail.get_tagMap()
vm = dict((v,k) for k, v in tm.iteritems())
# loop through tagged workflows and add to parent workflow
if not hasattr(item, 'tag_to_item'):
return
for tag, wf in item.tag_to_item.iteritems():
if tag not in vm:
continue
# find parent
version = vm[tag]
action = am[version]
while action.parent in am:
action = am[action.parent]
if action.timestep in tm:
break
if action.timestep not in tm or action.timestep == version:
continue
parent_tag = tm[action.timestep]
if parent_tag in item.tag_to_item:
parent_wf = item.tag_to_item[parent_tag]
index = wf.parent().indexOfChild(wf)
wf = wf.parent().takeChild(index)
parent_wf.addChild(wf)
self.updateHideExecutions()
self.setSortingEnabled(True)
def state_changed(self, view):
""" update tags and mashups """
item = self.items[id(view)]
entity = item.entity
(new_entity, was_updated) = \
entity.update_vistrail(view.controller.vistrail)
if new_entity:
Collection.getInstance().create_vistrail_entity(
view.controller.vistrail)
self.add_vt_window(view)
return
elif was_updated:
item.setText(0, entity.name)
(added_wfs, deleted_wfs) = entity.update_workflows()
(more_added_wfs, added_wf_execs) = entity.update_log()
(added_mshps, deleted_mshps) = entity.update_mashups()
for wf_entity in deleted_wfs:
assert(wf_entity.name in item.tag_to_item)
child = item.tag_to_item[wf_entity.name]
child_idx = child.parent().indexOfChild(child)
child.parent().takeChild(child_idx)
del item.tag_to_item[wf_entity.name]
for wf_entity in chain(added_wfs, more_added_wfs):
# this is from the original code...
if not wf_entity.name.startswith('Version #'):
childItem = QWorkflowEntityItem(wf_entity)
item.workflowsItem.addChild(childItem)
# keep list of tagged workflows
item.tag_to_item[wf_entity.name] = childItem
for wf_exec_entity in added_wf_execs:
parent_version = wf_exec_entity.workflow_exec.parent_version
wf_entity = entity.wf_entity_map[parent_version]
if not wf_entity.name.startswith('Version #'):
assert(wf_entity.name in item.tag_to_item)
wf_item = item.tag_to_item[wf_entity.name]
child = QWorkflowExecEntityItem(wf_exec_entity)
wf_item.addChild(child)
wf_item.executionList.append(child)
self.updateHideExecutions()
for mshp_entity in deleted_mshps:
assert(mshp_entity.name in item.mshp_to_item)
child = item.mshp_to_item[mshp_entity.name]
child_idx = child.parent().indexOfChild(child)
child.parent().takeChild(child_idx)
del item.mshp_to_item[mshp_entity.name]
for mshp_entity in added_mshps:
if not mshp_entity.name.startswith('Version #'):
childItem = QMashupEntityItem(mshp_entity)
item.mashupsItem.addChild(childItem)
# keep list of tagged workflows
item.mshp_to_item[mshp_entity.name] = childItem
self.make_tree(item) if self.isTreeView else self.make_list(item)
def execution_updated(self):
""" Add new executions to workflow """
# get view and item
from gui.vistrails_window import _app
view = _app.get_current_view()
if id(view) not in self.items:
return
item = self.items[id(view)]
entity = item.entity
entity.set_vistrail(view.controller.vistrail)
(added_wfs, added_wf_execs) = entity.update_log()
for wf_entity in added_wfs:
if not wf_entity.name.startswith('Version #'):
childItem = QWorkflowEntityItem(wf_entity)
item.workflowsItem.addChild(childItem)
# keep list of tagged workflows
item.tag_to_item[wf_entity.name] = childItem
for wf_exec_entity in added_wf_execs:
parent_version = wf_exec_entity.workflow_exec.parent_version
wf_entity = entity.wf_entity_map[parent_version]
if not wf_entity.name.startswith('Version #'):
assert(wf_entity.name in item.tag_to_item)
wf_item = item.tag_to_item[wf_entity.name]
child = QWorkflowExecEntityItem(wf_exec_entity)
wf_item.addChild(child)
wf_item.executionList.append(child)
self.updateHideExecutions()
def add_vt_window(self, vistrail_window):
locator = vistrail_window.controller.locator
entity = None
entity_was_none = False
item_reused = False
if locator:
entity = self.collection.fromUrl(locator.to_url())
if entity is None:
entity = VistrailEntity(vistrail_window.controller.vistrail)
entity_was_none = True
# remove item from recent list
for i in xrange(self.closedFilesItem.childCount()):
recent = self.closedFilesItem.child(i)
if entity and recent and recent.entity and \
recent.entity.url == entity.url:
self.setSelected(None)
index = self.closedFilesItem.indexOfChild(recent)
item = self.closedFilesItem.takeChild(index)
item = QVistrailEntityItem(entity, vistrail_window)
item.current_item = QVistrailListLatestItem()
item.workflowsItem.addChild(item.current_item)
if id(vistrail_window) in self.items:
# window already exists
old_item = self.items[id(vistrail_window)]
url = None
if old_item.entity is not None:
url = old_item.entity.url
# if there was a change in the locator, we need to remove the old
# item and put in the closed vistrails area
if url != vistrail_window.controller.locator.to_url():
self.remove_vt_window(vistrail_window)
else:
# we will reuse the item
if hasattr(item, 'entity'):
old_item.entity = item.entity
old_item.window = item.window
old_item.current_item = item.current_item
old_item.workflowsItem = item.workflowsItem
old_item.mashupsItem = item.mashupsItem
old_item.tag_to_item = item.tag_to_item
old_item.mshp_to_item = item.mshp_to_item
old_item.setText(0, item.text(0))
while old_item.childCount():
child = old_item.child(0)
index = old_item.indexOfChild(child)
old_item.takeChild(index)
while item.childCount():
child = item.child(0)
index = item.indexOfChild(child)
child = item.takeChild(index)
old_item.addChild(child)
item = old_item
item_reused = True
if not item_reused:
self.items[id(vistrail_window)] = item
if entity_was_none:
# why is this all the way down here?!?
# moving the create stmt up much earlier so it is set
# on the item!
# entity = VistrailEntity(vistrail_window.controller.vistrail)
self.collection.add_temp_entity(entity)
entity.is_open = True
entity._window = vistrail_window
self.openFilesItem.addChild(item)
self.make_tree(item) if self.isTreeView else self.make_list(item)
item.workflowsItem.setExpanded(True)
item.mashupsItem.setExpanded(True)
self.setSelected(vistrail_window)
self.updateHideExecutions()
def remove_vt_window(self, vistrail_window):
if id(vistrail_window) not in self.items:
return
self.setSelected(None)
item = self.items[id(vistrail_window)]
del self.items[id(vistrail_window)]
delattr(item, 'window')
index = self.openFilesItem.indexOfChild(item)
item = self.openFilesItem.takeChild(index)
url = None
if item.entity is not None:
item.entity.is_open = False
item.entity._window = None
url = item.entity.url
item.current_item.parent().removeChild(item.current_item)
# entity may have changed
entity = None
if url is None:
locator = vistrail_window.controller.locator
if locator:
entity = self.collection.fromUrl(locator.to_url())
else:
entity = self.collection.fromUrl(url)
if entity and not self.collection.is_temp_entity(entity) and \
not vistrail_window.is_abstraction:
item = QVistrailEntityItem(entity)
self.make_tree(item) if self.isTreeView else self.make_list(item)
self.closedFilesItem.addChild(item)
item.setText(0, entity.name)
self.updateHideExecutions()
def change_vt_window(self, vistrail_window):
self.setSelected(vistrail_window)
def setSelected(self, view):
for item in self.selectedItems():
item.setSelected(False)
def setBold(parent_item):
for i in xrange(parent_item.childCount()):
item = parent_item.child(i)
font = item.font(0)
window = item.window if hasattr(item, 'window') else None
font.setBold(view == window if window and view else False)
item.setFont(0, font)
if window:
item.setText(0, window.get_name())
# item.setSelected(view == item.window
# if window and view else False)
if not self.openFilesItem.isHidden():
setBold(self.openFilesItem)
elif self.searchMode:
setBold(self.searchResultsItem)
def item_changed(self, item, prev_item):
if not item:
return
vistrail = item
while not hasattr(vistrail, 'window'):
if not vistrail or not vistrail.parent:
# parent node
return
vistrail = vistrail.parent()
#print "*** item clicked", id(vistrail.window)
self.setSelected(vistrail.window)
self.parent().emit(QtCore.SIGNAL("vistrailChanged(PyQt_PyObject)"),
vistrail.window)
def keyPressEvent(self, event):
if event.key() in [QtCore.Qt.Key_Delete, QtCore.Qt.Key_Backspace]:
items = self.selectedItems()
if len(items) == 1:
item = items[0]
if item.parent() == self.openFilesItem:
# close current vistrail
from gui.vistrails_window import _app
if hasattr(item, 'window'):
_app.close_vistrail(item.window)
else:
_app.close_vistrail()
elif item.parent() == self.closedFilesItem:
# remove from closed list
self.closedFilesItem.removeChild(item)
self.collection.del_from_workspace(item.entity)
self.collection.delete_entity(item.entity)
self.collection.commit()
else:
QtGui.QTreeWidget.keyPressEvent(self, event)
def contextMenuEvent(self, event):
item = self.itemAt(event.pos())
if item and self.openFilesItem.indexOfChild(item) != -1:
# item is vistrail
menu = QtGui.QMenu(self)
act = QtGui.QAction("Open in New Window", self,
triggered=item.open_in_new_window)
act.setStatusTip("Open specified vistrail file in another window")
menu.addAction(act)
menu.exec_(event.globalPos())
elif item and (isinstance(item, QVistrailEntityItem) or
isinstance(item, QVistrailListLatestItem)):
vtparent = item.parent().parent()
if (self.openFilesItem.indexOfChild(vtparent) != -1 and
isinstance(item.parent(),QWorkflowsItem)):
# item is workflow
menu = QtGui.QMenu(self)
act = QtGui.QAction("Open", self,
triggered=item.open_workflow)
act.setStatusTip("Open specified workflow in this window")
menu.addAction(act)
act = QtGui.QAction("Open in new Tab", self,
triggered=item.open_workflow_in_new_tab)
act.setStatusTip("Open specified workflow in a new tab")
menu.addAction(act)
act = QtGui.QAction("Open in new Window", self,
triggered=item.open_workflow_in_new_window)
act.setStatusTip("Open specified workflow in a new window")
menu.addAction(act)
menu.exec_(event.globalPos())
elif (self.openFilesItem.indexOfChild(vtparent) != -1 and
isinstance(item.parent(),QMashupsItem)):
# item is mashup
menu = QtGui.QMenu(self)
act = QtGui.QAction("Edit", self,
triggered=item.edit_mashup)
act.setStatusTip("Edit the mashup")
menu.addAction(act)
act = QtGui.QAction("Execute", self,
triggered=item.open_mashup)
act.setStatusTip("Execute the mashup")
menu.addAction(act)
menu.exec_(event.globalPos())
if __name__ == '__main__':
import sys
sys.path.append('/vistrails/src/query/vistrails')
from core.collection import Collection
# vt_1 = load_vistrail(ZIPFileLocator('/vistrails/examples/spx.vt'))[0]
# vt_2 = load_vistrail(DBLocator('vistrails.sci.utah.edu', 3306,
# 'vistrails', 'vistrails', '8edLj4',
# obj_id=9, obj_type='vistrail'))[0]
c = Collection('test.db')
# c.clear()
# e_1 = c.create_vistrail_entity(vt_1)
# e_2 = c.create_vistrail_entity(vt_2)
c.entities = {}
c.load_entities()
app = QtGui.QApplication(sys.argv)
widget = QBrowserWidget(c)
widget.setup_widget('Recent items')
widget.show()
sys.exit(app.exec_())
|
|
# swift_build_support/toolchain.py ------------------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
"""
Represent toolchain - the versioned executables.
"""
# ----------------------------------------------------------------------------
from __future__ import absolute_import
import platform
from build_swift.build_swift import cache_utils
from build_swift.build_swift.shell import which
from build_swift.build_swift.wrappers import xcrun
from . import shell
__all__ = [
'host_toolchain',
]
class Toolchain(object):
"""Represents native host toolchain
"""
def find_tool(self, *names):
raise NotImplementedError('Subclasses must implement this method')
# Declare properties for each tool.
# These properties are loaded lazily and assignable.
def _register(name, *tool):
def _getter(self):
return self.find_tool(*tool)
_getter.__name__ = name
setattr(Toolchain, name, cache_utils.reify(_getter))
if platform.system() == 'Windows':
_register("cc", "clang-cl")
_register("cxx", "clang-cl")
else:
_register("cc", "clang")
_register("cxx", "clang++")
_register("ninja", "ninja", "ninja-build")
_register("cmake", "cmake")
_register("distcc", "distcc")
_register("distcc_pump", "distcc-pump", "pump")
_register("llvm_profdata", "llvm-profdata")
_register("llvm_cov", "llvm-cov")
_register("lipo", "lipo")
_register("libtool", "libtool")
_register("swiftc", "swiftc")
class Darwin(Toolchain):
def __init__(self, sdk, toolchain):
super(Darwin, self).__init__()
self.xcrun_sdk = sdk
self.xcrun_toolchain = toolchain
def find_tool(self, *names):
for name in names:
# NOTE: xcrun searches from developer tools directory *and* from
# PATH. Relatively slow, but we don't need `which` for
# Darwin.
found = xcrun.find(name,
sdk=self.xcrun_sdk,
toolchain=self.xcrun_toolchain)
if found is not None:
return found
return None
class GenericUnix(Toolchain):
def __init__(self, suffixes):
super(GenericUnix, self).__init__()
        # On these platforms, search for 'clang' and 'clang++'
        # unconditionally to determine the llvm suffix.
ret = self.find_clang(['clang', 'clang++'], suffixes)
if ret is None:
self.cc = None
self.cxx = None
# We don't have clang, then we don't have any llvm tools.
self.llvm_suffixes = []
else:
found, suffix = ret
self.cc, self.cxx = found
if suffix == '':
# Some platform may have `clang`, `clang++`, `llvm-cov-3.6`
# but not `llvm-cov`. In that case, we assume `clang` is
# corresponding to the best version of llvm tools found.
self.llvm_suffixes = suffixes
else:
# Otherwise, we must have llvm tools with the same suffix as
# `clang` or `clang++`
self.llvm_suffixes = [suffix]
def find_clang(self, tools, suffixes):
for suffix in suffixes:
ret = [which(t + suffix) for t in tools]
if all(t is not None for t in ret):
return (ret, suffix)
return None
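    # For example (hypothetical host): with suffixes ['', '-3.8'], a machine
    # that only provides clang-3.8/clang++-3.8 makes find_clang return
    # (found_paths, '-3.8'), and that same suffix is then the only one tried
    # when locating the llvm-* tools via find_llvm_tool below.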
def find_llvm_tool(self, tool):
for suffix in self.llvm_suffixes:
found = which(tool + suffix)
if found is not None:
# If we found the tool with the suffix, lock suffixes to it.
                self.llvm_suffixes = [suffix]
return found
return None
def find_tool(self, *names):
for name in names:
if name.startswith('llvm-'):
found = self.find_llvm_tool(name)
else:
found = which(name)
if found is not None:
return found
return None
class MacOSX(Darwin):
def __init__(self, toolchain='default'):
super(MacOSX, self).__init__(sdk='macosx', toolchain=toolchain)
class Linux(GenericUnix):
def __init__(self):
super(Linux, self).__init__(['', '-3.8', '-3.7', '-3.6', '-3.5'])
class FreeBSD(GenericUnix):
def __init__(self):
# For testing toolchain initializer on non-FreeBSD systems
sys = platform.system()
if sys != 'FreeBSD':
suffixes = ['']
# See: https://github.com/apple/swift/pull/169
# Building Swift from source requires a recent version of the Clang
# compiler with C++14 support.
elif self._release_date and self._release_date >= 1100000:
suffixes = ['']
else:
suffixes = ['38', '37', '36', '35']
super(FreeBSD, self).__init__(suffixes)
@cache_utils.reify
def _release_date(self):
"""Return the release date for FreeBSD operating system on this host.
If the release date cannot be ascertained, return None.
"""
# For details on `sysctl`, see:
# http://www.freebsd.org/cgi/man.cgi?sysctl(8)
out = shell.capture(['sysctl', '-n', 'kern.osreldate'],
dry_run=False, echo=False, optional=True)
if out is None:
return None
return int(out)
class OpenBSD(GenericUnix):
def __init__(self):
super(OpenBSD, self).__init__([''])
class Cygwin(Linux):
    # Currently, Cygwin is considered the same as Linux.
pass
class Windows(Toolchain):
def find_tool(self, *names):
for name in names:
found = which(name)
if found is not None:
return found
return None
class Haiku(GenericUnix):
def __init__(self):
        super(Haiku, self).__init__([''])
def host_toolchain(**kwargs):
sys = platform.system()
if sys == 'Darwin':
return MacOSX(kwargs.pop('xcrun_toolchain', 'default'))
elif sys == 'Linux':
return Linux()
elif sys == 'FreeBSD':
return FreeBSD()
elif sys == 'OpenBSD':
return OpenBSD()
elif sys.startswith('CYGWIN'):
return Cygwin()
elif sys == 'Windows':
return Windows()
elif sys == 'Haiku':
return Haiku()
else:
raise NotImplementedError('The platform "%s" does not have a defined '
'toolchain.' % sys)
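# Example (editorial sketch): typical callers just ask for the host toolchain
# and read the tool properties, which are None when a tool cannot be found:
#
#     toolchain = host_toolchain(xcrun_toolchain='default')
#     if toolchain.ninja is None:
#         raise SystemExit('ninja (or ninja-build) is required')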
|
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Cells Messaging module
"""
import contextlib
import mock
import mox
from oslo.config import cfg
from oslo import messaging as oslo_messaging
from nova.cells import messaging
from nova.cells import utils as cells_utils
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.network import model as network_model
from nova.objects import base as objects_base
from nova.objects import fields as objects_fields
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import rpc
from nova import test
from nova.tests.cells import fakes
from nova.tests import fake_instance_actions
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
class CellsMessageClassesTestCase(test.TestCase):
"""Test case for the main Cells Message classes."""
def setUp(self):
super(CellsMessageClassesTestCase, self).setUp()
fakes.init(self)
self.ctxt = context.RequestContext('fake', 'fake')
self.our_name = 'api-cell'
self.msg_runner = fakes.get_message_runner(self.our_name)
self.state_manager = self.msg_runner.state_manager
def test_reverse_path(self):
path = 'a!b!c!d'
expected = 'd!c!b!a'
rev_path = messaging._reverse_path(path)
self.assertEqual(rev_path, expected)
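    # Editorial note: in these tests a routing path is a '!'-separated chain
    # of cell names ordered from the API cell downward; reversing it gives
    # the path a response must travel back up.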
def test_response_cell_name_from_path(self):
# test array with tuples of inputs/expected outputs
test_paths = [('cell1', 'cell1'),
('cell1!cell2', 'cell2!cell1'),
('cell1!cell2!cell3', 'cell3!cell2!cell1')]
for test_input, expected_output in test_paths:
self.assertEqual(expected_output,
messaging._response_cell_name_from_path(test_input))
def test_response_cell_name_from_path_neighbor_only(self):
# test array with tuples of inputs/expected outputs
test_paths = [('cell1', 'cell1'),
('cell1!cell2', 'cell2!cell1'),
('cell1!cell2!cell3', 'cell3!cell2')]
for test_input, expected_output in test_paths:
self.assertEqual(expected_output,
messaging._response_cell_name_from_path(test_input,
neighbor_only=True))
def test_targeted_message(self):
self.flags(max_hop_count=99, group='cells')
target_cell = 'api-cell!child-cell2!grandchild-cell1'
method = 'fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
self.assertEqual(self.ctxt, tgt_message.ctxt)
self.assertEqual(method, tgt_message.method_name)
self.assertEqual(method_kwargs, tgt_message.method_kwargs)
self.assertEqual(direction, tgt_message.direction)
        self.assertEqual(target_cell, tgt_message.target_cell)
self.assertFalse(tgt_message.fanout)
self.assertFalse(tgt_message.need_response)
self.assertEqual(self.our_name, tgt_message.routing_path)
self.assertEqual(1, tgt_message.hop_count)
self.assertEqual(99, tgt_message.max_hop_count)
self.assertFalse(tgt_message.is_broadcast)
# Correct next hop?
next_hop = tgt_message._get_next_hop()
child_cell = self.state_manager.get_child_cell('child-cell2')
self.assertEqual(child_cell, next_hop)
def test_create_targeted_message_with_response(self):
self.flags(max_hop_count=99, group='cells')
our_name = 'child-cell1'
target_cell = 'child-cell1!api-cell'
msg_runner = fakes.get_message_runner(our_name)
method = 'fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'up'
tgt_message = messaging._TargetedMessage(msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell,
need_response=True)
self.assertEqual(self.ctxt, tgt_message.ctxt)
self.assertEqual(method, tgt_message.method_name)
self.assertEqual(method_kwargs, tgt_message.method_kwargs)
self.assertEqual(direction, tgt_message.direction)
        self.assertEqual(target_cell, tgt_message.target_cell)
self.assertFalse(tgt_message.fanout)
self.assertTrue(tgt_message.need_response)
self.assertEqual(our_name, tgt_message.routing_path)
self.assertEqual(1, tgt_message.hop_count)
self.assertEqual(99, tgt_message.max_hop_count)
self.assertFalse(tgt_message.is_broadcast)
# Correct next hop?
next_hop = tgt_message._get_next_hop()
parent_cell = msg_runner.state_manager.get_parent_cell('api-cell')
self.assertEqual(parent_cell, next_hop)
def test_targeted_message_when_target_is_cell_state(self):
method = 'fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
target_cell = self.state_manager.get_child_cell('child-cell2')
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
self.assertEqual('api-cell!child-cell2', tgt_message.target_cell)
# Correct next hop?
next_hop = tgt_message._get_next_hop()
self.assertEqual(target_cell, next_hop)
def test_targeted_message_when_target_cell_state_is_me(self):
method = 'fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
target_cell = self.state_manager.get_my_state()
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
self.assertEqual('api-cell', tgt_message.target_cell)
# Correct next hop?
next_hop = tgt_message._get_next_hop()
self.assertEqual(target_cell, next_hop)
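    # As the two tests above show, _TargetedMessage accepts either a
    # '!'-separated path string or a CellState object as the target; a
    # CellState target is normalized back to its full path.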
def test_create_broadcast_message(self):
        self.flags(name='api-cell', max_hop_count=99, group='cells')
method = 'fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction)
self.assertEqual(self.ctxt, bcast_message.ctxt)
self.assertEqual(method, bcast_message.method_name)
self.assertEqual(method_kwargs, bcast_message.method_kwargs)
self.assertEqual(direction, bcast_message.direction)
self.assertFalse(bcast_message.fanout)
self.assertFalse(bcast_message.need_response)
self.assertEqual(self.our_name, bcast_message.routing_path)
self.assertEqual(1, bcast_message.hop_count)
self.assertEqual(99, bcast_message.max_hop_count)
self.assertTrue(bcast_message.is_broadcast)
# Correct next hops?
next_hops = bcast_message._get_next_hops()
child_cells = self.state_manager.get_child_cells()
self.assertEqual(child_cells, next_hops)
def test_create_broadcast_message_with_response(self):
self.flags(max_hop_count=99, group='cells')
our_name = 'child-cell1'
msg_runner = fakes.get_message_runner(our_name)
method = 'fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'up'
bcast_message = messaging._BroadcastMessage(msg_runner, self.ctxt,
method, method_kwargs, direction, need_response=True)
self.assertEqual(self.ctxt, bcast_message.ctxt)
self.assertEqual(method, bcast_message.method_name)
self.assertEqual(method_kwargs, bcast_message.method_kwargs)
self.assertEqual(direction, bcast_message.direction)
self.assertFalse(bcast_message.fanout)
self.assertTrue(bcast_message.need_response)
self.assertEqual(our_name, bcast_message.routing_path)
self.assertEqual(1, bcast_message.hop_count)
self.assertEqual(99, bcast_message.max_hop_count)
self.assertTrue(bcast_message.is_broadcast)
# Correct next hops?
next_hops = bcast_message._get_next_hops()
parent_cells = msg_runner.state_manager.get_parent_cells()
self.assertEqual(parent_cells, next_hops)
def test_self_targeted_message(self):
target_cell = 'api-cell'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
call_info = {}
def our_fake_method(message, **kwargs):
call_info['context'] = message.ctxt
call_info['routing_path'] = message.routing_path
call_info['kwargs'] = kwargs
fakes.stub_tgt_method(self, 'api-cell', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
tgt_message.process()
self.assertEqual(self.ctxt, call_info['context'])
self.assertEqual(method_kwargs, call_info['kwargs'])
self.assertEqual(target_cell, call_info['routing_path'])
def test_child_targeted_message(self):
target_cell = 'api-cell!child-cell1'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
call_info = {}
def our_fake_method(message, **kwargs):
call_info['context'] = message.ctxt
call_info['routing_path'] = message.routing_path
call_info['kwargs'] = kwargs
fakes.stub_tgt_method(self, 'child-cell1', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
tgt_message.process()
self.assertEqual(self.ctxt, call_info['context'])
self.assertEqual(method_kwargs, call_info['kwargs'])
self.assertEqual(target_cell, call_info['routing_path'])
def test_child_targeted_message_with_object(self):
target_cell = 'api-cell!child-cell1'
method = 'our_fake_method'
direction = 'down'
call_info = {}
class CellsMsgingTestObject(objects_base.NovaObject):
"""Test object. We just need 1 field in order to test
that this gets serialized properly.
"""
fields = {'test': objects_fields.StringField()}
test_obj = CellsMsgingTestObject()
test_obj.test = 'meow'
method_kwargs = dict(obj=test_obj, arg1=1, arg2=2)
def our_fake_method(message, **kwargs):
call_info['context'] = message.ctxt
call_info['routing_path'] = message.routing_path
call_info['kwargs'] = kwargs
fakes.stub_tgt_method(self, 'child-cell1', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
tgt_message.process()
self.assertEqual(self.ctxt, call_info['context'])
self.assertEqual(target_cell, call_info['routing_path'])
self.assertEqual(3, len(call_info['kwargs']))
self.assertEqual(1, call_info['kwargs']['arg1'])
self.assertEqual(2, call_info['kwargs']['arg2'])
# Verify we get a new object with what we expect.
obj = call_info['kwargs']['obj']
self.assertIsInstance(obj, CellsMsgingTestObject)
self.assertNotEqual(id(test_obj), id(obj))
self.assertEqual(test_obj.test, obj.test)
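        # The differing ids together with the equal field value suggest the
        # NovaObject was serialized on the way down and deserialized in the
        # target cell, rather than passed by reference.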
def test_grandchild_targeted_message(self):
target_cell = 'api-cell!child-cell2!grandchild-cell1'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
call_info = {}
def our_fake_method(message, **kwargs):
call_info['context'] = message.ctxt
call_info['routing_path'] = message.routing_path
call_info['kwargs'] = kwargs
fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell)
tgt_message.process()
self.assertEqual(self.ctxt, call_info['context'])
self.assertEqual(method_kwargs, call_info['kwargs'])
self.assertEqual(target_cell, call_info['routing_path'])
def test_grandchild_targeted_message_with_response(self):
target_cell = 'api-cell!child-cell2!grandchild-cell1'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
call_info = {}
def our_fake_method(message, **kwargs):
call_info['context'] = message.ctxt
call_info['routing_path'] = message.routing_path
call_info['kwargs'] = kwargs
return 'our_fake_response'
fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell,
need_response=True)
response = tgt_message.process()
self.assertEqual(self.ctxt, call_info['context'])
self.assertEqual(method_kwargs, call_info['kwargs'])
self.assertEqual(target_cell, call_info['routing_path'])
self.assertFalse(response.failure)
self.assertEqual(response.value_or_raise(), 'our_fake_response')
def test_grandchild_targeted_message_with_error(self):
target_cell = 'api-cell!child-cell2!grandchild-cell1'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
def our_fake_method(message, **kwargs):
raise test.TestingException('this should be returned')
fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell,
need_response=True)
response = tgt_message.process()
self.assertTrue(response.failure)
self.assertRaises(test.TestingException, response.value_or_raise)
def test_grandchild_targeted_message_max_hops(self):
self.flags(max_hop_count=2, group='cells')
target_cell = 'api-cell!child-cell2!grandchild-cell1'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
def our_fake_method(message, **kwargs):
raise test.TestingException('should not be reached')
fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
our_fake_method)
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell,
need_response=True)
response = tgt_message.process()
self.assertTrue(response.failure)
self.assertRaises(exception.CellMaxHopCountReached,
response.value_or_raise)
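        # With max_hop_count=2, the three-hop route down to the grandchild
        # exceeds the limit, so the response carries the hop-count error
        # instead of ever reaching our_fake_method.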
def test_targeted_message_invalid_cell(self):
target_cell = 'api-cell!child-cell2!grandchild-cell4'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell,
need_response=True)
response = tgt_message.process()
self.assertTrue(response.failure)
self.assertRaises(exception.CellRoutingInconsistency,
response.value_or_raise)
def test_targeted_message_invalid_cell2(self):
target_cell = 'unknown-cell!child-cell2'
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
tgt_message = messaging._TargetedMessage(self.msg_runner,
self.ctxt, method,
method_kwargs, direction,
target_cell,
need_response=True)
response = tgt_message.process()
self.assertTrue(response.failure)
self.assertRaises(exception.CellRoutingInconsistency,
response.value_or_raise)
def test_broadcast_routing(self):
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
cells = set()
def our_fake_method(message, **kwargs):
cells.add(message.routing_path)
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs,
direction,
run_locally=True)
bcast_message.process()
        # fakes creates 8 cells (including ourselves).
self.assertEqual(len(cells), 8)
def test_broadcast_routing_up(self):
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'up'
msg_runner = fakes.get_message_runner('grandchild-cell3')
cells = set()
def our_fake_method(message, **kwargs):
cells.add(message.routing_path)
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
bcast_message = messaging._BroadcastMessage(msg_runner, self.ctxt,
method, method_kwargs,
direction,
run_locally=True)
bcast_message.process()
# Paths are reversed, since going 'up'
expected = set(['grandchild-cell3', 'grandchild-cell3!child-cell3',
'grandchild-cell3!child-cell3!api-cell'])
self.assertEqual(expected, cells)
def test_broadcast_routing_without_ourselves(self):
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
cells = set()
def our_fake_method(message, **kwargs):
cells.add(message.routing_path)
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs,
direction,
run_locally=False)
bcast_message.process()
        # fakes creates 8 cells (including ourselves), so we should see
        # only 7 here.
self.assertEqual(len(cells), 7)
def test_broadcast_routing_with_response(self):
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
def our_fake_method(message, **kwargs):
return 'response-%s' % message.routing_path
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs,
direction,
run_locally=True,
need_response=True)
responses = bcast_message.process()
self.assertEqual(len(responses), 8)
for response in responses:
self.assertFalse(response.failure)
self.assertEqual('response-%s' % response.cell_name,
response.value_or_raise())
def test_broadcast_routing_with_response_max_hops(self):
self.flags(max_hop_count=2, group='cells')
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
def our_fake_method(message, **kwargs):
return 'response-%s' % message.routing_path
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs,
direction,
run_locally=True,
need_response=True)
responses = bcast_message.process()
# Should only get responses from our immediate children (and
# ourselves)
self.assertEqual(len(responses), 5)
for response in responses:
self.assertFalse(response.failure)
self.assertEqual('response-%s' % response.cell_name,
response.value_or_raise())
def test_broadcast_routing_with_all_erroring(self):
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
def our_fake_method(message, **kwargs):
raise test.TestingException('fake failure')
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs,
direction,
run_locally=True,
need_response=True)
responses = bcast_message.process()
self.assertEqual(len(responses), 8)
for response in responses:
self.assertTrue(response.failure)
self.assertRaises(test.TestingException, response.value_or_raise)
def test_broadcast_routing_with_two_erroring(self):
method = 'our_fake_method'
method_kwargs = dict(arg1=1, arg2=2)
direction = 'down'
def our_fake_method_failing(message, **kwargs):
raise test.TestingException('fake failure')
def our_fake_method(message, **kwargs):
return 'response-%s' % message.routing_path
fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
fakes.stub_bcast_method(self, 'child-cell2', 'our_fake_method',
our_fake_method_failing)
fakes.stub_bcast_method(self, 'grandchild-cell3', 'our_fake_method',
our_fake_method_failing)
bcast_message = messaging._BroadcastMessage(self.msg_runner,
self.ctxt, method,
method_kwargs,
direction,
run_locally=True,
need_response=True)
responses = bcast_message.process()
self.assertEqual(len(responses), 8)
failure_responses = [resp for resp in responses if resp.failure]
success_responses = [resp for resp in responses if not resp.failure]
self.assertEqual(len(failure_responses), 2)
self.assertEqual(len(success_responses), 6)
for response in success_responses:
self.assertFalse(response.failure)
self.assertEqual('response-%s' % response.cell_name,
response.value_or_raise())
for response in failure_responses:
self.assertIn(response.cell_name, ['api-cell!child-cell2',
'api-cell!child-cell3!grandchild-cell3'])
self.assertTrue(response.failure)
self.assertRaises(test.TestingException, response.value_or_raise)
class CellsTargetedMethodsTestCase(test.TestCase):
"""Test case for _TargetedMessageMethods class. Most of these
tests actually test the full path from the MessageRunner through
to the functionality of the message method. Hits 2 birds with 1
stone, even though it's a little more than a unit test.
"""
def setUp(self):
super(CellsTargetedMethodsTestCase, self).setUp()
fakes.init(self)
self.ctxt = context.RequestContext('fake', 'fake')
self._setup_attrs('api-cell', 'api-cell!child-cell2')
def _setup_attrs(self, source_cell, target_cell):
self.tgt_cell_name = target_cell
self.src_msg_runner = fakes.get_message_runner(source_cell)
self.src_state_manager = self.src_msg_runner.state_manager
tgt_shortname = target_cell.split('!')[-1]
self.tgt_cell_mgr = fakes.get_cells_manager(tgt_shortname)
self.tgt_msg_runner = self.tgt_cell_mgr.msg_runner
self.tgt_scheduler = self.tgt_msg_runner.scheduler
self.tgt_state_manager = self.tgt_msg_runner.state_manager
methods_cls = self.tgt_msg_runner.methods_by_type['targeted']
self.tgt_methods_cls = methods_cls
self.tgt_compute_api = methods_cls.compute_api
self.tgt_host_api = methods_cls.host_api
self.tgt_db_inst = methods_cls.db
self.tgt_c_rpcapi = methods_cls.compute_rpcapi
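    # The src_* attributes below belong to the cell sending the message and
    # the tgt_* attributes to the cell expected to execute it, so each test
    # stubs out the target cell's db/compute/host APIs and then invokes the
    # corresponding MessageRunner method on the source cell.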
def test_schedule_run_instance(self):
host_sched_kwargs = {'filter_properties': {},
'key1': 'value1',
'key2': 'value2'}
self.mox.StubOutWithMock(self.tgt_scheduler, 'run_instance')
self.tgt_scheduler.run_instance(self.ctxt, host_sched_kwargs)
self.mox.ReplayAll()
self.src_msg_runner.schedule_run_instance(self.ctxt,
self.tgt_cell_name,
host_sched_kwargs)
def test_build_instances(self):
build_inst_kwargs = {'filter_properties': {},
'key1': 'value1',
'key2': 'value2'}
self.mox.StubOutWithMock(self.tgt_scheduler, 'build_instances')
self.tgt_scheduler.build_instances(self.ctxt, build_inst_kwargs)
self.mox.ReplayAll()
self.src_msg_runner.build_instances(self.ctxt, self.tgt_cell_name,
build_inst_kwargs)
def test_run_compute_api_method(self):
instance_uuid = 'fake_instance_uuid'
method_info = {'method': 'backup',
'method_args': (instance_uuid, 2, 3),
'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
self.mox.StubOutWithMock(self.tgt_compute_api, 'backup')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
instance_uuid).AndReturn('fake_instance')
self.tgt_compute_api.backup(self.ctxt, 'fake_instance', 2, 3,
arg1='val1', arg2='val2').AndReturn('fake_result')
self.mox.ReplayAll()
response = self.src_msg_runner.run_compute_api_method(
self.ctxt,
self.tgt_cell_name,
method_info,
True)
result = response.value_or_raise()
self.assertEqual('fake_result', result)
def _run_compute_api_method_expects_object(self, tgt_compute_api_function,
method_name,
expected_attrs=None):
        # Runs compute API methods that expect the instance to be an object.
instance_uuid = 'fake_instance_uuid'
method_info = {'method': method_name,
'method_args': (instance_uuid, 2, 3),
'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
instance_uuid).AndReturn('fake_instance')
def get_instance_mock():
# NOTE(comstud): This block of code simulates the following
# mox code:
#
# self.mox.StubOutWithMock(instance_obj, 'Instance',
# use_mock_anything=True)
# self.mox.StubOutWithMock(instance_obj.Instance,
# '_from_db_object')
# instance_mock = self.mox.CreateMock(instance_obj.Instance)
# instance_obj.Instance().AndReturn(instance_mock)
#
            # Unfortunately, the above code fails on py27 due to an
            # issue with the Mock object, similar to the one described here:
            # https://code.google.com/p/pymox/issues/detail?id=35
#
class FakeInstance(object):
@classmethod
def _from_db_object(cls, ctxt, obj, db_obj, **kwargs):
pass
instance_mock = FakeInstance()
def fake_instance():
return instance_mock
self.stubs.Set(instance_obj, 'Instance', fake_instance)
self.mox.StubOutWithMock(instance_mock, '_from_db_object')
return instance_mock
instance = get_instance_mock()
instance._from_db_object(self.ctxt,
instance,
'fake_instance',
expected_attrs=expected_attrs
).AndReturn(instance)
tgt_compute_api_function(self.ctxt, instance, 2, 3,
arg1='val1', arg2='val2').AndReturn('fake_result')
self.mox.ReplayAll()
response = self.src_msg_runner.run_compute_api_method(
self.ctxt,
self.tgt_cell_name,
method_info,
True)
result = response.value_or_raise()
self.assertEqual('fake_result', result)
def test_run_compute_api_method_expects_obj(self):
# Run compute_api start method
self.mox.StubOutWithMock(self.tgt_compute_api, 'start')
self._run_compute_api_method_expects_object(self.tgt_compute_api.start,
'start')
def test_run_compute_api_method_expects_obj_with_info_cache(self):
        # Run the compute_api shelve method, as it requires info_cache and
        # metadata to be present in the instance object
self.mox.StubOutWithMock(self.tgt_compute_api, 'shelve')
self._run_compute_api_method_expects_object(
self.tgt_compute_api.shelve, 'shelve',
expected_attrs=['metadata', 'info_cache'])
def test_run_compute_api_method_unknown_instance(self):
        # An unknown instance should trigger a broadcast up indicating the
        # instance is gone.
instance_uuid = 'fake_instance_uuid'
instance = {'uuid': instance_uuid}
method_info = {'method': 'reboot',
'method_args': (instance_uuid, 2, 3),
'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.tgt_msg_runner,
'instance_destroy_at_top')
self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
'fake_instance_uuid').AndRaise(
exception.InstanceNotFound(instance_id=instance_uuid))
self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, instance)
self.mox.ReplayAll()
response = self.src_msg_runner.run_compute_api_method(
self.ctxt,
self.tgt_cell_name,
method_info,
True)
self.assertRaises(exception.InstanceNotFound,
response.value_or_raise)
def test_update_capabilities(self):
# Route up to API
self._setup_attrs('child-cell2', 'child-cell2!api-cell')
capabs = {'cap1': set(['val1', 'val2']),
'cap2': set(['val3'])}
        # The list(set([])) looks silly, but we can't assume the order of
        # the resulting list.  This should match the behavior of the code
        # under test, which converts each set to a list.
expected_capabs = {'cap1': list(set(['val1', 'val2'])),
'cap2': ['val3']}
self.mox.StubOutWithMock(self.src_state_manager,
'get_our_capabilities')
self.mox.StubOutWithMock(self.tgt_state_manager,
'update_cell_capabilities')
self.mox.StubOutWithMock(self.tgt_msg_runner,
'tell_parents_our_capabilities')
self.src_state_manager.get_our_capabilities().AndReturn(capabs)
self.tgt_state_manager.update_cell_capabilities('child-cell2',
expected_capabs)
self.tgt_msg_runner.tell_parents_our_capabilities(self.ctxt)
self.mox.ReplayAll()
self.src_msg_runner.tell_parents_our_capabilities(self.ctxt)
def test_update_capacities(self):
self._setup_attrs('child-cell2', 'child-cell2!api-cell')
capacs = 'fake_capacs'
self.mox.StubOutWithMock(self.src_state_manager,
'get_our_capacities')
self.mox.StubOutWithMock(self.tgt_state_manager,
'update_cell_capacities')
self.mox.StubOutWithMock(self.tgt_msg_runner,
'tell_parents_our_capacities')
self.src_state_manager.get_our_capacities().AndReturn(capacs)
self.tgt_state_manager.update_cell_capacities('child-cell2',
capacs)
self.tgt_msg_runner.tell_parents_our_capacities(self.ctxt)
self.mox.ReplayAll()
self.src_msg_runner.tell_parents_our_capacities(self.ctxt)
def test_announce_capabilities(self):
self._setup_attrs('api-cell', 'api-cell!child-cell1')
# To make this easier to test, make us only have 1 child cell.
cell_state = self.src_state_manager.child_cells['child-cell1']
self.src_state_manager.child_cells = {'child-cell1': cell_state}
self.mox.StubOutWithMock(self.tgt_msg_runner,
'tell_parents_our_capabilities')
self.tgt_msg_runner.tell_parents_our_capabilities(self.ctxt)
self.mox.ReplayAll()
self.src_msg_runner.ask_children_for_capabilities(self.ctxt)
def test_announce_capacities(self):
self._setup_attrs('api-cell', 'api-cell!child-cell1')
# To make this easier to test, make us only have 1 child cell.
cell_state = self.src_state_manager.child_cells['child-cell1']
self.src_state_manager.child_cells = {'child-cell1': cell_state}
self.mox.StubOutWithMock(self.tgt_msg_runner,
'tell_parents_our_capacities')
self.tgt_msg_runner.tell_parents_our_capacities(self.ctxt)
self.mox.ReplayAll()
self.src_msg_runner.ask_children_for_capacities(self.ctxt)
def test_service_get_by_compute_host(self):
fake_host_name = 'fake-host-name'
self.mox.StubOutWithMock(self.tgt_db_inst,
'service_get_by_compute_host')
self.tgt_db_inst.service_get_by_compute_host(self.ctxt,
fake_host_name).AndReturn('fake-service')
self.mox.ReplayAll()
response = self.src_msg_runner.service_get_by_compute_host(
self.ctxt,
self.tgt_cell_name,
fake_host_name)
result = response.value_or_raise()
self.assertEqual('fake-service', result)
def test_service_update(self):
binary = 'nova-compute'
fake_service = dict(id=42, host='fake_host', binary='nova-compute',
topic='compute')
fake_compute = dict(
id=7116, service_id=42, host='fake_host', vcpus=0, memory_mb=0,
local_gb=0, vcpus_used=0, memory_mb_used=0, local_gb_used=0,
hypervisor_type=0, hypervisor_version=0, hypervisor_hostname=0,
free_ram_mb=0, free_disk_gb=0, current_workload=0, running_vms=0,
cpu_info='HAL', disk_available_least=0)
params_to_update = {'disabled': True, 'report_count': 13}
ctxt = context.RequestContext('fake_user', 'fake_project',
is_admin=True)
# We use the real DB for this test, as it's too hard to reach the
# host_api to mock out its DB methods
db.service_create(ctxt, fake_service)
db.compute_node_create(ctxt, fake_compute)
self.mox.ReplayAll()
response = self.src_msg_runner.service_update(
ctxt, self.tgt_cell_name,
'fake_host', binary, params_to_update)
result = response.value_or_raise()
result.pop('created_at', None)
result.pop('updated_at', None)
result.pop('disabled_reason', None)
expected_result = dict(
deleted=0, deleted_at=None,
binary=fake_service['binary'],
disabled=True, # We just updated this..
report_count=13, # ..and this
host='fake_host', id=42,
topic='compute')
self.assertEqual(expected_result, result)
def test_service_delete(self):
fake_service = dict(id=42, host='fake_host', binary='nova-compute',
topic='compute')
ctxt = self.ctxt.elevated()
db.service_create(ctxt, fake_service)
self.src_msg_runner.service_delete(
ctxt, self.tgt_cell_name, fake_service['id'])
self.assertRaises(exception.ServiceNotFound,
db.service_get, ctxt, fake_service['id'])
def test_proxy_rpc_to_manager_call(self):
fake_topic = 'fake-topic'
fake_rpc_message = {'method': 'fake_rpc_method', 'args': {}}
fake_host_name = 'fake-host-name'
self.mox.StubOutWithMock(self.tgt_db_inst,
'service_get_by_compute_host')
self.tgt_db_inst.service_get_by_compute_host(self.ctxt,
fake_host_name)
target = oslo_messaging.Target(topic='fake-topic')
rpcclient = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(rpc, 'get_client')
rpc.get_client(target).AndReturn(rpcclient)
rpcclient.prepare(timeout=5).AndReturn(rpcclient)
rpcclient.call(mox.IgnoreArg(),
'fake_rpc_method').AndReturn('fake_result')
self.mox.ReplayAll()
response = self.src_msg_runner.proxy_rpc_to_manager(
self.ctxt,
self.tgt_cell_name,
fake_host_name,
fake_topic,
fake_rpc_message, True, timeout=5)
result = response.value_or_raise()
self.assertEqual('fake_result', result)
def test_proxy_rpc_to_manager_cast(self):
fake_topic = 'fake-topic'
fake_rpc_message = {'method': 'fake_rpc_method', 'args': {}}
fake_host_name = 'fake-host-name'
self.mox.StubOutWithMock(self.tgt_db_inst,
'service_get_by_compute_host')
self.tgt_db_inst.service_get_by_compute_host(self.ctxt,
fake_host_name)
target = oslo_messaging.Target(topic='fake-topic')
rpcclient = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(rpc, 'get_client')
rpc.get_client(target).AndReturn(rpcclient)
rpcclient.cast(mox.IgnoreArg(), 'fake_rpc_method')
self.mox.ReplayAll()
self.src_msg_runner.proxy_rpc_to_manager(
self.ctxt,
self.tgt_cell_name,
fake_host_name,
fake_topic,
fake_rpc_message, False, timeout=None)
def test_task_log_get_all_targetted(self):
task_name = 'fake_task_name'
begin = 'fake_begin'
end = 'fake_end'
host = 'fake_host'
state = 'fake_state'
self.mox.StubOutWithMock(self.tgt_db_inst, 'task_log_get_all')
self.tgt_db_inst.task_log_get_all(self.ctxt, task_name,
begin, end, host=host,
state=state).AndReturn(['fake_result'])
self.mox.ReplayAll()
response = self.src_msg_runner.task_log_get_all(self.ctxt,
self.tgt_cell_name, task_name, begin, end, host=host,
state=state)
self.assertIsInstance(response, list)
self.assertEqual(1, len(response))
result = response[0].value_or_raise()
self.assertEqual(['fake_result'], result)
def test_compute_node_get(self):
compute_id = 'fake-id'
self.mox.StubOutWithMock(self.tgt_db_inst, 'compute_node_get')
self.tgt_db_inst.compute_node_get(self.ctxt,
compute_id).AndReturn('fake_result')
self.mox.ReplayAll()
response = self.src_msg_runner.compute_node_get(self.ctxt,
self.tgt_cell_name, compute_id)
result = response.value_or_raise()
self.assertEqual('fake_result', result)
def test_actions_get(self):
fake_uuid = fake_instance_actions.FAKE_UUID
fake_req_id = fake_instance_actions.FAKE_REQUEST_ID1
fake_act = fake_instance_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
self.mox.StubOutWithMock(self.tgt_db_inst, 'actions_get')
self.tgt_db_inst.actions_get(self.ctxt,
'fake-uuid').AndReturn([fake_act])
self.mox.ReplayAll()
response = self.src_msg_runner.actions_get(self.ctxt,
self.tgt_cell_name,
'fake-uuid')
result = response.value_or_raise()
self.assertEqual([jsonutils.to_primitive(fake_act)], result)
def test_action_get_by_request_id(self):
fake_uuid = fake_instance_actions.FAKE_UUID
fake_req_id = fake_instance_actions.FAKE_REQUEST_ID1
fake_act = fake_instance_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
self.mox.StubOutWithMock(self.tgt_db_inst, 'action_get_by_request_id')
self.tgt_db_inst.action_get_by_request_id(self.ctxt,
'fake-uuid', 'req-fake').AndReturn(fake_act)
self.mox.ReplayAll()
response = self.src_msg_runner.action_get_by_request_id(self.ctxt,
self.tgt_cell_name, 'fake-uuid', 'req-fake')
result = response.value_or_raise()
self.assertEqual(jsonutils.to_primitive(fake_act), result)
def test_action_events_get(self):
fake_action_id = fake_instance_actions.FAKE_ACTION_ID1
fake_events = fake_instance_actions.FAKE_EVENTS[fake_action_id]
self.mox.StubOutWithMock(self.tgt_db_inst, 'action_events_get')
self.tgt_db_inst.action_events_get(self.ctxt,
'fake-action').AndReturn(fake_events)
self.mox.ReplayAll()
response = self.src_msg_runner.action_events_get(self.ctxt,
self.tgt_cell_name,
'fake-action')
result = response.value_or_raise()
self.assertEqual(jsonutils.to_primitive(fake_events), result)
def test_validate_console_port(self):
instance_uuid = 'fake_instance_uuid'
instance = {'uuid': instance_uuid}
console_port = 'fake-port'
console_type = 'fake-type'
self.mox.StubOutWithMock(self.tgt_c_rpcapi, 'validate_console_port')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
instance_uuid).AndReturn(instance)
self.tgt_c_rpcapi.validate_console_port(self.ctxt,
instance, console_port, console_type).AndReturn('fake_result')
self.mox.ReplayAll()
response = self.src_msg_runner.validate_console_port(self.ctxt,
self.tgt_cell_name, instance_uuid, console_port,
console_type)
result = response.value_or_raise()
self.assertEqual('fake_result', result)
def test_get_migrations_for_a_given_cell(self):
filters = {'cell_name': 'child-cell2', 'status': 'confirmed'}
migrations_in_progress = [{'id': 123}]
self.mox.StubOutWithMock(self.tgt_compute_api,
'get_migrations')
self.tgt_compute_api.get_migrations(self.ctxt, filters).\
AndReturn(migrations_in_progress)
self.mox.ReplayAll()
responses = self.src_msg_runner.get_migrations(
self.ctxt,
self.tgt_cell_name, False, filters)
result = responses[0].value_or_raise()
self.assertEqual(migrations_in_progress, result)
def test_get_migrations_for_an_invalid_cell(self):
filters = {'cell_name': 'invalid_Cell', 'status': 'confirmed'}
responses = self.src_msg_runner.get_migrations(
self.ctxt,
'api_cell!invalid_cell', False, filters)
self.assertEqual(0, len(responses))
def test_call_compute_api_with_obj(self):
instance = instance_obj.Instance()
instance.uuid = uuidutils.generate_uuid()
self.mox.StubOutWithMock(instance, 'refresh')
# Using 'snapshot' for this test, because it
# takes args and kwargs.
self.mox.StubOutWithMock(self.tgt_compute_api, 'snapshot')
instance.refresh(self.ctxt)
self.tgt_compute_api.snapshot(
self.ctxt, instance, 'name',
extra_properties='props').AndReturn('foo')
self.mox.ReplayAll()
result = self.tgt_methods_cls._call_compute_api_with_obj(
self.ctxt, instance, 'snapshot', 'name',
extra_properties='props')
self.assertEqual('foo', result)
def test_call_compute_api_with_obj_no_cache(self):
instance = instance_obj.Instance()
instance.uuid = uuidutils.generate_uuid()
error = exception.InstanceInfoCacheNotFound(
instance_uuid=instance.uuid)
with mock.patch.object(instance, 'refresh', side_effect=error):
self.assertRaises(exception.InstanceInfoCacheNotFound,
self.tgt_methods_cls._call_compute_api_with_obj,
self.ctxt, instance, 'snapshot')
def test_call_delete_compute_api_with_obj_no_cache(self):
instance = instance_obj.Instance()
instance.uuid = uuidutils.generate_uuid()
error = exception.InstanceInfoCacheNotFound(
instance_uuid=instance.uuid)
with contextlib.nested(
mock.patch.object(instance, 'refresh',
side_effect=error),
mock.patch.object(self.tgt_compute_api, 'delete')) as (inst,
delete):
self.tgt_methods_cls._call_compute_api_with_obj(self.ctxt,
instance,
'delete')
delete.assert_called_once_with(self.ctxt, instance)
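        # Unlike the snapshot case above, 'delete' is expected to proceed
        # even when refresh() raises InstanceInfoCacheNotFound.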
def test_call_compute_with_obj_unknown_instance(self):
instance = instance_obj.Instance()
instance.uuid = uuidutils.generate_uuid()
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
self.mox.StubOutWithMock(instance, 'refresh')
self.mox.StubOutWithMock(self.tgt_msg_runner,
'instance_destroy_at_top')
instance.refresh(self.ctxt).AndRaise(
exception.InstanceNotFound(instance_id=instance.uuid))
self.tgt_msg_runner.instance_destroy_at_top(self.ctxt,
{'uuid': instance.uuid})
self.mox.ReplayAll()
self.assertRaises(exception.InstanceNotFound,
self.tgt_methods_cls._call_compute_api_with_obj,
self.ctxt, instance, 'snapshot', 'name')
def _instance_update_helper(self, admin_state_reset):
class FakeMessage(object):
pass
message = FakeMessage()
message.ctxt = self.ctxt
instance = instance_obj.Instance()
instance.cell_name = self.tgt_cell_name
instance.obj_reset_changes()
instance.task_state = 'meow'
instance.vm_state = 'wuff'
instance.user_data = 'foo'
instance.metadata = {'meta': 'data'}
instance.system_metadata = {'system': 'metadata'}
self.assertEqual(set(['user_data', 'vm_state', 'task_state',
'metadata', 'system_metadata']),
instance.obj_what_changed())
self.mox.StubOutWithMock(instance, 'save')
def _check_object(*args, **kwargs):
            # Unless admin_state_reset is set, the task_state and vm_state
            # changes should have been cleared before calling save()
if admin_state_reset:
self.assertEqual(
set(['user_data', 'vm_state', 'task_state']),
instance.obj_what_changed())
else:
self.assertEqual(set(['user_data']),
instance.obj_what_changed())
instance.save(self.ctxt, expected_task_state='exp_task',
expected_vm_state='exp_vm').WithSideEffects(
_check_object)
self.mox.ReplayAll()
self.tgt_methods_cls.instance_update_from_api(
message,
instance,
expected_vm_state='exp_vm',
expected_task_state='exp_task',
admin_state_reset=admin_state_reset)
def test_instance_update_from_api(self):
self._instance_update_helper(False)
def test_instance_update_from_api_admin_state_reset(self):
self._instance_update_helper(True)
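    # Helper for the per-action tests below: it stubs
    # _call_compute_api_with_obj, applies the method-name corrections
    # (e.g. 'terminate' -> 'delete') and translations (methods that do not
    # follow the '%s_instance' naming), dispatches the message method, and
    # optionally checks the returned value.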
def _test_instance_action_method(self, method, args, kwargs,
expected_args, expected_kwargs,
expect_result):
class FakeMessage(object):
pass
message = FakeMessage()
message.ctxt = self.ctxt
message.need_response = expect_result
meth_cls = self.tgt_methods_cls
self.mox.StubOutWithMock(meth_cls, '_call_compute_api_with_obj')
method_corrections = {
'terminate': 'delete',
}
api_method = method_corrections.get(method, method)
meth_cls._call_compute_api_with_obj(
self.ctxt, 'fake-instance', api_method,
*expected_args, **expected_kwargs).AndReturn('meow')
self.mox.ReplayAll()
method_translations = {'revert_resize': 'revert_resize',
'confirm_resize': 'confirm_resize',
'reset_network': 'reset_network',
'inject_network_info': 'inject_network_info',
}
tgt_method = method_translations.get(method,
'%s_instance' % method)
result = getattr(meth_cls, tgt_method)(
message, 'fake-instance', *args, **kwargs)
if expect_result:
self.assertEqual('meow', result)
def test_start_instance(self):
self._test_instance_action_method('start', (), {}, (), {}, False)
def test_stop_instance_cast(self):
self._test_instance_action_method('stop', (), {}, (),
{'do_cast': True}, False)
def test_stop_instance_call(self):
self._test_instance_action_method('stop', (), {}, (),
{'do_cast': False}, True)
def test_reboot_instance(self):
kwargs = dict(reboot_type='HARD')
self._test_instance_action_method('reboot', (), kwargs, (),
kwargs, False)
def test_suspend_instance(self):
self._test_instance_action_method('suspend', (), {}, (), {}, False)
def test_resume_instance(self):
self._test_instance_action_method('resume', (), {}, (), {}, False)
def test_get_host_uptime(self):
host_name = "fake-host"
host_uptime = (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
" 0.20, 0.12, 0.14")
self.mox.StubOutWithMock(self.tgt_host_api, 'get_host_uptime')
self.tgt_host_api.get_host_uptime(self.ctxt, host_name).\
AndReturn(host_uptime)
self.mox.ReplayAll()
response = self.src_msg_runner.get_host_uptime(self.ctxt,
self.tgt_cell_name,
host_name)
expected_host_uptime = response.value_or_raise()
self.assertEqual(host_uptime, expected_host_uptime)
def test_terminate_instance(self):
self._test_instance_action_method('terminate',
(), {}, (), {}, False)
def test_soft_delete_instance(self):
self._test_instance_action_method('soft_delete',
(), {}, (), {}, False)
def test_pause_instance(self):
self._test_instance_action_method('pause', (), {}, (), {}, False)
def test_unpause_instance(self):
self._test_instance_action_method('unpause', (), {}, (), {}, False)
def test_resize_instance(self):
kwargs = dict(flavor=dict(id=42, flavorid='orangemocchafrappuccino'),
extra_instance_updates=dict(cow='moo'))
expected_kwargs = dict(flavor_id='orangemocchafrappuccino', cow='moo')
self._test_instance_action_method('resize', (), kwargs,
(), expected_kwargs,
False)
def test_live_migrate_instance(self):
kwargs = dict(block_migration='fake-block-mig',
disk_over_commit='fake-commit',
host_name='fake-host')
expected_args = ('fake-block-mig', 'fake-commit', 'fake-host')
self._test_instance_action_method('live_migrate', (), kwargs,
expected_args, {}, False)
def test_revert_resize(self):
self._test_instance_action_method('revert_resize',
(), {}, (), {}, False)
def test_confirm_resize(self):
self._test_instance_action_method('confirm_resize',
(), {}, (), {}, False)
def test_reset_network(self):
self._test_instance_action_method('reset_network',
(), {}, (), {}, False)
def test_inject_network_info(self):
self._test_instance_action_method('inject_network_info',
(), {}, (), {}, False)
def test_snapshot_instance(self):
inst = instance_obj.Instance()
meth_cls = self.tgt_methods_cls
self.mox.StubOutWithMock(inst, 'refresh')
self.mox.StubOutWithMock(inst, 'save')
self.mox.StubOutWithMock(meth_cls.compute_rpcapi, 'snapshot_instance')
def check_state(expected_task_state=None):
self.assertEqual(task_states.IMAGE_SNAPSHOT_PENDING,
inst.task_state)
inst.refresh()
inst.save(expected_task_state=[None]).WithSideEffects(check_state)
meth_cls.compute_rpcapi.snapshot_instance(self.ctxt,
inst, 'image-id')
self.mox.ReplayAll()
class FakeMessage(object):
pass
message = FakeMessage()
message.ctxt = self.ctxt
message.need_response = False
meth_cls.snapshot_instance(message, inst, image_id='image-id')
def test_backup_instance(self):
inst = instance_obj.Instance()
meth_cls = self.tgt_methods_cls
self.mox.StubOutWithMock(inst, 'refresh')
self.mox.StubOutWithMock(inst, 'save')
self.mox.StubOutWithMock(meth_cls.compute_rpcapi, 'backup_instance')
def check_state(expected_task_state=None):
self.assertEqual(task_states.IMAGE_BACKUP, inst.task_state)
inst.refresh()
inst.save(expected_task_state=[None]).WithSideEffects(check_state)
meth_cls.compute_rpcapi.backup_instance(self.ctxt,
inst,
'image-id',
'backup-type',
'rotation')
self.mox.ReplayAll()
class FakeMessage(object):
pass
message = FakeMessage()
message.ctxt = self.ctxt
message.need_response = False
meth_cls.backup_instance(message, inst,
image_id='image-id',
backup_type='backup-type',
rotation='rotation')
class CellsBroadcastMethodsTestCase(test.TestCase):
"""Test case for _BroadcastMessageMethods class. Most of these
tests actually test the full path from the MessageRunner through
to the functionality of the message method. Hits 2 birds with 1
stone, even though it's a little more than a unit test.
"""
def setUp(self):
super(CellsBroadcastMethodsTestCase, self).setUp()
fakes.init(self)
self.ctxt = context.RequestContext('fake', 'fake')
self._setup_attrs()
def _setup_attrs(self, up=True):
mid_cell = 'child-cell2'
if up:
src_cell = 'grandchild-cell1'
tgt_cell = 'api-cell'
else:
src_cell = 'api-cell'
tgt_cell = 'grandchild-cell1'
self.src_msg_runner = fakes.get_message_runner(src_cell)
methods_cls = self.src_msg_runner.methods_by_type['broadcast']
self.src_methods_cls = methods_cls
self.src_db_inst = methods_cls.db
self.src_compute_api = methods_cls.compute_api
self.src_ca_rpcapi = methods_cls.consoleauth_rpcapi
if not up:
# fudge things so we only have 1 child to broadcast to
state_manager = self.src_msg_runner.state_manager
for cell in state_manager.get_child_cells():
if cell.name != 'child-cell2':
del state_manager.child_cells[cell.name]
self.mid_msg_runner = fakes.get_message_runner(mid_cell)
methods_cls = self.mid_msg_runner.methods_by_type['broadcast']
self.mid_methods_cls = methods_cls
self.mid_db_inst = methods_cls.db
self.mid_compute_api = methods_cls.compute_api
self.mid_ca_rpcapi = methods_cls.consoleauth_rpcapi
self.tgt_msg_runner = fakes.get_message_runner(tgt_cell)
methods_cls = self.tgt_msg_runner.methods_by_type['broadcast']
self.tgt_methods_cls = methods_cls
self.tgt_db_inst = methods_cls.db
self.tgt_compute_api = methods_cls.compute_api
self.tgt_ca_rpcapi = methods_cls.consoleauth_rpcapi
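    # The broadcast tests route through three cells: src (grandchild-cell1
    # going up, api-cell going down), mid (child-cell2), and tgt.  Each level
    # gets its own db/compute/consoleauth stubs so the tests can assert which
    # cells actually handle the broadcast.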
def test_at_the_top(self):
self.assertTrue(self.tgt_methods_cls._at_the_top())
self.assertFalse(self.mid_methods_cls._at_the_top())
self.assertFalse(self.src_methods_cls._at_the_top())
def test_apply_expected_states_building(self):
instance_info = {'vm_state': vm_states.BUILDING}
expected = dict(instance_info,
expected_vm_state=[vm_states.BUILDING, None])
self.src_methods_cls._apply_expected_states(instance_info)
self.assertEqual(expected, instance_info)
def test_apply_expected_states_resize_finish(self):
instance_info = {'task_state': task_states.RESIZE_FINISH}
exp_states = [task_states.RESIZE_FINISH,
task_states.RESIZE_MIGRATED,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_PREP]
expected = dict(instance_info, expected_task_state=exp_states)
self.src_methods_cls._apply_expected_states(instance_info)
self.assertEqual(expected, instance_info)
def _test_instance_update_at_top(self, net_info, exists=True):
fake_info_cache = {'id': 1,
'instance': 'fake_instance',
'network_info': net_info}
fake_sys_metadata = [{'id': 1,
'key': 'key1',
'value': 'value1'},
{'id': 2,
'key': 'key2',
'value': 'value2'}]
fake_instance = {'id': 2,
'uuid': 'fake_uuid',
'security_groups': 'fake',
'volumes': 'fake',
'cell_name': 'fake',
'name': 'fake',
'metadata': 'fake',
'info_cache': fake_info_cache,
'system_metadata': fake_sys_metadata,
'other': 'meow'}
expected_sys_metadata = {'key1': 'value1',
'key2': 'value2'}
expected_info_cache = {'network_info': "[]"}
expected_cell_name = 'api-cell!child-cell2!grandchild-cell1'
expected_instance = {'system_metadata': expected_sys_metadata,
'cell_name': expected_cell_name,
'other': 'meow',
'uuid': 'fake_uuid'}
# To show these should not be called in src/mid-level cell
self.mox.StubOutWithMock(self.src_db_inst, 'instance_update')
self.mox.StubOutWithMock(self.src_db_inst,
'instance_info_cache_update')
self.mox.StubOutWithMock(self.mid_db_inst, 'instance_update')
self.mox.StubOutWithMock(self.mid_db_inst,
'instance_info_cache_update')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_update')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_create')
self.mox.StubOutWithMock(self.tgt_db_inst,
'instance_info_cache_update')
mock = self.tgt_db_inst.instance_update(self.ctxt, 'fake_uuid',
expected_instance,
update_cells=False)
if not exists:
mock.AndRaise(exception.InstanceNotFound(instance_id='fake_uuid'))
self.tgt_db_inst.instance_create(self.ctxt,
expected_instance)
self.tgt_db_inst.instance_info_cache_update(self.ctxt, 'fake_uuid',
expected_info_cache)
self.mox.ReplayAll()
self.src_msg_runner.instance_update_at_top(self.ctxt, fake_instance)
def test_instance_update_at_top(self):
self._test_instance_update_at_top("[]")
def test_instance_update_at_top_netinfo_list(self):
self._test_instance_update_at_top([])
def test_instance_update_at_top_netinfo_model(self):
self._test_instance_update_at_top(network_model.NetworkInfo())
def test_instance_update_at_top_doesnt_already_exist(self):
self._test_instance_update_at_top([], exists=False)
def test_instance_update_at_top_with_building_state(self):
fake_info_cache = {'id': 1,
'instance': 'fake_instance',
'other': 'moo'}
fake_sys_metadata = [{'id': 1,
'key': 'key1',
'value': 'value1'},
{'id': 2,
'key': 'key2',
'value': 'value2'}]
fake_instance = {'id': 2,
'uuid': 'fake_uuid',
'security_groups': 'fake',
'volumes': 'fake',
'cell_name': 'fake',
'name': 'fake',
'metadata': 'fake',
'info_cache': fake_info_cache,
'system_metadata': fake_sys_metadata,
'vm_state': vm_states.BUILDING,
'other': 'meow'}
expected_sys_metadata = {'key1': 'value1',
'key2': 'value2'}
expected_info_cache = {'other': 'moo'}
expected_cell_name = 'api-cell!child-cell2!grandchild-cell1'
expected_instance = {'system_metadata': expected_sys_metadata,
'cell_name': expected_cell_name,
'other': 'meow',
'vm_state': vm_states.BUILDING,
'expected_vm_state': [vm_states.BUILDING, None],
'uuid': 'fake_uuid'}
# To show these should not be called in src/mid-level cell
self.mox.StubOutWithMock(self.src_db_inst, 'instance_update')
self.mox.StubOutWithMock(self.src_db_inst,
'instance_info_cache_update')
self.mox.StubOutWithMock(self.mid_db_inst, 'instance_update')
self.mox.StubOutWithMock(self.mid_db_inst,
'instance_info_cache_update')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_update')
self.mox.StubOutWithMock(self.tgt_db_inst,
'instance_info_cache_update')
self.tgt_db_inst.instance_update(self.ctxt, 'fake_uuid',
expected_instance,
update_cells=False)
self.tgt_db_inst.instance_info_cache_update(self.ctxt, 'fake_uuid',
expected_info_cache)
self.mox.ReplayAll()
self.src_msg_runner.instance_update_at_top(self.ctxt, fake_instance)
def test_instance_destroy_at_top(self):
fake_instance = {'uuid': 'fake_uuid'}
# To show these should not be called in src/mid-level cell
self.mox.StubOutWithMock(self.src_db_inst, 'instance_destroy')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_destroy')
self.tgt_db_inst.instance_destroy(self.ctxt, 'fake_uuid',
update_cells=False)
self.mox.ReplayAll()
self.src_msg_runner.instance_destroy_at_top(self.ctxt, fake_instance)
def test_instance_hard_delete_everywhere(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
instance = {'uuid': 'meow'}
# Should not be called in src (API cell)
self.mox.StubOutWithMock(self.src_compute_api, 'delete')
self.mox.StubOutWithMock(self.mid_compute_api, 'delete')
self.mox.StubOutWithMock(self.tgt_compute_api, 'delete')
self.mid_compute_api.delete(self.ctxt, instance)
self.tgt_compute_api.delete(self.ctxt, instance)
self.mox.ReplayAll()
self.src_msg_runner.instance_delete_everywhere(self.ctxt,
instance, 'hard')
def test_instance_soft_delete_everywhere(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
instance = {'uuid': 'meow'}
# Should not be called in src (API cell)
self.mox.StubOutWithMock(self.src_compute_api, 'soft_delete')
self.mox.StubOutWithMock(self.mid_compute_api, 'soft_delete')
self.mox.StubOutWithMock(self.tgt_compute_api, 'soft_delete')
self.mid_compute_api.soft_delete(self.ctxt, instance)
self.tgt_compute_api.soft_delete(self.ctxt, instance)
self.mox.ReplayAll()
self.src_msg_runner.instance_delete_everywhere(self.ctxt,
instance, 'soft')
def test_instance_fault_create_at_top(self):
fake_instance_fault = {'id': 1,
'other stuff': 2,
'more stuff': 3}
expected_instance_fault = {'other stuff': 2,
'more stuff': 3}
# Shouldn't be called for these 2 cells
self.mox.StubOutWithMock(self.src_db_inst, 'instance_fault_create')
self.mox.StubOutWithMock(self.mid_db_inst, 'instance_fault_create')
self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_fault_create')
self.tgt_db_inst.instance_fault_create(self.ctxt,
expected_instance_fault)
self.mox.ReplayAll()
self.src_msg_runner.instance_fault_create_at_top(self.ctxt,
fake_instance_fault)
def test_bw_usage_update_at_top(self):
fake_bw_update_info = {'uuid': 'fake_uuid',
'mac': 'fake_mac',
'start_period': 'fake_start_period',
'bw_in': 'fake_bw_in',
'bw_out': 'fake_bw_out',
'last_ctr_in': 'fake_last_ctr_in',
'last_ctr_out': 'fake_last_ctr_out',
'last_refreshed': 'fake_last_refreshed'}
# Shouldn't be called for these 2 cells
self.mox.StubOutWithMock(self.src_db_inst, 'bw_usage_update')
self.mox.StubOutWithMock(self.mid_db_inst, 'bw_usage_update')
self.mox.StubOutWithMock(self.tgt_db_inst, 'bw_usage_update')
self.tgt_db_inst.bw_usage_update(self.ctxt, **fake_bw_update_info)
self.mox.ReplayAll()
self.src_msg_runner.bw_usage_update_at_top(self.ctxt,
fake_bw_update_info)
def test_sync_instances(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
project_id = 'fake_project_id'
updated_since_raw = 'fake_updated_since_raw'
updated_since_parsed = 'fake_updated_since_parsed'
deleted = 'fake_deleted'
instance1 = dict(uuid='fake_uuid1', deleted=False)
instance2 = dict(uuid='fake_uuid2', deleted=True)
fake_instances = [instance1, instance2]
self.mox.StubOutWithMock(self.tgt_msg_runner,
'instance_update_at_top')
self.mox.StubOutWithMock(self.tgt_msg_runner,
'instance_destroy_at_top')
self.mox.StubOutWithMock(timeutils, 'parse_isotime')
self.mox.StubOutWithMock(cells_utils, 'get_instances_to_sync')
# Middle cell.
timeutils.parse_isotime(updated_since_raw).AndReturn(
updated_since_parsed)
cells_utils.get_instances_to_sync(self.ctxt,
updated_since=updated_since_parsed,
project_id=project_id,
deleted=deleted).AndReturn([])
# Bottom/Target cell
timeutils.parse_isotime(updated_since_raw).AndReturn(
updated_since_parsed)
cells_utils.get_instances_to_sync(self.ctxt,
updated_since=updated_since_parsed,
project_id=project_id,
deleted=deleted).AndReturn(fake_instances)
self.tgt_msg_runner.instance_update_at_top(self.ctxt, instance1)
self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, instance2)
self.mox.ReplayAll()
self.src_msg_runner.sync_instances(self.ctxt,
project_id, updated_since_raw, deleted)
def test_service_get_all_with_disabled(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
ctxt = self.ctxt.elevated()
self.mox.StubOutWithMock(self.src_db_inst, 'service_get_all')
self.mox.StubOutWithMock(self.mid_db_inst, 'service_get_all')
self.mox.StubOutWithMock(self.tgt_db_inst, 'service_get_all')
self.src_db_inst.service_get_all(ctxt,
disabled=None).AndReturn([1, 2])
self.mid_db_inst.service_get_all(ctxt,
disabled=None).AndReturn([3])
self.tgt_db_inst.service_get_all(ctxt,
disabled=None).AndReturn([4, 5])
self.mox.ReplayAll()
responses = self.src_msg_runner.service_get_all(ctxt,
filters={})
response_values = [(resp.cell_name, resp.value_or_raise())
for resp in responses]
expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
('api-cell!child-cell2', [3]),
('api-cell', [1, 2])]
self.assertEqual(expected, response_values)
def test_service_get_all_without_disabled(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
disabled = False
filters = {'disabled': disabled}
ctxt = self.ctxt.elevated()
self.mox.StubOutWithMock(self.src_db_inst, 'service_get_all')
self.mox.StubOutWithMock(self.mid_db_inst, 'service_get_all')
self.mox.StubOutWithMock(self.tgt_db_inst, 'service_get_all')
self.src_db_inst.service_get_all(ctxt,
disabled=disabled).AndReturn([1, 2])
self.mid_db_inst.service_get_all(ctxt,
disabled=disabled).AndReturn([3])
self.tgt_db_inst.service_get_all(ctxt,
disabled=disabled).AndReturn([4, 5])
self.mox.ReplayAll()
responses = self.src_msg_runner.service_get_all(ctxt,
filters=filters)
response_values = [(resp.cell_name, resp.value_or_raise())
for resp in responses]
expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
('api-cell!child-cell2', [3]),
('api-cell', [1, 2])]
self.assertEqual(expected, response_values)
def test_task_log_get_all_broadcast(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
task_name = 'fake_task_name'
begin = 'fake_begin'
end = 'fake_end'
host = 'fake_host'
state = 'fake_state'
ctxt = self.ctxt.elevated()
self.mox.StubOutWithMock(self.src_db_inst, 'task_log_get_all')
self.mox.StubOutWithMock(self.mid_db_inst, 'task_log_get_all')
self.mox.StubOutWithMock(self.tgt_db_inst, 'task_log_get_all')
self.src_db_inst.task_log_get_all(ctxt, task_name,
begin, end, host=host, state=state).AndReturn([1, 2])
self.mid_db_inst.task_log_get_all(ctxt, task_name,
begin, end, host=host, state=state).AndReturn([3])
self.tgt_db_inst.task_log_get_all(ctxt, task_name,
begin, end, host=host, state=state).AndReturn([4, 5])
self.mox.ReplayAll()
responses = self.src_msg_runner.task_log_get_all(ctxt, None,
task_name, begin, end, host=host, state=state)
response_values = [(resp.cell_name, resp.value_or_raise())
for resp in responses]
expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
('api-cell!child-cell2', [3]),
('api-cell', [1, 2])]
self.assertEqual(expected, response_values)
def test_compute_node_get_all(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
ctxt = self.ctxt.elevated()
self.mox.StubOutWithMock(self.src_db_inst, 'compute_node_get_all')
self.mox.StubOutWithMock(self.mid_db_inst, 'compute_node_get_all')
self.mox.StubOutWithMock(self.tgt_db_inst, 'compute_node_get_all')
self.src_db_inst.compute_node_get_all(ctxt).AndReturn([1, 2])
self.mid_db_inst.compute_node_get_all(ctxt).AndReturn([3])
self.tgt_db_inst.compute_node_get_all(ctxt).AndReturn([4, 5])
self.mox.ReplayAll()
responses = self.src_msg_runner.compute_node_get_all(ctxt)
response_values = [(resp.cell_name, resp.value_or_raise())
for resp in responses]
expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
('api-cell!child-cell2', [3]),
('api-cell', [1, 2])]
self.assertEqual(expected, response_values)
def test_compute_node_get_all_with_hyp_match(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
hypervisor_match = 'meow'
ctxt = self.ctxt.elevated()
self.mox.StubOutWithMock(self.src_db_inst,
'compute_node_search_by_hypervisor')
self.mox.StubOutWithMock(self.mid_db_inst,
'compute_node_search_by_hypervisor')
self.mox.StubOutWithMock(self.tgt_db_inst,
'compute_node_search_by_hypervisor')
self.src_db_inst.compute_node_search_by_hypervisor(ctxt,
hypervisor_match).AndReturn([1, 2])
self.mid_db_inst.compute_node_search_by_hypervisor(ctxt,
hypervisor_match).AndReturn([3])
self.tgt_db_inst.compute_node_search_by_hypervisor(ctxt,
hypervisor_match).AndReturn([4, 5])
self.mox.ReplayAll()
responses = self.src_msg_runner.compute_node_get_all(ctxt,
hypervisor_match=hypervisor_match)
response_values = [(resp.cell_name, resp.value_or_raise())
for resp in responses]
expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
('api-cell!child-cell2', [3]),
('api-cell', [1, 2])]
self.assertEqual(expected, response_values)
def test_compute_node_stats(self):
# Reset this, as this is a broadcast down.
self._setup_attrs(up=False)
ctxt = self.ctxt.elevated()
self.mox.StubOutWithMock(self.src_db_inst,
'compute_node_statistics')
self.mox.StubOutWithMock(self.mid_db_inst,
'compute_node_statistics')
self.mox.StubOutWithMock(self.tgt_db_inst,
'compute_node_statistics')
self.src_db_inst.compute_node_statistics(ctxt).AndReturn([1, 2])
self.mid_db_inst.compute_node_statistics(ctxt).AndReturn([3])
self.tgt_db_inst.compute_node_statistics(ctxt).AndReturn([4, 5])
self.mox.ReplayAll()
responses = self.src_msg_runner.compute_node_stats(ctxt)
response_values = [(resp.cell_name, resp.value_or_raise())
for resp in responses]
expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
('api-cell!child-cell2', [3]),
('api-cell', [1, 2])]
self.assertEqual(expected, response_values)
def test_consoleauth_delete_tokens(self):
fake_uuid = 'fake-instance-uuid'
# To show these should not be called in src/mid-level cell
self.mox.StubOutWithMock(self.src_ca_rpcapi,
'delete_tokens_for_instance')
self.mox.StubOutWithMock(self.mid_ca_rpcapi,
'delete_tokens_for_instance')
self.mox.StubOutWithMock(self.tgt_ca_rpcapi,
'delete_tokens_for_instance')
self.tgt_ca_rpcapi.delete_tokens_for_instance(self.ctxt, fake_uuid)
self.mox.ReplayAll()
self.src_msg_runner.consoleauth_delete_tokens(self.ctxt, fake_uuid)
def test_bdm_update_or_create_with_none_create(self):
fake_bdm = {'id': 'fake_id',
'volume_id': 'fake_volume_id'}
expected_bdm = fake_bdm.copy()
expected_bdm.pop('id')
# Shouldn't be called for these 2 cells
self.mox.StubOutWithMock(self.src_db_inst,
'block_device_mapping_update_or_create')
self.mox.StubOutWithMock(self.mid_db_inst,
'block_device_mapping_update_or_create')
self.mox.StubOutWithMock(self.tgt_db_inst,
'block_device_mapping_update_or_create')
self.tgt_db_inst.block_device_mapping_update_or_create(
self.ctxt, expected_bdm, legacy=False)
self.mox.ReplayAll()
self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
fake_bdm,
create=None)
def test_bdm_update_or_create_with_true_create(self):
fake_bdm = {'id': 'fake_id',
'volume_id': 'fake_volume_id'}
expected_bdm = fake_bdm.copy()
expected_bdm.pop('id')
# Shouldn't be called for these 2 cells
self.mox.StubOutWithMock(self.src_db_inst,
'block_device_mapping_create')
self.mox.StubOutWithMock(self.mid_db_inst,
'block_device_mapping_create')
self.mox.StubOutWithMock(self.tgt_db_inst,
'block_device_mapping_create')
self.tgt_db_inst.block_device_mapping_create(
self.ctxt, fake_bdm, legacy=False)
self.mox.ReplayAll()
self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
fake_bdm,
create=True)
def test_bdm_update_or_create_with_false_create_vol_id(self):
fake_bdm = {'id': 'fake_id',
'instance_uuid': 'fake_instance_uuid',
'device_name': 'fake_device_name',
'volume_id': 'fake_volume_id'}
expected_bdm = fake_bdm.copy()
expected_bdm.pop('id')
fake_inst_bdms = [{'id': 1,
'volume_id': 'not-a-match',
'device_name': 'not-a-match'},
{'id': 2,
'volume_id': 'fake_volume_id',
'device_name': 'not-a-match'},
{'id': 3,
'volume_id': 'not-a-match',
'device_name': 'not-a-match'}]
# Shouldn't be called for these 2 cells
self.mox.StubOutWithMock(self.src_db_inst,
'block_device_mapping_update')
self.mox.StubOutWithMock(self.mid_db_inst,
'block_device_mapping_update')
self.mox.StubOutWithMock(self.tgt_db_inst,
'block_device_mapping_get_all_by_instance')
self.mox.StubOutWithMock(self.tgt_db_inst,
'block_device_mapping_update')
self.tgt_db_inst.block_device_mapping_get_all_by_instance(
self.ctxt, 'fake_instance_uuid').AndReturn(
fake_inst_bdms)
# Should try to update ID 2.
self.tgt_db_inst.block_device_mapping_update(
self.ctxt, 2, expected_bdm, legacy=False)
self.mox.ReplayAll()
self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
fake_bdm,
create=False)
def test_bdm_update_or_create_with_false_create_dev_name(self):
fake_bdm = {'id': 'fake_id',
'instance_uuid': 'fake_instance_uuid',
'device_name': 'fake_device_name',
'volume_id': 'fake_volume_id'}
expected_bdm = fake_bdm.copy()
expected_bdm.pop('id')
fake_inst_bdms = [{'id': 1,
'volume_id': 'not-a-match',
'device_name': 'not-a-match'},
{'id': 2,
'volume_id': 'not-a-match',
'device_name': 'fake_device_name'},
{'id': 3,
'volume_id': 'not-a-match',
'device_name': 'not-a-match'}]
# Shouldn't be called for these 2 cells
self.mox.StubOutWithMock(self.src_db_inst,
'block_device_mapping_update')
self.mox.StubOutWithMock(self.mid_db_inst,
'block_device_mapping_update')
self.mox.StubOutWithMock(self.tgt_db_inst,
'block_device_mapping_get_all_by_instance')
self.mox.StubOutWithMock(self.tgt_db_inst,
'block_device_mapping_update')
self.tgt_db_inst.block_device_mapping_get_all_by_instance(
self.ctxt, 'fake_instance_uuid').AndReturn(
fake_inst_bdms)
# Should try to update ID 2.
self.tgt_db_inst.block_device_mapping_update(
self.ctxt, 2, expected_bdm, legacy=False)
self.mox.ReplayAll()
self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
fake_bdm,
create=False)
def test_bdm_destroy_by_volume(self):
fake_instance_uuid = 'fake-instance-uuid'
fake_volume_id = 'fake-volume-name'
# Shouldn't be called for these 2 cells
self.mox.StubOutWithMock(self.src_db_inst,
'block_device_mapping_destroy_by_instance_and_volume')
self.mox.StubOutWithMock(self.mid_db_inst,
'block_device_mapping_destroy_by_instance_and_volume')
self.mox.StubOutWithMock(self.tgt_db_inst,
'block_device_mapping_destroy_by_instance_and_volume')
self.tgt_db_inst.block_device_mapping_destroy_by_instance_and_volume(
self.ctxt, fake_instance_uuid, fake_volume_id)
self.mox.ReplayAll()
self.src_msg_runner.bdm_destroy_at_top(self.ctxt, fake_instance_uuid,
volume_id=fake_volume_id)
def test_bdm_destroy_by_device(self):
fake_instance_uuid = 'fake-instance-uuid'
fake_device_name = 'fake-device-name'
# Shouldn't be called for these 2 cells
self.mox.StubOutWithMock(self.src_db_inst,
'block_device_mapping_destroy_by_instance_and_device')
self.mox.StubOutWithMock(self.mid_db_inst,
'block_device_mapping_destroy_by_instance_and_device')
self.mox.StubOutWithMock(self.tgt_db_inst,
'block_device_mapping_destroy_by_instance_and_device')
self.tgt_db_inst.block_device_mapping_destroy_by_instance_and_device(
self.ctxt, fake_instance_uuid, fake_device_name)
self.mox.ReplayAll()
self.src_msg_runner.bdm_destroy_at_top(self.ctxt, fake_instance_uuid,
device_name=fake_device_name)
def test_get_migrations(self):
self._setup_attrs(up=False)
filters = {'status': 'confirmed'}
migrations_from_cell1 = [{'id': 123}]
migrations_from_cell2 = [{'id': 456}]
self.mox.StubOutWithMock(self.mid_compute_api,
'get_migrations')
self.mid_compute_api.get_migrations(self.ctxt, filters).\
AndReturn(migrations_from_cell1)
self.mox.StubOutWithMock(self.tgt_compute_api,
'get_migrations')
self.tgt_compute_api.get_migrations(self.ctxt, filters).\
AndReturn(migrations_from_cell2)
self.mox.ReplayAll()
responses = self.src_msg_runner.get_migrations(
self.ctxt,
None, False, filters)
self.assertEqual(2, len(responses))
for response in responses:
self.assertIn(response.value_or_raise(), [migrations_from_cell1,
migrations_from_cell2])
|
|
from __future__ import unicode_literals
import re
from django.core.exceptions import ValidationError
from django.test import TestCase
from localflavor.au import forms, models
from localflavor.au.validators import AUBusinessNumberFieldValidator
from .forms import AustralianPlaceForm
from .models import AustralianPlace
SELECTED_OPTION_PATTERN = r'<option value="%s" selected="selected">'
BLANK_OPTION_PATTERN = r'<option value="">'
INPUT_VALUE_PATTERN = r'<input[^>]*value="%s"[^>]*>'
class AULocalflavorTests(TestCase):
def setUp(self):
self.form = AustralianPlaceForm(
{'state': 'WA',
'state_required': 'QLD',
'name': 'dummy',
'postcode': '1234',
'postcode_required': '4321',
'abn': '74457506140',
})
def test_get_display_methods(self):
""" Ensure get_*_display() methods are added to model instances. """
place = self.form.save()
self.assertEqual(place.get_state_display(), 'Western Australia')
self.assertEqual(place.get_state_required_display(), 'Queensland')
def test_default_values(self):
""" Ensure that default values are selected in forms. """
form = AustralianPlaceForm()
self.assertTrue(re.search(SELECTED_OPTION_PATTERN % 'NSW',
str(form['state_default'])))
self.assertTrue(re.search(INPUT_VALUE_PATTERN % '2500',
str(form['postcode_default'])))
def test_required(self):
""" Test that required AUStateFields throw appropriate errors. """
form = AustralianPlaceForm({'state': 'NSW', 'name': 'Wollongong'})
self.assertFalse(form.is_valid())
self.assertEqual(set(form.errors.keys()),
set(('state_required',
'postcode_required',
'abn')))
self.assertEqual(
form.errors['state_required'], ['This field is required.'])
self.assertEqual(
form.errors['postcode_required'], ['This field is required.'])
self.assertEqual(
form.errors['abn'], ['This field is required.'])
def test_field_blank_option(self):
""" Test that the empty option is there. """
self.assertTrue(re.search(BLANK_OPTION_PATTERN,
str(self.form['state'])))
def test_selected_values(self):
""" Ensure selected states match the initial values provided. """
self.assertTrue(re.search(SELECTED_OPTION_PATTERN % 'WA',
str(self.form['state'])))
self.assertTrue(re.search(SELECTED_OPTION_PATTERN % 'QLD',
str(self.form['state_required'])))
self.assertTrue(re.search(INPUT_VALUE_PATTERN % '1234',
str(self.form['postcode'])))
self.assertTrue(re.search(INPUT_VALUE_PATTERN % '4321',
str(self.form['postcode_required'])))
def test_AUStateSelect(self):
f = forms.AUStateSelect()
out = '''<select name="state">
<option value="ACT">Australian Capital Territory</option>
<option value="NSW" selected="selected">New South Wales</option>
<option value="NT">Northern Territory</option>
<option value="QLD">Queensland</option>
<option value="SA">South Australia</option>
<option value="TAS">Tasmania</option>
<option value="VIC">Victoria</option>
<option value="WA">Western Australia</option>
</select>'''
self.assertHTMLEqual(f.render('state', 'NSW'), out)
def test_AUPostCodeField(self):
error_format = ['Enter a 4 digit postcode.']
valid = {
'1234': '1234',
'2000': '2000',
}
invalid = {
'abcd': error_format,
'20001': ['Ensure this value has at most 4 characters (it has 5).'] + error_format,
}
self.assertFieldOutput(forms.AUPostCodeField, valid, invalid)
def test_AUPhoneNumberField(self):
error_format = ['Phone numbers must contain 10 digits.']
valid = {
'1234567890': '1234567890',
'0213456789': '0213456789',
'02 13 45 67 89': '0213456789',
'(02) 1345 6789': '0213456789',
'(02) 1345-6789': '0213456789',
'(02)1345-6789': '0213456789',
'0408 123 456': '0408123456',
}
invalid = {
'123': error_format,
'1800DJANGO': error_format,
}
self.assertFieldOutput(forms.AUPhoneNumberField, valid, invalid)
def test_abn(self):
error_format = ['Enter a valid ABN.']
valid = {
'53004085616': '53004085616',
}
invalid = {
'53 004 085 616': error_format,
'53004085617': error_format,
'5300A085616': error_format,
}
self.assertFieldOutput(forms.AUBusinessNumberField, valid, invalid)
class AULocalFlavorAUBusinessNumberFieldValidatorTests(TestCase):
def test_no_error_for_a_valid_abn(self):
"""Test a valid ABN does not cause an error."""
valid_abn = '53004085616'
validator = AUBusinessNumberFieldValidator()
validator(valid_abn)
def test_raises_error_for_abn_containing_a_letter(self):
"""Test an ABN containing a letter is invalid."""
invalid_abn = '5300408561A'
validator = AUBusinessNumberFieldValidator()
self.assertRaises(ValidationError, lambda: validator(invalid_abn))
def test_raises_error_for_too_short_abn(self):
"""Test an ABN with fewer than eleven digits is invalid."""
invalid_abn = '5300408561'
validator = AUBusinessNumberFieldValidator()
self.assertRaises(ValidationError, lambda: validator(invalid_abn))
def test_raises_error_for_too_long_abn(self):
"""Test an ABN with more than eleven digits is invalid."""
invalid_abn = '530040856160'
validator = AUBusinessNumberFieldValidator()
self.assertRaises(ValidationError, lambda: validator(invalid_abn))
def test_raises_error_for_whitespace(self):
"""Test an ABN can be valid when it contains whitespace."""
# NB: Form field should strip the whitespace before regex valdation is run.
invalid_abn = '5300 4085 616'
validator = AUBusinessNumberFieldValidator()
self.assertRaises(ValidationError, lambda: validator(invalid_abn))
def test_raises_error_for_invalid_abn(self):
"""Test that an ABN must pass the ATO's validation algorithm."""
invalid_abn = '53004085617'
validator = AUBusinessNumberFieldValidator()
self.assertRaises(ValidationError, lambda: validator(invalid_abn))
class AULocalFlavorAUBusinessNumberModelTests(TestCase):
def test_AUBusinessNumberModel_invalid_abn_raises_error(self):
place = AustralianPlace(**{
'state': 'WA',
'state_required': 'QLD',
'name': 'dummy',
'postcode': '1234',
'postcode_required': '4321',
'abn': '5300 4085 616 INVALID',
})
self.assertRaises(ValidationError, place.clean_fields)
class AULocalFlavourAUBusinessNumberFormFieldTests(TestCase):
def test_abn_with_spaces_remains_unchanged(self):
"""Test that an ABN with the formatting we expect is unchanged."""
field = forms.AUBusinessNumberField()
self.assertEqual('53 004 085 616', field.prepare_value('53 004 085 616'))
def test_spaces_are_reconfigured(self):
"""Test that an ABN with formatting we don't expect is transformed."""
field = forms.AUBusinessNumberField()
self.assertEqual('53 004 085 616', field.prepare_value('53004085616'))
self.assertEqual('53 004 085 616', field.prepare_value('53 0 04 08561 6'))
class AULocalFlavourAUBusinessNumberModelFieldTests(TestCase):
def test_to_python_strips_whitespace(self):
"""Test the value is stored without whitespace."""
field = models.AUBusinessNumberField()
self.assertEqual('53004085616', field.to_python('53 004 085 616'))
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
A list of commonly used multiple correction routines
"""
from __future__ import print_function
from __future__ import absolute_import
import sys
import random
import numpy as np
import collections as cx
__copyright__ = "Copyright (C) 2010-2018, H Tang et al., All rights reserved."
__author__ = "various"
class Methods(object):
"""Class to manage multipletest methods from both local and remote sources."""
# https://github.com/statsmodels/statsmodels/blob/master/statsmodels/stats/multitest.py
all_methods = [
("local", ("bonferroni", "sidak", "holm", "fdr")),
("statsmodels", (
'bonferroni', # 0) Bonferroni one-step correction
'sidak', # 1) Sidak one-step correction
'holm-sidak', # 2) Holm-Sidak step-down method using Sidak adjustments
'holm', # 3) Holm step-down method using Bonferroni adjustments
'simes-hochberg', # 4) Simes-Hochberg step-up method (independent)
'hommel', # 5) Hommel closed method based on Simes tests (non-negative)
'fdr_bh', # 6) FDR Benjamini/Hochberg (non-negative)
'fdr_by', # 7) FDR Benjamini/Yekutieli (negative)
'fdr_tsbh', # 8) FDR 2-stage Benjamini-Hochberg (non-negative)
'fdr_tsbky', # 9) FDR 2-stage Benjamini-Krieger-Yekutieli (non-negative)
'fdr_gbs', # 10) FDR adaptive Gavrilov-Benjamini-Sarkar
)),
]
prefixes = {'statsmodels':'sm_'}
NtMethodInfo = cx.namedtuple("NtMethodInfo", "source method fieldname")
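    # Note: the 'sm_' prefix selects the statsmodels source, so a user-supplied
    # method string such as 'sm_fdr_bh' resolves to ('statsmodels', 'fdr_bh').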
def __init__(self, usr_methods=None):
self._srcmethod2fieldname = self._init_srcmethod2fieldname()
self.statsmodels_multicomp = None
if usr_methods is None:
usr_methods = ['bonferroni']
self._init_methods(usr_methods)
def _init_methods(self, usr_methods):
"""From the methods list, set list of methods to be used during GOEA."""
self.methods = []
for usr_method in usr_methods:
self._add_method(usr_method)
def _add_method(self, method, method_source=None):
"""Determine method source if needed. Add method to list."""
try:
if method_source is not None:
self._add_method_src(method_source, method)
else:
self._add_method_nosrc(method)
except Exception as inst:
raise Exception("{ERRMSG}".format(ERRMSG=inst))
def _add_method_nosrc(self, usr_method):
"""Add method source, method, and fieldname to list of methods."""
for method_source, available_methods in self.all_methods:
if usr_method in available_methods:
fieldname = self.get_fldnm_method(usr_method)
nmtup = self.NtMethodInfo(method_source, usr_method, fieldname)
self.methods.append(nmtup)
return
for src, prefix in self.prefixes.items():
if usr_method.startswith(prefix):
method_source = src
method = usr_method[len(prefix):]
nmtup = self.NtMethodInfo(method_source, method, usr_method)
self.methods.append(nmtup)
return
raise self.rpt_invalid_method(usr_method)
def getmsg_valid_methods(self):
"""Return a string containing valid method names."""
msg = []
msg.append(" Available methods:")
for method_source, methods in self.all_methods:
msg.append(" {SRC}(".format(SRC=method_source))
for method in methods:
attrname = self._srcmethod2fieldname[(method_source, method)]
msg.append(" {ATTR}".format(ATTR=attrname))
msg.append(" )")
return "\n".join(msg)
def get_fieldname(self, method_source, method):
"""Get the name of the method used to create namedtuple fieldnames which store floats."""
return self._srcmethod2fieldname[(method_source, method)]
def _init_srcmethod2fieldname(self):
"""Return an OrderedDict with key, (method_src, method), and value, attrname."""
srcmethod_fieldname = []
ctr = self._get_method_cnts()
for method_source, methods in self.all_methods:
for method in methods:
prefix = self.prefixes.get(method_source, "")
prefix = prefix if ctr[method] != 1 else ""
fieldname = "{P}{M}".format(P=prefix, M=method.replace('-', '_'))
srcmethod_fieldname.append(((method_source, method), fieldname))
return cx.OrderedDict(srcmethod_fieldname)
def rpt_invalid_method(self, usr_method):
"""Report which methods are available."""
msgerr = "FATAL: UNRECOGNIZED METHOD({M})".format(M=usr_method)
msg = [msgerr, self.getmsg_valid_methods(), msgerr]
raise Exception("\n".join(msg))
def _get_method_cnts(self):
"""Count the number of times a method is seen."""
ctr = cx.Counter()
for source_methods in self.all_methods:
for method in source_methods[1]:
ctr[method] += 1
return ctr
def _add_method_src(self, method_source, usr_method, fieldname=None):
"""Add method source and method to list of methods."""
fieldname = self._srcmethod2fieldname.get((method_source, usr_method), None)
if fieldname is not None:
nmtup = self.NtMethodInfo(method_source, usr_method, fieldname)
self.methods.append(nmtup)
else: raise Exception("ERROR: FIELD({FN}) METHOD_SOURCE({MS}) AND METHOD({M})".format(
FN=fieldname, MS=method_source, M=usr_method))
@staticmethod
def get_fldnm_method(method):
"""Given method and source, return fieldname for method."""
fieldname = method.replace('-', '_')
return fieldname
def get_statsmodels_multipletests(self):
"""Only load statsmodels package if it is used."""
if self.statsmodels_multicomp is not None:
return self.statsmodels_multicomp
from statsmodels.sandbox.stats.multicomp import multipletests
self.statsmodels_multicomp = multipletests
return self.statsmodels_multicomp
def __iter__(self):
return iter(self.methods)
class _AbstractCorrection(object):
"""Base class for local multiple test correction calculations."""
def __init__(self, pvals, a=.05):
self.pvals = self.corrected_pvals = np.array(pvals)
self.n = len(self.pvals) # number of multiple tests
self.a = a # type-1 error cutoff for each test
self.set_correction()
# Reset all pvals > 1 to 1
self.corrected_pvals[self.corrected_pvals > 1] = 1
def set_correction(self):
# the purpose of multiple correction is to lower the alpha
# instead of the canonical value (like .05)
pass
class Bonferroni(_AbstractCorrection):
"""
>>> Bonferroni([0.01, 0.01, 0.03, 0.05, 0.005], a=0.05).corrected_pvals
array([ 0.05 , 0.05 , 0.15 , 0.25 , 0.025])
"""
def set_correction(self):
"""Do Bonferroni multiple test correction on original p-values."""
self.corrected_pvals *= self.n
class Sidak(_AbstractCorrection):
"""http://en.wikipedia.org/wiki/Bonferroni_correction
>>> Sidak([0.01, 0.01, 0.03, 0.05, 0.005], a=0.05).corrected_pvals
array([ 0.04898974, 0.04898974, 0.14696923, 0.24494871, 0.02449487])
"""
def set_correction(self):
"""Do Sidak multiple test correction on original p-values."""
if self.n != 0:
correction = self.a * 1. / (1 - (1 - self.a) ** (1. / self.n))
else:
correction = 1
self.corrected_pvals *= correction
class HolmBonferroni(_AbstractCorrection):
"""http://en.wikipedia.org/wiki/Holm-Bonferroni_method
given a list of pvals, perform the Holm-Bonferroni correction
and return the indexes from original list that are significant.
    (can't use the p-value itself, as it may be repeated.)
>>> HolmBonferroni([0.01, 0.01, 0.03, 0.05, 0.005], a=0.05).corrected_pvals
array([ 0.04 , 0.04 , 0.06 , 0.05 , 0.025])
"""
def set_correction(self):
"""Do Holm-Bonferroni multiple test correction on original p-values."""
if len(self.pvals):
idxs, correction = list(zip(*self._generate_significant()))
idxs = list(idxs)
self.corrected_pvals[idxs] *= correction
def _generate_significant(self):
pvals = self.pvals
pvals_idxs = list(zip(pvals, list(range(len(pvals)))))
pvals_idxs.sort()
num_pvals = len(self.pvals)
from itertools import groupby
for _, idxs in groupby(pvals_idxs, lambda x: x[0]):
idxs = list(idxs)
for p, i in idxs:
if p * 1. / num_pvals < self.a:
yield (i, num_pvals)
num_pvals -= len(idxs)
class FDR(object):
"""
Generate a p-value distribution based on re-sampling, as described in:
http://www.biomedcentral.com/1471-2105/6/168
"""
def __init__(self, p_val_distribution, results, a=.05):
self.corrected_pvals = fdr = []
for rec in results:
q = (sum(1 for x in p_val_distribution if x < rec.p_uncorrected)
* 1.0 / len(p_val_distribution))
fdr.append(q)
def mcorrection_factory(pvals, alpha, method):
"""Return 'multiple correction' object of requested AbstractCorrection base class."""
correctioncls = globals().get(method, None)
if correctioncls is not None:
return correctioncls(pvals, alpha)
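# Illustrative usage sketch (not part of the original module): the factory looks
# the method name up in this module's globals, so it must match one of the
# correction class names defined above, e.g.:
#
#     >>> mcorrection_factory([0.01, 0.01, 0.03, 0.05, 0.005], 0.05, 'Bonferroni').corrected_pvals
#     array([ 0.05 , 0.05 , 0.15 , 0.25 , 0.025])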
def calc_qval(study_n, pop_n,
pop, assoc, term_pop, obo_dag, T=500):
"""Generate p-value distribution for FDR based on resampling."""
from goatools.pvalcalc import FisherFactory
from goatools.ratio import count_terms
sys.stderr.write("Generate p-value distribution for FDR "
"based on resampling (this might take a while)\n")
distribution = []
calc_pvalue = FisherFactory().pval_obj.calc_pvalue
pop = list(pop)
for i in range(T):
new_study = random.sample(pop, study_n)
new_term_study = count_terms(new_study, assoc, obo_dag)
smallest_p = 1
for term, study_count in list(new_term_study.items()):
pop_count = term_pop[term]
p_uncorrected = calc_pvalue(study_count,
study_n,
pop_count,
pop_n)
if p_uncorrected < smallest_p:
smallest_p = p_uncorrected
distribution.append(smallest_p)
if i % 10 == 0:
sys.stderr.write("Sample {0} / {1}: "
"p-value {2}\n".format(i, T, smallest_p))
return distribution
if __name__ == '__main__':
import doctest
doctest.testmod()
# Copyright (C) 2010-2018, H Tang et al., All rights reserved.
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import io
import logging
import struct
from binascii import hexlify
import common
from enum import IntEnum
from tlvs_parsing import UnknownTlvFactory
class CommandType(IntEnum):
LINK_REQUEST = 0
LINK_ACCEPT = 1
LINK_ACCEPT_AND_REQUEST = 2
LINK_REJECT = 3
ADVERTISEMENT = 4
UPDATE = 5
UPDATE_REQUEST = 6
DATA_REQUEST = 7
DATA_RESPONSE = 8
PARENT_REQUEST = 9
PARENT_RESPONSE = 10
CHILD_ID_REQUEST = 11
CHILD_ID_RESPONSE = 12
CHILD_UPDATE_REQUEST = 13
CHILD_UPDATE_RESPONSE = 14
ANNOUNCE = 15
DISCOVERY_REQUEST = 16
DISCOVERY_RESPONSE = 17
LINK_METRICS_MANAGEMENT_REQUEST = 18
LINK_METRICS_MANAGEMENT_RESPONSE = 19
LINK_PROBE = 20
TIME_SYNC = 99
class TlvType(IntEnum):
SOURCE_ADDRESS = 0
MODE = 1
TIMEOUT = 2
CHALLENGE = 3
RESPONSE = 4
LINK_LAYER_FRAME_COUNTER = 5
MLE_FRAME_COUNTER = 8
ROUTE64 = 9
ADDRESS16 = 10
LEADER_DATA = 11
NETWORK_DATA = 12
TLV_REQUEST = 13
SCAN_MASK = 14
CONNECTIVITY = 15
LINK_MARGIN = 16
STATUS = 17
VERSION = 18
ADDRESS_REGISTRATION = 19
CHANNEL = 20
PANID = 21
ACTIVE_TIMESTAMP = 22
PENDING_TIMESTAMP = 23
ACTIVE_OPERATIONAL_DATASET = 24
PENDING_OPERATIONAL_DATASET = 25
THREAD_DISCOVERY = 26
CSL_CHANNEL = 80
CSL_SYNCHRONIZED_TIMEOUT = 85
LINK_METRICS_QUERY = 87
LINK_METRICS_MANAGEMENT = 88
LINK_METRICS_REPORT = 89
LINK_PROBE = 90
TIME_REQUEST = 252
TIME_PARAMETER = 253
class LinkMetricsSubTlvType(IntEnum):
LINK_METRICS_REPORT = 0
LINK_METRICS_QUERY_ID = 1
LINK_METRICS_QUERY_OPTIONS = 2
FORWARD_PROBING_REGISTRATION = 3
LINK_METRICS_STATUS = 5
ENHANCED_ACK_LINK_METRICS_CONFIGURATION = 7
class SourceAddress(object):
def __init__(self, address):
self._address = address
@property
def address(self):
return self._address
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.address == other.address
def __repr__(self):
return "SourceAddress(address={})".format(hex(self._address))
class SourceAddressFactory:
def parse(self, data, message_info):
address = struct.unpack(">H", data.read(2))[0]
return SourceAddress(address)
class Mode(object):
def __init__(self, receiver, secure, device_type, network_data):
self._receiver = receiver
self._secure = secure
self._device_type = device_type
self._network_data = network_data
@property
def receiver(self):
return self._receiver
@property
def secure(self):
return self._secure
@property
def device_type(self):
return self._device_type
@property
def network_data(self):
return self._network_data
def __eq__(self, other):
common.expect_the_same_class(self, other)
return (self.receiver == other.receiver and self.secure == other.secure and
self.device_type == other.device_type and self.network_data == other.network_data)
def __repr__(self):
return "Mode(receiver={}, secure={}, device_type={}, network_data={})".format(
self.receiver, self.secure, self.device_type, self.network_data)
class ModeFactory:
def parse(self, data, message_info):
mode = ord(data.read(1))
receiver = (mode >> 3) & 0x01
secure = (mode >> 2) & 0x01
device_type = (mode >> 1) & 0x01
network_data = (mode >> 0) & 0x01
return Mode(receiver, secure, device_type, network_data)
class Timeout(object):
def __init__(self, timeout):
self._timeout = timeout
@property
def timeout(self):
return self._timeout
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.timeout == other.timeout
def __repr__(self):
return "Timeout(timeout={})".format(self.timeout)
class TimeoutFactory:
def parse(self, data, message_info):
timeout = struct.unpack(">I", data.read(4))[0]
return Timeout(timeout)
class Challenge(object):
def __init__(self, challenge):
self._challenge = challenge
@property
def challenge(self):
return self._challenge
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.challenge == other.challenge
def __repr__(self):
return "Challenge(challenge={})".format(hexlify(self.challenge))
class ChallengeFactory:
def parse(self, data, message_info):
challenge = data.read()
return Challenge(challenge)
class Response(object):
def __init__(self, response):
self._response = response
@property
def response(self):
return self._response
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.response == other.response
def __repr__(self):
return "Response(response={})".format(hexlify(self.response))
class ResponseFactory:
def parse(self, data, message_info):
response = data.read()
return Response(response)
class LinkLayerFrameCounter(object):
def __init__(self, frame_counter):
self._frame_counter = frame_counter
@property
def frame_counter(self):
return self._frame_counter
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.frame_counter == other.frame_counter
def __repr__(self):
return "LinkLayerFrameCounter(frame_counter={})".format(self.frame_counter)
class LinkLayerFrameCounterFactory:
def parse(self, data, message_info):
frame_counter = struct.unpack(">I", data.read(4))[0]
return LinkLayerFrameCounter(frame_counter)
class MleFrameCounter(object):
def __init__(self, frame_counter):
self._frame_counter = frame_counter
@property
def frame_counter(self):
return self._frame_counter
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.frame_counter == other.frame_counter
def __repr__(self):
return "MleFrameCounter(frame_counter={})".format(self.frame_counter)
class MleFrameCounterFactory:
def parse(self, data, message_info):
frame_counter = struct.unpack(">I", data.read(4))[0]
return MleFrameCounter(frame_counter)
class LinkQualityAndRouteData(object):
def __init__(self, output, _input, route):
self._output = output
self._input = _input
self._route = route
@property
def output(self):
return self._output
@property
def input(self):
return self._input
@property
def route(self):
return self._route
def __eq__(self, other):
common.expect_the_same_class(self, other)
return (self.output == other.output and self.input == other.input and self.route == other.route)
def __repr__(self):
return "LinkQualityAndRouteData(ouput={}, input={}, route={})".format(self.output, self.input, self.route)
class LinkQualityAndRouteDataFactory:
def parse(self, data, message_info):
lqrd = ord(data.read(1))
output = (lqrd >> 6) & 0x3
_input = (lqrd >> 4) & 0x3
route = lqrd & 0x0F
return LinkQualityAndRouteData(output, _input, route)
class Route64(object):
def __init__(self, id_sequence, router_id_mask, link_quality_and_route_data):
self._id_sequence = id_sequence
self._router_id_mask = router_id_mask
self._link_quality_and_route_data = link_quality_and_route_data
@property
def id_sequence(self):
return self._id_sequence
@property
def router_id_mask(self):
return self._router_id_mask
@property
def link_quality_and_route_data(self):
return self._link_quality_and_route_data
def __eq__(self, other):
common.expect_the_same_class(self, other)
return (self.id_sequence == other.id_sequence and self.router_id_mask == other.router_id_mask and
self.link_quality_and_route_data == other.link_quality_and_route_data)
def __repr__(self):
lqrd_str = ", ".join(["{}".format(lqrd) for lqrd in self.link_quality_and_route_data])
return "Route64(id_sequence={}, router_id_mask={}, link_quality_and_route_data=[{}])".format(
self.id_sequence, hex(self.router_id_mask), lqrd_str)
class Route64Factory:
def __init__(self, link_quality_and_route_data_factory):
self._lqrd_factory = link_quality_and_route_data_factory
def parse(self, data, message_info):
id_sequence = ord(data.read(1))
router_id_mask = struct.unpack(">Q", data.read(8))[0]
link_quality_and_route_data = []
while data.tell() < len(data.getvalue()):
link_quality_and_route_data.append(self._lqrd_factory.parse(data, message_info))
return Route64(id_sequence, router_id_mask, link_quality_and_route_data)
class Address16(object):
def __init__(self, address):
self._address = address
@property
def address(self):
return self._address
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.address == other.address
def __repr__(self):
return "Address16(address={})".format(hex(self.address))
class Address16Factory:
def parse(self, data, message_info):
address = struct.unpack(">H", data.read(2))[0]
return Address16(address)
class LeaderData(object):
def __init__(
self,
partition_id,
weighting,
data_version,
stable_data_version,
leader_router_id,
):
self._partition_id = partition_id
self._weighting = weighting
self._data_version = data_version
self._stable_data_version = stable_data_version
self._leader_router_id = leader_router_id
@property
def partition_id(self):
return self._partition_id
@property
def weighting(self):
return self._weighting
@property
def data_version(self):
return self._data_version
@property
def stable_data_version(self):
return self._stable_data_version
@property
def leader_router_id(self):
return self._leader_router_id
def __eq__(self, other):
common.expect_the_same_class(self, other)
return (self.partition_id == other.partition_id and self.weighting == other.weighting and
self.data_version == other.data_version and self.stable_data_version == other.stable_data_version and
self.leader_router_id == other.leader_router_id)
def __repr__(self):
        return 'LeaderData(partition_id={}, weighting={}, data_version={}, stable_data_version={}, leader_router_id={})'.format(
self.partition_id,
self.weighting,
self.data_version,
self.stable_data_version,
self.leader_router_id,
)
class LeaderDataFactory:
def parse(self, data, message_info):
partition_id = struct.unpack(">I", data.read(4))[0]
weighting = ord(data.read(1))
data_version = ord(data.read(1))
stable_data_version = ord(data.read(1))
leader_router_id = ord(data.read(1))
return LeaderData(
partition_id,
weighting,
data_version,
stable_data_version,
leader_router_id,
)
class NetworkData(object):
def __init__(self, tlvs):
self._tlvs = tlvs
@property
def tlvs(self):
return self._tlvs
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.tlvs == other.tlvs
def __repr__(self):
tlvs_str = ", ".join(["{}".format(tlv) for tlv in self.tlvs])
return "NetworkData(tlvs=[{}])".format(tlvs_str)
class NetworkDataFactory:
def __init__(self, network_data_tlvs_factory):
self._tlvs_factory = network_data_tlvs_factory
def parse(self, data, message_info):
tlvs = self._tlvs_factory.parse(data, message_info)
return NetworkData(tlvs)
class TlvRequest(object):
def __init__(self, tlvs):
self._tlvs = tlvs
@property
def tlvs(self):
return self._tlvs
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.tlvs == other.tlvs
def __repr__(self):
tlvs_str = ", ".join(["{}".format(tlv) for tlv in self.tlvs])
return "TlvRequest(tlvs=[{}])".format(tlvs_str)
class TlvRequestFactory:
def parse(self, data, message_info):
tlvs = [b for b in bytearray(data.read())]
return TlvRequest(tlvs)
class ScanMask(object):
def __init__(self, router, end_device):
self._router = router
self._end_device = end_device
@property
def router(self):
return self._router
@property
def end_device(self):
return self._end_device
def __eq__(self, other):
common.expect_the_same_class(self, other)
return (self.router == other.router and self.end_device == other.end_device)
def __repr__(self):
return "ScanMask(router={}, end_device={})".format(self.router, self.end_device)
class ScanMaskFactory:
def parse(self, data, message_info):
scan_mask = ord(data.read(1))
router = (scan_mask >> 7) & 0x01
end_device = (scan_mask >> 6) & 0x01
return ScanMask(router, end_device)
class Connectivity(object):
def __init__(
self,
pp_byte,
link_quality_3,
link_quality_2,
link_quality_1,
leader_cost,
id_sequence,
active_routers,
sed_buffer_size=None,
sed_datagram_count=None,
):
self._pp_byte = pp_byte
self._link_quality_3 = link_quality_3
self._link_quality_2 = link_quality_2
self._link_quality_1 = link_quality_1
self._leader_cost = leader_cost
self._id_sequence = id_sequence
self._active_routers = active_routers
self._sed_buffer_size = sed_buffer_size
self._sed_datagram_count = sed_datagram_count
@property
def pp_byte(self):
return self._pp_byte
@property
def pp(self):
return common.map_pp(self._pp_byte)
@property
def link_quality_3(self):
return self._link_quality_3
@property
def link_quality_2(self):
return self._link_quality_2
@property
def link_quality_1(self):
return self._link_quality_1
@property
def leader_cost(self):
return self._leader_cost
@property
def id_sequence(self):
return self._id_sequence
@property
def active_routers(self):
return self._active_routers
@property
def sed_buffer_size(self):
return self._sed_buffer_size
@property
def sed_datagram_count(self):
return self._sed_datagram_count
def __eq__(self, other):
common.expect_the_same_class(self, other)
return (self.pp == other.pp and self.link_quality_3 == other.link_quality_3 and
self.link_quality_2 == other.link_quality_2 and self.link_quality_1 == other.link_quality_1 and
self.leader_cost == other.leader_cost and self.id_sequence == other.id_sequence and
self.active_routers == other.active_routers and self.sed_buffer_size == other.sed_buffer_size and
self.sed_datagram_count == other.sed_datagram_count)
def __repr__(self):
return r"Connectivity(pp={}, \
link_quality_3={}, \
link_quality_2={}, \
link_quality_1={}, \
leader_cost={}, \
id_sequence={}, \
active_routers={}, \
sed_buffer_size={}, \
sed_datagram_count={})".format(
self.pp,
self.link_quality_3,
self.link_quality_2,
self.link_quality_1,
self.leader_cost,
self.id_sequence,
self.active_routers,
self.sed_buffer_size,
self.sed_datagram_count,
)
class ConnectivityFactory:
def parse(self, data, message_info):
pp_byte = ord(data.read(1))
link_quality_3 = ord(data.read(1))
link_quality_2 = ord(data.read(1))
link_quality_1 = ord(data.read(1))
leader_cost = ord(data.read(1))
id_sequence = ord(data.read(1))
active_routers = ord(data.read(1))
sed_data = io.BytesIO(data.read(3))
if len(sed_data.getvalue()) > 0:
sed_buffer_size = struct.unpack(">H", sed_data.read(2))[0]
sed_datagram_count = ord(sed_data.read(1))
else:
sed_buffer_size = None
sed_datagram_count = None
return Connectivity(
pp_byte,
link_quality_3,
link_quality_2,
link_quality_1,
leader_cost,
id_sequence,
active_routers,
sed_buffer_size,
sed_datagram_count,
)
class LinkMargin(object):
def __init__(self, link_margin):
self._link_margin = link_margin
@property
def link_margin(self):
return self._link_margin
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.link_margin == other.link_margin
def __repr__(self):
return "LinkMargin(link_margin={})".format(self.link_margin)
class LinkMarginFactory:
def parse(self, data, message_info):
link_margin = ord(data.read(1))
return LinkMargin(link_margin)
class Status(object):
def __init__(self, status):
self._status = status
@property
def status(self):
return self._status
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.status == other.status
def __repr__(self):
return "Status(status={})".format(self.status)
class StatusFactory:
def parse(self, data, message_info):
status = ord(data.read(1))
return Status(status)
class Version(object):
def __init__(self, version):
self._version = version
@property
def version(self):
return self._version
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.version == other.version
def __repr__(self):
return "Version(version={})".format(self.version)
class VersionFactory:
def parse(self, data, message_info):
version = struct.unpack(">H", data.read(2))[0]
return Version(version)
class AddressFull(object):
def __init__(self, ipv6_address):
self._ipv6_address = ipv6_address
@property
def ipv6_address(self):
return self._ipv6_address
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.ipv6_address == other.ipv6_address
def __repr__(self):
return "AddressFull(ipv6_address={}')".format(hexlify(self.ipv6_address))
class AddressFullFactory:
def parse(self, data, message_info):
data.read(1) # first byte is ignored
ipv6_address = data.read(16)
return AddressFull(ipv6_address)
class AddressCompressed(object):
def __init__(self, cid, iid):
self._cid = cid
self._iid = iid
@property
def cid(self):
return self._cid
@property
def iid(self):
return self._iid
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.cid == other.cid and self.iid == other.iid
def __repr__(self):
return "AddressCompressed(cid={}, iid={}')".format(self.cid, hexlify(self.iid))
class AddressCompressedFactory:
def parse(self, data, message_info):
cid = ord(data.read(1)) & 0x0F
iid = bytearray(data.read(8))
return AddressCompressed(cid, iid)
class AddressRegistration(object):
def __init__(self, addresses):
self._addresses = addresses
@property
def addresses(self):
return self._addresses
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.addresses == other.addresses
def __repr__(self):
addresses_str = ", ".join(["{}".format(address) for address in self.addresses])
return "AddressRegistration(addresses=[{}])".format(addresses_str)
class AddressRegistrationFactory:
def __init__(self, addr_compressed_factory, addr_full_factory):
self._addr_compressed_factory = addr_compressed_factory
self._addr_full_factory = addr_full_factory
def parse(self, data, message_info):
addresses = []
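        # Each entry begins with a control byte whose top bit selects the
        # compressed (CID + 8-byte IID) or full (16-byte IPv6) encoding; peek
        # at it, then seek back so the per-entry factory re-reads it.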
while data.tell() < len(data.getvalue()):
compressed = (ord(data.read(1)) >> 7) & 0x01
data.seek(-1, io.SEEK_CUR)
if compressed:
addresses.append(self._addr_compressed_factory.parse(data, message_info))
else:
addresses.append(self._addr_full_factory.parse(data, message_info))
return AddressRegistration(addresses)
class Channel(object):
def __init__(self, channel_page, channel):
self._channel_page = channel_page
self._channel = channel
@property
def channel_page(self):
return self._channel_page
@property
def channel(self):
return self._channel
def __eq__(self, other):
common.expect_the_same_class(self, other)
return (self.channel_page == other.channel_page and self.channel == other.channel)
def __repr__(self):
return "Channel(channel_page={}, channel={})".format(self.channel_page, self.channel)
class ChannelFactory:
def parse(self, data, message_info):
channel_page = ord(data.read(1))
channel = struct.unpack(">H", data.read(2))[0]
return Channel(channel_page, channel)
class PanId:
def __init__(self, pan_id):
self._pan_id = pan_id
@property
def pan_id(self):
return self._pan_id
def __eq__(self, other):
common.expect_the_same_class(self, other)
return self.pan_id == other.pan_id
def __repr__(self):
return "PanId(pan_id={})".format(self.pan_id)
class PanIdFactory:
def parse(self, data, message_info):
pan_id = struct.unpack(">H", data.read(2))[0]
return PanId(pan_id)
class ActiveTimestamp(object):
def __init__(self, timestamp_seconds, timestamp_ticks, u):
self._timestamp_seconds = timestamp_seconds
self._timestamp_ticks = timestamp_ticks
self._u = u
@property
def timestamp_seconds(self):
return self._timestamp_seconds
@property
def timestamp_ticks(self):
return self._timestamp_ticks
@property
def u(self):
return self._u
def __eq__(self, other):
common.expect_the_same_class(self, other)
return (self.timestamp_seconds == other.timestamp_seconds and self.timestamp_ticks == other.timestamp_ticks and
self.u == other.u)
def __repr__(self):
return "ActiveTimestamp(timestamp_seconds={}, timestamp_ticks={}, u={})".format(
self.timestamp_seconds, self.timestamp_ticks, self.u)
class ActiveTimestampFactory:
def parse(self, data, message_info):
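        # The timestamp value packs a 48-bit seconds field followed by 16 bits
        # whose upper 15 bits are ticks and whose lowest bit is the U flag.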
seconds = bytearray([0x00, 0x00]) + bytearray(data.read(6))
ticks = struct.unpack(">H", data.read(2))[0]
timestamp_seconds = struct.unpack(">Q", bytes(seconds))[0]
timestamp_ticks = ticks >> 1
u = ticks & 0x01
return ActiveTimestamp(timestamp_seconds, timestamp_ticks, u)
class PendingTimestamp(object):
def __init__(self, timestamp_seconds, timestamp_ticks, u):
self._timestamp_seconds = timestamp_seconds
self._timestamp_ticks = timestamp_ticks
self._u = u
@property
def timestamp_seconds(self):
return self._timestamp_seconds
@property
def timestamp_ticks(self):
return self._timestamp_ticks
@property
def u(self):
return self._u
def __eq__(self, other):
common.expect_the_same_class(self, other)
return (self.timestamp_seconds == other.timestamp_seconds and self.timestamp_ticks == other.timestamp_ticks and
self.u == other.u)
def __repr__(self):
return "PendingTimestamp(timestamp_seconds={}, timestamp_ticks={}, u={})".format(
self.timestamp_seconds, self.timestamp_ticks, self.u)
class PendingTimestampFactory:
def parse(self, data, message_info):
seconds = bytearray([0x00, 0x00]) + bytearray(data.read(6))
ticks = struct.unpack(">H", data.read(2))[0]
timestamp_seconds = struct.unpack(">Q", bytes(seconds))[0]
timestamp_ticks = ticks >> 1
u = ticks & 0x01
return PendingTimestamp(timestamp_seconds, timestamp_ticks, u)
class ActiveOperationalDataset:
# TODO: Not implemented yet
def __init__(self):
print("ActiveOperationalDataset is not implemented yet.")
class ActiveOperationalDatasetFactory:
def parse(self, data, message_info):
return ActiveOperationalDataset()
class PendingOperationalDataset:
# TODO: Not implemented yet
def __init__(self):
print("PendingOperationalDataset is not implemented yet.")
class PendingOperationalDatasetFactory:
def parse(self, data, message_info):
return PendingOperationalDataset()
class ThreadDiscovery(object):
def __init__(self, tlvs):
self._tlvs = tlvs
@property
def tlvs(self):
return self._tlvs
def __eq__(self, other):
return self.tlvs == other.tlvs
def __repr__(self):
return "ThreadDiscovery(tlvs={})".format(self.tlvs)
class ThreadDiscoveryFactory:
def __init__(self, thread_discovery_tlvs_factory):
self._tlvs_factory = thread_discovery_tlvs_factory
def parse(self, data, message_info):
tlvs = self._tlvs_factory.parse(data, message_info)
return ThreadDiscovery(tlvs)
class CslChannel:
# TODO: Not implemented yet
def __init__(self):
print("CslChannel is not implemented yet.")
class CslChannelFactory:
# TODO: Not implemented yet
def parse(self, data, message_info):
return CslChannel()
class CslSynchronizedTimeout:
# TODO: Not implemented yet
def __init__(self):
print("CslSynchronizedTimeout is not implemented yet.")
class CslSynchronizedTimeoutFactory:
def parse(self, data, message_info):
return CslSynchronizedTimeout()
class TimeRequest:
# TODO: Not implemented yet
def __init__(self):
print("TimeRequest is not implemented yet.")
class TimeRequestFactory:
def parse(self, data, message_info):
return TimeRequest()
class TimeParameter:
# TODO: Not implemented yet
def __init__(self):
print("TimeParameter is not implemented yet.")
class TimeParameterFactory:
def parse(self, data, message_info):
return TimeParameter()
class LinkMetricsQuery:
# TODO: Not implemented yet
def __init__(self):
print("LinkMetricsQuery is not implemented yet.")
class LinkMetricsQueryFactory:
def parse(self, data, message_info):
return LinkMetricsQuery()
class LinkMetricsManagement:
# TODO: Not implemented yet
def __init__(self):
print("LinkMetricsManagement is not implemented yet.")
class LinkMetricsManagementFactory:
def parse(self, data, message_info):
return LinkMetricsManagement()
class LinkMetricsReport:
# TODO: Not implemented yet
def __init__(self):
print("LinkMetricsReport is not implemented yet.")
class LinkMetricsReportFactory:
def parse(self, data, message_info):
return LinkMetricsReport()
class LinkProbe:
# TODO: Not implemented yet
def __init__(self):
print("LinkProbe is not implemented yet.")
class LinkProbeFactory:
def parse(self, data, message_info):
return LinkProbe()
class MleCommand(object):
def __init__(self, _type, tlvs):
self._type = _type
self._tlvs = tlvs
@property
def type(self):
return self._type
@property
def tlvs(self):
return self._tlvs
def __repr__(self):
tlvs_str = ", ".join(["{}".format(tlv) for tlv in self.tlvs])
return "MleCommand(type={}, tlvs=[{}])".format(self.type.name, tlvs_str)
class MleCommandFactory:
_MARKER_EXTENDED_LENGTH = 0xff
def __init__(self, tlvs_factories):
self._tlvs_factories = tlvs_factories
def _get_length(self, data):
length = ord(data.read(1))
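        # A length byte of 0xff marks the extended form, in which the real
        # length follows as a big-endian uint16 (e.g. the bytes ff 01 00
        # decode to a TLV length of 256).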
if length == self._MARKER_EXTENDED_LENGTH:
length = struct.unpack(">H", data.read(2))[0]
return length
def _get_tlv_factory(self, _type):
try:
return self._tlvs_factories[_type]
except KeyError:
logging.error('Could not find TLV factory. Unsupported TLV type: {}'.format(_type))
return UnknownTlvFactory(_type)
def _parse_tlv(self, data, message_info):
_type = TlvType(ord(data.read(1)))
length = self._get_length(data)
value = data.read(length)
factory = self._get_tlv_factory(_type)
return factory.parse(io.BytesIO(value), message_info)
def parse(self, data, message_info):
cmd_type = CommandType(ord(data.read(1)))
tlvs = []
while data.tell() < len(data.getvalue()):
tlv = self._parse_tlv(data, message_info)
tlvs.append(tlv)
return MleCommand(cmd_type, tlvs)
class MleMessage(object):
def __init__(self, command):
self._command = command
@property
def command(self):
return self._command
def __repr__(self):
return "MleMessage(command={})".format(self.command)
class MleMessageSecured(MleMessage):
def __init__(self, aux_sec_hdr, command, mic):
super(MleMessageSecured, self).__init__(command)
self._aux_sec_hdr = aux_sec_hdr
self._mic = mic
@property
def aux_sec_hdr(self):
return self._aux_sec_hdr
@property
def mic(self):
return self._mic
def __repr__(self):
return "MleMessageSecured(aux_sec_hdr={}, command={}, mic=\"{}\")".format(self.aux_sec_hdr, self.command,
hexlify(self.mic))
class MleMessageFactory:
def __init__(self, aux_sec_hdr_factory, mle_command_factory, crypto_engine):
self._aux_sec_hdr_factory = aux_sec_hdr_factory
self._mle_command_factory = mle_command_factory
self._crypto_engine = crypto_engine
def _create_mle_secured_message(self, data, message_info):
aux_sec_hdr = self._aux_sec_hdr_factory.parse(data, message_info)
enc_data_length = len(data.getvalue())
enc_data = bytearray(data.read(enc_data_length - data.tell() - self._crypto_engine.mic_length))
mic = bytearray(data.read())
dec_data = self._crypto_engine.decrypt(enc_data, mic, message_info)
command = self._mle_command_factory.parse(io.BytesIO(dec_data), message_info)
return MleMessageSecured(aux_sec_hdr, command, mic)
def _create_mle_message(self, data, message_info):
command = self._mle_command_factory.parse(data, message_info)
return MleMessage(command)
def parse(self, data, message_info):
security_indicator = ord(data.read(1))
if security_indicator == 0:
return self._create_mle_secured_message(data, message_info)
elif security_indicator == 255:
return self._create_mle_message(data, message_info)
else:
raise RuntimeError(
"Could not create MLE message. Unknown security indicator value: {}".format(security_indicator))
|
|
# ==============================================================================
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
from __future__ import print_function
import os
import sys
import argparse
import math
import cntk
import numpy as np
import cntk as C
from cntk.logging import *
from cntk import input, cross_entropy_with_softmax, classification_error
from cntk import Trainer, cntk_py
from cntk.learners import momentum_sgd, learning_rate_schedule, momentum_as_time_constant_schedule, UnitType
from cntk.debugging import set_computation_network_trace_level
from cntk.device import try_set_default_device, gpu
from cntk import data_parallel_distributed_learner, block_momentum_distributed_learner, Communicator
from cntk.train.training_session import *
from cntk.debugging import *
from resnet_models import *
# Paths relative to current python file.
abs_path = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(abs_path, "..", "..", "..", "DataSets", "CIFAR-10")
model_path = os.path.join(abs_path, "Models")
# For this example we are using the same data source as for conv net - CIFAR
sys.path.append(os.path.join(abs_path, "..", "..", "ConvNet", "Python"))
from ConvNet_CIFAR10_DataAug_Distributed import create_image_mb_source
# model dimensions - these match the ones from convnet_cifar10_dataaug
# so we can use the same data source
image_height = 32
image_width = 32
num_channels = 3 # RGB
num_classes = 10
model_name = "ResNet_CIFAR10_DataAug.model"
# Create network
def create_resnet_network(network_name):
# Input variables denoting the features and label data
input_var = C.input_variable((num_channels, image_height, image_width))
label_var = C.input_variable((num_classes))
# create model, and configure learning parameters
if network_name == 'resnet20':
z = create_cifar10_model(input_var, 3, num_classes)
elif network_name == 'resnet110':
z = create_cifar10_model(input_var, 18, num_classes)
else:
        raise RuntimeError("Unknown model name!")
# loss and metric
ce = cross_entropy_with_softmax(z, label_var)
pe = classification_error(z, label_var)
return {
'name' : network_name,
'feature': input_var,
'label': label_var,
'ce' : ce,
'pe' : pe,
'output': z
}
# Create trainer
def create_trainer(network, minibatch_size, epoch_size, num_quantization_bits, block_size, warm_up, progress_printer):
if network['name'] == 'resnet20':
lr_per_mb = [1.0]*80+[0.1]*40+[0.01]
elif network['name'] == 'resnet110':
lr_per_mb = [0.1]*1+[1.0]*80+[0.1]*40+[0.01]
else:
        raise RuntimeError("Unknown model name!")
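    # The time constant below corresponds to a per-minibatch momentum of 0.9:
    # momentum = exp(-minibatch_size / time_constant).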
momentum_time_constant = -minibatch_size/np.log(0.9)
l2_reg_weight = 0.0001
# Set learning parameters
lr_per_sample = [lr/minibatch_size for lr in lr_per_mb]
lr_schedule = learning_rate_schedule(lr_per_sample, epoch_size=epoch_size, unit=UnitType.sample)
mm_schedule = momentum_as_time_constant_schedule(momentum_time_constant)
# learner object
    if block_size is not None and num_quantization_bits != 32:
raise RuntimeError("Block momentum cannot be used with quantization, please remove quantized_bits option.")
local_learner = momentum_sgd(network['output'].parameters, lr_schedule, mm_schedule,
l2_regularization_weight = l2_reg_weight)
    if block_size is not None:
learner = block_momentum_distributed_learner(local_learner, block_size=block_size)
else:
learner = data_parallel_distributed_learner(local_learner, num_quantization_bits=num_quantization_bits, distributed_after=warm_up)
return Trainer(network['output'], (network['ce'], network['pe']), learner, progress_printer)
# Train and test
def train_and_test(network, trainer, train_source, test_source, minibatch_size, epoch_size, restore, profiling=False):
    # define mapping from input streams to network inputs
input_map = {
network['feature']: train_source.streams.features,
network['label']: train_source.streams.labels
}
if profiling:
start_profiler(sync_gpu=True)
training_session(
trainer=trainer, mb_source = train_source,
mb_size = minibatch_size,
model_inputs_to_streams = input_map,
checkpoint_config = CheckpointConfig(filename = os.path.join(model_path, model_name), restore=restore),
progress_frequency=epoch_size,
test_config = TestConfig(test_source, minibatch_size=16)
).train()
if profiling:
stop_profiler()
# Train and evaluate the network.
def resnet_cifar10(train_data, test_data, mean_data, network_name, epoch_size, num_quantization_bits=32, block_size=3200, warm_up=0,
max_epochs=5, restore=True, log_to_file=None, num_mbs_per_log=None, gen_heartbeat=False, scale_up=False, profiling=False):
set_computation_network_trace_level(0)
    # NOTE: scaling up minibatch_size increases sample throughput. On an 8-GPU machine,
    # ResNet110 samples-per-second is ~7x that of a single GPU, compared to ~3x without
    # scaling up. However, a bigger minibatch size over the same number of samples means
    # fewer updates, and thus higher training error. This is a trade-off between speed and accuracy.
minibatch_size = 128 * (Communicator.num_workers() if scale_up else 1)
progress_printer = ProgressPrinter(
freq=num_mbs_per_log,
tag='Training',
log_to_file=log_to_file,
rank=Communicator.rank(),
gen_heartbeat=gen_heartbeat,
num_epochs=max_epochs)
network = create_resnet_network(network_name)
trainer = create_trainer(network, minibatch_size, epoch_size, num_quantization_bits, block_size, warm_up, progress_printer)
train_source = create_image_mb_source(train_data, mean_data, train=True, total_number_of_samples=max_epochs * epoch_size)
test_source = create_image_mb_source(test_data, mean_data, train=False, total_number_of_samples=cntk.io.FULL_DATA_SWEEP)
train_and_test(network, trainer, train_source, test_source, minibatch_size, epoch_size, restore, profiling)
if __name__=='__main__':
data_path = os.path.join(abs_path, "..", "..", "..", "DataSets", "CIFAR-10")
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--network', help='network type, resnet20 or resnet110', required=False, default='resnet20')
    # NOTE: type=bool does not parse 'False' from the command line (any non-empty string is truthy),
    # so the default must be a real bool to avoid silently enabling scale-up.
    parser.add_argument('-s', '--scale_up', help='scale up minibatch size with #workers for better parallelism', type=bool, required=False, default=False)
parser.add_argument('-datadir', '--datadir', help='Data directory where the CIFAR dataset is located', required=False, default=data_path)
parser.add_argument('-outputdir', '--outputdir', help='Output directory for checkpoints and models', required=False, default=None)
parser.add_argument('-logdir', '--logdir', help='Log file', required=False, default=None)
parser.add_argument('-e', '--epochs', help='Total number of epochs to train', type=int, required=False, default='160')
parser.add_argument('-es', '--epoch_size', help='Size of epoch in samples', type=int, required=False, default=None)
parser.add_argument('-q', '--quantized_bits', help='Number of quantized bits used for gradient aggregation', type=int, required=False, default='32')
parser.add_argument('-b', '--block_samples', type=int, help="Number of samples per block for block momentum (BM) distributed learner (if 0 BM learner is not used)", required=False, default=None)
parser.add_argument('-a', '--distributed_after', help='Number of samples to train with before running distributed', type=int, required=False, default='0')
parser.add_argument('-r', '--restart', help='Indicating whether to restart from scratch (instead of restart from checkpoint file by default)', action='store_true')
parser.add_argument('-device', '--device', type=int, help="Force to run the script on a specified device", required=False, default=None)
parser.add_argument('-profile', '--profile', help="Turn on profiling", action='store_true', default=False)
args = vars(parser.parse_args())
epoch_size = 50000
    if args['outputdir'] is not None:
        model_path = args['outputdir'] + "/models"
    if args['device'] is not None:
try_set_default_device(gpu(args['device']))
if args['epoch_size'] is not None:
epoch_size = args['epoch_size']
data_path = args['datadir']
if not os.path.isdir(data_path):
raise RuntimeError("Directory %s does not exist" % data_path)
mean_data=os.path.join(data_path, 'CIFAR-10_mean.xml')
train_data=os.path.join(data_path, 'train_map.txt')
test_data=os.path.join(data_path, 'test_map.txt')
num_quantization_bits = args['quantized_bits']
epochs = args['epochs']
warm_up = args['distributed_after']
network_name = args['network']
scale_up = bool(args['scale_up'])
# Create distributed trainer factory
print("Start training: quantize_bit = {}, epochs = {}, distributed_after = {}".format(num_quantization_bits, epochs, warm_up))
resnet_cifar10(train_data, test_data, mean_data,
network_name,
epoch_size,
num_quantization_bits,
block_size=args['block_samples'],
warm_up=args['distributed_after'],
max_epochs=epochs,
restore=not args['restart'],
scale_up=scale_up,
log_to_file=args['logdir'],
profiling=args['profile'])
# Must call MPI finalize when process exit without exceptions
Communicator.finalize()
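# --- Usage sketch (not part of the original script; the file name, worker
# count and paths below are hypothetical) ---
# The script is intended to be launched under MPI so that Communicator.rank()
# and Communicator.num_workers() reflect the distributed topology, e.g.:
#     mpiexec -n 4 python TrainResNet_CIFAR10_Distributed.py -n resnet110 \
#         -q 1 -datadir /data/CIFAR-10 -logdir ./logs
# -q selects the number of quantization bits used for gradient aggregation,
# and -b <samples> switches to the block momentum distributed learner.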
|
|
# License for code in this file that was taken from Python 2.5.
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF
# hereby grants Licensee a nonexclusive, royalty-free, world-wide
# license to reproduce, analyze, test, perform and/or display publicly,
# prepare derivative works, distribute, and otherwise use Python
# alone or in any derivative version, provided, however, that PSF's
# License Agreement and PSF's notice of copyright, i.e., "Copyright (c)
# 2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software Foundation;
# All Rights Reserved" are retained in Python alone or in any derivative
# version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
def curry(_curried_func, *args, **kwargs):
def _curried(*moreargs, **morekwargs):
return _curried_func(*(args+moreargs), **dict(kwargs, **morekwargs))
return _curried
### Begin from Python 2.5 functools.py ########################################
# Summary of changes made to the Python 2.5 code below:
# * swapped ``partial`` for ``curry`` to maintain backwards-compatibility
# in Django.
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software Foundation.
# All Rights Reserved.
###############################################################################
# update_wrapper() and wraps() are tools to help write
# wrapper functions that can handle naive introspection
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Update a wrapper function to look like the wrapped function
wrapper is the function to be updated
wrapped is the original function
assigned is a tuple naming the attributes assigned directly
from the wrapped function to the wrapper function (defaults to
functools.WRAPPER_ASSIGNMENTS)
updated is a tuple naming the attributes off the wrapper that
are updated with the corresponding attribute from the wrapped
function (defaults to functools.WRAPPER_UPDATES)
"""
for attr in assigned:
setattr(wrapper, attr, getattr(wrapped, attr))
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr))
# Return the wrapper so this can be used as a decorator via curry()
return wrapper
def wraps(wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Decorator factory to apply update_wrapper() to a wrapper function
Returns a decorator that invokes update_wrapper() with the decorated
function as the wrapper argument and the arguments to wraps() as the
remaining arguments. Default arguments are as for update_wrapper().
This is a convenience function to simplify applying curry() to
update_wrapper().
"""
return curry(update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
### End from Python 2.5 functools.py ##########################################
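# --- Illustrative usage (a minimal sketch, not part of the original module) ---
# curry() pre-binds arguments, and wraps()/update_wrapper() copy metadata from
# the wrapped function onto the wrapper:
#
#     def greet(greeting, name):
#         """Return a greeting."""
#         return '%s, %s!' % (greeting, name)
#
#     hello = curry(greet, 'Hello')
#     hello('world')              # -> 'Hello, world!'
#
#     @wraps(greet)
#     def verbose_greet(*args, **kwargs):
#         return greet(*args, **kwargs)
#     verbose_greet.__name__      # -> 'greet' (copied by update_wrapper)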
def memoize(func, cache, num_args):
"""
Wrap a function so that results for any argument tuple are stored in
'cache'. Note that the args to the function must be usable as dictionary
keys.
Only the first num_args are considered when creating the key.
"""
def wrapper(*args):
mem_args = args[:num_args]
if mem_args in cache:
return cache[mem_args]
result = func(*args)
cache[mem_args] = result
return result
return wraps(func)(wrapper)
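# --- Illustrative usage (a minimal sketch, not part of the original module) ---
# memoize() stores results keyed on the first num_args positional arguments:
#
#     _cache = {}
#     def expensive_add(a, b):
#         return a + b
#     fast_add = memoize(expensive_add, _cache, 2)
#     fast_add(1, 2)   # computes and stores the result under the key (1, 2)
#     fast_add(1, 2)   # returned straight from _cache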
class Promise(object):
"""
This is just a base class for the proxy class created in
the closure of the lazy function. It can be used to recognize
promises in code.
"""
pass
def lazy(func, *resultclasses):
"""
Turns any callable into a lazy evaluated callable. You need to give result
classes or types -- at least one is needed so that the automatic forcing of
the lazy evaluation code is triggered. Results are not memoized; the
function is evaluated on every access.
"""
class __proxy__(Promise):
"""
Encapsulate a function call and act as a proxy for methods that are
called on the result of that function. The function is not evaluated
until one of the methods on the result is called.
"""
__dispatch = None
def __init__(self, args, kw):
self.__func = func
self.__args = args
self.__kw = kw
if self.__dispatch is None:
self.__prepare_class__()
def __reduce__(self):
return (
_lazy_proxy_unpickle,
(self.__func, self.__args, self.__kw) + resultclasses
)
def __prepare_class__(cls):
cls.__dispatch = {}
for resultclass in resultclasses:
cls.__dispatch[resultclass] = {}
for (k, v) in resultclass.__dict__.items():
# All __promise__ return the same wrapper method, but they
# also do setup, inserting the method into the dispatch
# dict.
meth = cls.__promise__(resultclass, k, v)
if hasattr(cls, k):
continue
setattr(cls, k, meth)
cls._delegate_str = str in resultclasses
cls._delegate_unicode = unicode in resultclasses
assert not (cls._delegate_str and cls._delegate_unicode), "Cannot call lazy() with both str and unicode return types."
if cls._delegate_unicode:
cls.__unicode__ = cls.__unicode_cast
elif cls._delegate_str:
cls.__str__ = cls.__str_cast
__prepare_class__ = classmethod(__prepare_class__)
def __promise__(cls, klass, funcname, func):
# Builds a wrapper around some magic method and registers that magic
# method for the given type and method name.
def __wrapper__(self, *args, **kw):
# Automatically triggers the evaluation of a lazy value and
# applies the given magic method of the result type.
res = self.__func(*self.__args, **self.__kw)
for t in type(res).mro():
if t in self.__dispatch:
return self.__dispatch[t][funcname](res, *args, **kw)
raise TypeError("Lazy object returned unexpected type.")
if klass not in cls.__dispatch:
cls.__dispatch[klass] = {}
cls.__dispatch[klass][funcname] = func
return __wrapper__
__promise__ = classmethod(__promise__)
def __unicode_cast(self):
return self.__func(*self.__args, **self.__kw)
def __str_cast(self):
return str(self.__func(*self.__args, **self.__kw))
def __cmp__(self, rhs):
if self._delegate_str:
s = str(self.__func(*self.__args, **self.__kw))
elif self._delegate_unicode:
s = unicode(self.__func(*self.__args, **self.__kw))
else:
s = self.__func(*self.__args, **self.__kw)
if isinstance(rhs, Promise):
return -cmp(rhs, s)
else:
return cmp(s, rhs)
def __mod__(self, rhs):
if self._delegate_str:
return str(self) % rhs
elif self._delegate_unicode:
return unicode(self) % rhs
else:
raise AssertionError('__mod__ not supported for non-string types')
def __deepcopy__(self, memo):
# Instances of this class are effectively immutable. It's just a
# collection of functions. So we don't need to do anything
# complicated for copying.
memo[id(self)] = self
return self
def __wrapper__(*args, **kw):
# Creates the proxy object, instead of the actual value.
return __proxy__(args, kw)
return wraps(func)(__wrapper__)
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
return lazy(func, *resultclasses)(*args, **kwargs)
def allow_lazy(func, *resultclasses):
"""
A decorator that allows a function to be called with one or more lazy
arguments. If none of the args are lazy, the function is evaluated
immediately, otherwise a __proxy__ is returned that will evaluate the
function when needed.
"""
def wrapper(*args, **kwargs):
for arg in list(args) + kwargs.values():
if isinstance(arg, Promise):
break
else:
return func(*args, **kwargs)
return lazy(func, *resultclasses)(*args, **kwargs)
return wraps(func)(wrapper)
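# --- Illustrative usage (a minimal sketch, not part of the original module) ---
# lazy() defers the call until a method of one of the declared result classes
# is used on the proxy; allow_lazy() only defers when an argument is a Promise:
#
#     upper_lazy = lazy(lambda s: s.upper(), str)
#     p = upper_lazy('hello')      # nothing evaluated yet, p is a __proxy__
#     str(p)                       # -> 'HELLO' (evaluation happens here)
#
#     smart_upper = allow_lazy(lambda s: s.upper(), str)
#     smart_upper('hello')         # evaluated immediately -> 'HELLO'
#     smart_upper(p)               # argument is a Promise -> returns a proxy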
class LazyObject(object):
"""
A wrapper for another class that can be used to delay instantiation of the
wrapped class.
By subclassing, you have the opportunity to intercept and alter the
instantiation. If you don't need to do that, use SimpleLazyObject.
"""
def __init__(self):
self._wrapped = None
def __getattr__(self, name):
if self._wrapped is None:
self._setup()
return getattr(self._wrapped, name)
def __setattr__(self, name, value):
if name == "_wrapped":
# Assign to __dict__ to avoid infinite __setattr__ loops.
self.__dict__["_wrapped"] = value
else:
if self._wrapped is None:
self._setup()
setattr(self._wrapped, name, value)
def __delattr__(self, name):
if name == "_wrapped":
raise TypeError("can't delete _wrapped.")
if self._wrapped is None:
self._setup()
delattr(self._wrapped, name)
def _setup(self):
"""
Must be implemented by subclasses to initialise the wrapped object.
"""
raise NotImplementedError
# introspection support:
__members__ = property(lambda self: self.__dir__())
def __dir__(self):
if self._wrapped is None:
self._setup()
return dir(self._wrapped)
class SimpleLazyObject(LazyObject):
"""
A lazy object initialised from any function.
Designed for compound objects of unknown type. For builtins or objects of
known type, use django.utils.functional.lazy.
"""
def __init__(self, func):
"""
Pass in a callable that returns the object to be wrapped.
If copies are made of the resulting SimpleLazyObject, which can happen
in various circumstances within Django, then you must ensure that the
callable can be safely run more than once and will return the same
value.
"""
self.__dict__['_setupfunc'] = func
# For some reason, we have to inline LazyObject.__init__ here to avoid
# recursion
self._wrapped = None
def __str__(self):
if self._wrapped is None: self._setup()
return str(self._wrapped)
def __unicode__(self):
if self._wrapped is None: self._setup()
return unicode(self._wrapped)
def __deepcopy__(self, memo):
if self._wrapped is None:
# We have to use SimpleLazyObject, not self.__class__, because the
# latter is proxied.
result = SimpleLazyObject(self._setupfunc)
memo[id(self)] = result
return result
else:
# Changed to use deepcopy from copycompat, instead of copy
# For Python 2.4.
from airy.utils.copycompat import deepcopy
return deepcopy(self._wrapped, memo)
# Need to pretend to be the wrapped class, for the sake of objects that care
# about this (especially in equality tests)
def __get_class(self):
if self._wrapped is None: self._setup()
return self._wrapped.__class__
__class__ = property(__get_class)
def __eq__(self, other):
if self._wrapped is None: self._setup()
return self._wrapped == other
def __hash__(self):
if self._wrapped is None: self._setup()
return hash(self._wrapped)
def _setup(self):
self._wrapped = self._setupfunc()
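# --- Illustrative usage (a minimal sketch, not part of the original module) ---
# SimpleLazyObject defers construction of the wrapped object until the first
# attribute access:
#
#     class Settings(object):
#         def __init__(self):
#             print('loading settings')   # runs only once, on first access
#             self.debug = True
#     settings = SimpleLazyObject(Settings)
#     settings.debug                      # triggers _setup(), then -> True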
|
|
"""Location for database handling codes."""
import glob
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sqlalchemy as sa
from matplotlib import colors, ticker, cm
from sqlalchemy import and_
import simulators
from mingle.utilities import chi2_at_sigma
from mingle.utilities.param_file import parse_paramfile
from mingle.utilities.phoenix_utils import closest_model_params
from simulators.iam_module import target_params
class DBExtractor(object):
"""Methods for extracting the relevant code out of database table."""
def __init__(self, table):
self.table = table
self.cols = table.c
self.bind = self.table.metadata.bind
def simple_extraction(self, columns, limit=-1):
"""Simple table extraction, cols provided as list
col: list of string
limit: int (optional) default=10000
Returns as pandas DataFrame.
"""
table_columns = [self.cols[c] for c in columns]
df = pd.read_sql(
sa.select(table_columns).limit(limit), self.bind)
return df
def fixed_extraction(self, columns, fixed, limit=-1):
"""Table extraction with fixed value contitions.
col: list of string
Columns to return
fixed: dict(key, value)
limit: int (optional) default=10000
Returns as pandas DataFrame.
"""
assert isinstance(fixed, dict)
table_columns = [self.cols[c] for c in columns]
        conditions = and_(*(self.cols[key] == value for key, value in fixed.items()))
df = pd.read_sql(
sa.select(table_columns).where(conditions).limit(limit), self.bind)
return df
def ordered_extraction(self, order_by, columns=None, limit=-1, asc=True):
"""Table extraction with fixed value contitions.
order_by: string
Column name to order by.
columns: list of strings
Columns to return, default=None returns all.
limit: int (optional) default=10000
Returns as pandas dataframe.
"""
if columns is not None:
table_columns = [self.cols[c] for c in columns]
else:
table_columns = self.cols
if asc:
df = pd.read_sql(
sa.select(table_columns).order_by(
self.cols[order_by].asc()).limit(limit), self.bind)
else:
df = pd.read_sql(
sa.select(table_columns).order_by(
self.cols[order_by].desc()).limit(limit), self.bind)
return df
def fixed_ordered_extraction(self, columns, fixed, order_by, limit=-1, asc=True):
"""Table extraction with fixed value contitions.
col: list of string
Columns to return
fixed: dict(key, value)
order_by: string
Column name to order by.
limit: int (optional) default=10000
Returns as pandas dataframe.
"""
assert isinstance(fixed, dict)
table_columns = [self.cols[c] for c in columns]
        conditions = and_(*(self.cols[key] == value for key, value in fixed.items()))
if asc:
df = pd.read_sql(
sa.select(table_columns).where(conditions).order_by(
self.cols[order_by].asc()).limit(limit), self.bind)
else:
df = pd.read_sql(
sa.select(table_columns).where(conditions).order_by(
self.cols[order_by].desc()).limit(limit), self.bind)
return df
def minimum_value_of(self, column):
"""Return only the entry for the minimum column value, limited to one value.
"""
selection = self.cols[column]
df = pd.read_sql(
sa.select(self.cols).order_by(selection.asc()).limit(1), self.bind)
return df
def full_extraction(self):
"""Return Full database:"""
import warnings
warnings.warn("Loading in a database may cause memory cap issues.")
return pd.read_sql(sa.select(self.table.c), self.bind)
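# --- Illustrative usage (a minimal sketch; the table and column names are
# hypothetical and depend on the database being analysed) ---
#
#     table = load_sql_table("HD30501-1_coadd_iam_chisqr_results.db")  # defined below
#     ex = DBExtractor(table)
#     best = ex.minimum_value_of("coadd_chi2")
#     subset = ex.fixed_extraction(["teff_1", "coadd_chi2"],
#                                  fixed={"logg_1": 4.5}, limit=100)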
class SingleSimReader(object):
def __init__(self, base=".",
name="BSBHMNOISE",
prefix="", mode="bhm", chi2_val="coadd_chi2"):
self.base = base
self.name = name.upper()
self.prefix = prefix.upper()
if mode in ["iam", "tcm", "bhm"]:
self.mode = mode
else:
raise ValueError("Invalid SimReader mode")
if chi2_val in ["chi2_1", "chi2_2", "chi2_3", "chi2_4", "coadd_chi2"]:
self.chi2_val = chi2_val
else:
raise ValueError("Invalid chi2_val.")
def list_sims(self):
return glob.glob(os.path.join(self.base, "*"))
def load_df(self, params=["teff_1", "teff_2", "logg_1", "feh_1"]):
params.append(self.chi2_val)
table = self.get_table()
# print(table.c)
params = [table.c[p] for p in params]
dbdf = pd.read_sql(sa.select(params).order_by(table.c[self.chi2_val].asc()), table.metadata.bind)
# Coerce to be numeric columns
c = dbdf.columns[dbdf.dtypes.eq(object)]
dbdf[c] = dbdf[c].apply(pd.to_numeric, errors='coerce', axis=0)
return dbdf
def get_table(self):
starname = self.name
directory = os.path.join(self.base, starname, self.mode)
# print(directory)
dbs = glob.glob(os.path.join(directory, "*_coadd_{}_chisqr_results.db".format(self.mode)))
# print(dbs)
        assert len(dbs) == 1, "Expected exactly one database file, found {}".format(len(dbs))
dbname = dbs[0]
table = load_sql_table(dbname, verbose=False, echo=False)
return table
def params(self):
"""Get params from param file."""
if simulators.paths["parameters"].startswith("."):
param_file = os.path.join(self.base, "../", simulators.paths["parameters"],
"{}_params.dat".format(self.name))
else:
param_file = os.path.join(simulators.paths["parameters"], "{}_params.dat".format(self.name))
params = parse_paramfile(param_file, path=None)
print(params)
print("self mode", self.mode)
if self.mode == "bhm":
host_params = target_params(params, mode=self.mode)
closest_host_model = closest_model_params(*host_params) # unpack temp, logg, fe_h with *
else:
host_params, comp_params = target_params(params, mode=self.mode)
closest_host_model = closest_model_params(*host_params) # unpack temp, logg, fe_h with *
closest_comp_model = closest_model_params(*comp_params)
params.update(
{"teff_2": closest_comp_model[0], "logg_2": closest_comp_model[1], "feh_2": closest_comp_model[2]})
params.update(
{"teff_1": closest_host_model[0], "logg_1": closest_host_model[1], "feh_1": closest_host_model[2]})
return params
def df_contour(df, xcol, ycol, zcol, df_min, lim_params, correct=None, logscale=False, dof=1):
df_lim = df.copy()
for param in lim_params:
df_lim = df_lim[df_lim[param] == df_min[param].values[0]]
dfpivot = df_lim.pivot(xcol, ycol, zcol)
Y = dfpivot.columns.values
X = dfpivot.index.values
Z = dfpivot.values
print(X, Y, Z.shape)
x, y = np.meshgrid(X, Y, indexing="ij")
fig, ax = plt.subplots()
if logscale:
c = ax.contourf(x, y, Z, locator=ticker.LogLocator(), cmap=cm.viridis)
else:
c = ax.contourf(x, y, Z, cmap=cm.viridis)
# Chi levels values
print("Using chisquare dof=", dof)
sigmas = [Z.ravel()[Z.argmin()] + chi2_at_sigma(sigma, dof=dof) for sigma in range(1, 6)]
    sigma_labels = {sigmas[sig - 1]: r"${}-\sigma$".format(sig) for sig in range(1, 6)}
c2 = plt.contour(c, levels=sigmas)
plt.clabel(c2, fmt=sigma_labels, colors='w', fontsize=14)
cbar = plt.colorbar(c)
cbar.ax.set_ylabel(zcol)
plt.xlabel(xcol)
plt.ylabel(ycol)
if correct:
# Correct location of simulation
plt.plot(correct[xcol], correct[ycol], "ro", markersize=7)
# Mark minimum with a +.
min_i, min_j = divmod(Z.argmin(), Z.shape[1])
    plt.plot(X[min_i], Y[min_j], "g*", markersize=7, label=r"$Min \chi^2$")
plt.show()
def decompose_database_name(database):
"""Database names of form */Star_obsnum_chip...db."""
os.path.split(database)
path, name = os.path.split(database)
name_split = name.split("_")
star, obsnum = name_split[0].split("-")
chip = name_split[1]
return path, star, obsnum, chip
def load_sql_table(database, name="chi2_table", echo=False, verbose=False):
sqlite_db = 'sqlite:///{0}'.format(database)
try:
engine = sa.create_engine(sqlite_db, echo=echo)
table_names = engine.table_names()
except Exception as e:
print("\nAccessing sqlite_db = {0}\n".format(sqlite_db))
print("cwd =", os.getcwd())
raise e
if verbose:
print("Table names in database =", engine.table_names())
if len(table_names) == 1:
tb_name = table_names[0]
else:
raise ValueError("Database does not just have 1 table. {0}, len={1}".format(table_names, len(table_names)))
if tb_name != name:
raise NameError("Name {0} given does not match table in database, {1}.".format(tb_name, table_names))
meta = sa.MetaData(bind=engine)
db_table = sa.Table(name, meta, autoload=True)
return db_table
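# --- Illustrative usage (a minimal sketch; the path below is hypothetical) ---
#
#     path, star, obsnum, chip = decompose_database_name(
#         "analysis/HD30501-1_1_coadd_chisqr_results.db")
#     # -> star="HD30501", obsnum="1", chip="1"
#     table = load_sql_table("analysis/HD30501-1_1_coadd_chisqr_results.db")
#     df = DBExtractor(table).simple_extraction(["teff_1", "logg_1"])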
|
|
#!/usr/bin/python
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyreRing Utility Classes.
This package is the lib file for PyreRing.
It contains the following classes:
PyreRingFrameworkAdaptor:
An abstract class. It defines the interface of a framework. All
frameworks which need to be able to plug into PyreRing need to implement
this adaptor's methods.
PyreRingSuiteRunner:
The runner class to invoke the actual test framework to run the test.
PRConfigParser:
    The config parser to parse the content of script files to figure out some
running configuration for the test scripts.
"""
__author__ = '[email protected] (Mingyu Wu)'
import glob
import logging
import os
import sys
import tarfile
import traceback
from lib import common_util
from lib import filesystemhandlerextend
from lib import pyreringconfig
global_settings = pyreringconfig.GlobalPyreRingConfig.settings
logger = logging.getLogger('PyreRing')
DEBUG = common_util.DebugLog
START_SIGN = '# PR_START'
END_SIGN = '# PR_END'
BINARY_SUFFIXES = ['.par']
class PyreRingFrameworkAdaptor(object):
"""Abstract class defined the pyrering framework interface.
Any new framework which needs to plug into pyrering needs to implement this
adaptor. It defines the general methods will be invoked by runner.
"""
def Prepare(self):
"""This method should be implemented to prepare the framework to run.
    Like preparing the test directory or test data. It is up to the
    framework to decide what to prepare.
"""
raise NotImplementedError('Prepare method not implemented.')
def CleanUp(self):
"""This method should be implemented to clean up after the test.
    Like deleting footprints, putting things back, etc. It is up to the
implementation to decide what to clean.
"""
raise NotImplementedError('CleanUp method not implemented.')
def Run(self, suite_list, email_flag):
"""This method will be invoked to actualy run the test.
It should take a list of tests/suites as the argument and a flag for
sending email report.
Args:
suite_list: a list of test/suites understandable by the framework.
email_flag: a boolean for email result or not.
Returns:
None.
"""
raise NotImplementedError('Run method not implemented.')
def GetFrameworkName(self):
"""Get the framework name.
Ideally each instance should have a unique name. But this is not used at
current stage.
"""
raise NotImplementedError('GetFrameworkName method not implemented.')
def GetFrameworkType(self):
"""This should be implemented to return the framework type."""
raise NotImplementedError('GetFrameworkType method not implemented.')
def CheckFramework(self):
"""Check if the framework is avaiable to use.
Returns:
True if it is ready to use
"""
raise NotImplementedError('CheckFramework method not implemented.')
class PyreRingSuiteRunner(object):
"""This class is a univeral runner.
It is supposed to be constructed with any test framework which has implemented
PyreRingFrameworkAdaptor abstract class.
It takes care of populating the environment variables: PYTHONPATH and PATH,
feeding a list of test suites to the framework it is constructed with and
archiving test result files.
"""
def __init__(self, framework, suite_list):
"""Init the runner to a specific test framework.
The Runner will be inited to a given framework with a list of test suites
which are to be run by the framework.
Args:
framework: any test framework as long as it implements
        PyreRingFrameworkAdaptor.
suite_list: a list of test suites passed by user command line, the suite
names should be recognizable by the framework.
Returns:
None
"""
self.framework = framework
self.run_suite = suite_list
self.prop = global_settings
    # This is the list of log or report types that will be generated at report_dir.
# It will be used to do archiving and also clean up previous leftover.
self.report_types = ['.txt']
self.output_types = ['.out']
@DEBUG
def SetUp(self):
"""The method will try to setup the test environment.
Specifically, it will clean up the report directory for new reports and
call the framework's Prepare method to let it prepare itself.
Returns:
None
"""
# This is the list of log or report types that will be generated at
# report_dir. It will be used to do archiving and also clean up previous
# leftover runs.
report_dir = self.prop['report_dir']
host_name = self.prop['host_name']
if not os.path.isdir(report_dir):
os.makedirs(report_dir)
self.report_file_list = [os.path.join(report_dir, host_name + '*' + x)
for x in self.report_types]
olist = [os.path.join(report_dir, '*' + x) for x in self.output_types]
self.report_file_list.extend(olist)
files_present = os.listdir(report_dir)
self.CleanFiles(self.report_file_list)
self.framework.Prepare()
@DEBUG
def TearDown(self):
"""It will give the test framework a chance to clean up after itself.
Basically it just calls the framework's Cleanup method.
"""
    self.framework.CleanUp()
def _SetEnvironment(self):
"""Called by Run to set up environment variables.
    There are some environment variables we need to pass on to the subshell
    which will be used to run the actual test scripts.
    1. source_dir: this variable defines the root directory of the
    project-specific directory. Test case scripts should be under a subdir of
    this dir, and all other project-specific things should be put under it.
    Users can also refer to os.environ['source_dir'] to get the absolute path
    when looking around the file system for other files.
    2. PYTHONPATH: here we add the user-defined Python libs to the Python path,
    so users do not need to bother with Python path management. The default
    entry is $source_dir; users can add any other path by defining the
    'python_lib_dir' variable in the configure file.
    So in the test script, PYTHONPATH is set automatically; users can drop
    their lib files under $source_dir at the top level and follow the normal
    Python import rules, or use any location they define in python_lib_dir.
    3. PERL5LIB: similar to PYTHONPATH, with an extra 'perl_lib_dir' for users
    to define extra library paths.
Returns:
None
"""
environment_path_list = (
('PYTHONPATH', 'python_lib_dir'),
('PATH', 'shell_lib_dir'),
('PERL5LIB', 'perl_lib_dir'),
)
# Setting up environment first, these variables will be passed on to the
# subshell which is used to run the actual test scripts.
# This is default lib path we need to add on
lib_path = self.prop['source_dir']
os.environ['source_dir'] = lib_path
for environment_path, user_var in environment_path_list:
self._AttachEnvironmentPath(environment_path, [lib_path])
if user_var in self.prop:
self._AttachEnvironmentPath(
environment_path,
self.prop[user_var].split(os.pathsep))
def _AttachEnvironmentPath(self, env_var, new_paths):
"""Attach new_path to the environment path variable.
This method is used to update path environment variables.
It will append a list of paths to the given environment variable.
Args:
env_var: <string> the target environment variable to be appended to.
      new_paths: <list> a list of paths to append to the env_var.
Returns:
None.
"""
for new_path in new_paths:
try:
path = os.environ[env_var]
# Attach the new_path if it is not already in it.
if not new_path in path.split(os.pathsep):
path = os.pathsep.join([path, new_path])
os.environ[env_var] = path
except KeyError:
# There is no such variable in environment.
os.environ[env_var] = new_path
logger.debug('%s is updated to: %s' %(env_var, new_path))
@DEBUG
def Run(self, email_flag=True):
"""The public method to start the test.
The actual runner to invoke the test framework to run a set of tests,
then it will try to collect all types of files in report directory and
    archive them. Remember the archive is not the actual log file, just an
    archive of all the junk generated during the test run. You might need it
    later, who knows.
Args:
email_flag: a boolean value to state if an email is expected or not. But
        it is up to the framework implementer to decide if email will come or
not.
Returns:
The count of non-successful test cases.
"""
self._SetEnvironment()
failure_count = self.framework.Run(self.run_suite, email_flag)
# After the test run, try to go to report directory collect all log files
# and archive them.
archive_name = os.path.join(self.prop['report_dir'], '%s_%s.tar.gz' %
(self.prop['host_name'], self.prop['time']))
keep_log = True
host_name = self.prop['host_name']
self.TarReports(archive_name, self.report_file_list, keep_log)
return failure_count
@DEBUG
def TarReports(self, archive_file, report_file_list, keep=True):
"""Generate an tar.gz archive file.
This method will generate a tar.gz file using the given the list of file
patterns to collect. The purpose of this method is to archive all the files
logs generated during the test run. It is not a log nor a report, just an
archive.
Args:
archive_file: the file name to generate, it should end with tar.gz, since
the generate file will be tar.gz format.
report_file_list: the list of files/pattern to collect in the archive
keep: a boolean value to identify if the original log file should be
removed or not after the archive.
Returns:
None. The archive file should be created as specified.
"""
tar = tarfile.open(archive_file, 'w:gz')
for name in report_file_list:
for onefile in glob.glob(name):
os.chdir(os.path.dirname(onefile))
tar.add(os.path.basename(onefile))
if not keep:
os.remove(onefile)
tar.close()
@DEBUG
def CleanFiles(self, report_file_list):
"""Remove a list of files/patterns.
It is used to clean up leftover reports/log files from previous run.
Args:
report_file_list: the list of file names, patterns to clean
Returns:
None
"""
for name in report_file_list:
for onefile in glob.glob(name):
os.remove(onefile)
class PRConfigParser(object):
"""A util class to parse pyrering specific runner config.
It will read in a section of comments in a given text file(normally it should
be the actual test script or test suite file and the config info is embedded
as a comment section. the section should look like this:
# PR_START
# key1 = value1
# key2 = value2
# PR_END
Currently supported keys are: TIMEOUT, ROOT_ACCESS, EXPECTED_RETURN,
  CONCURRENT, NFS, ERROR. These configs describe how this test script
  should be run.
  This info will be read in, packed in a dictionary and sent to the actual
  runner to execute the script, which has the final decision on how the test
  script should be run.
"""
def __init__(self,
filesystem=filesystemhandlerextend.FileSystemHandlerExtend()):
"""Provide a empty dictionary and a list of supported keys.
Args:
filesystem: a FileSystemHandlerExtend object as a layer between this code
and the actual filesystem. So I can swap this layer with a mock
filesystem for testing.
"""
self.filesystem = filesystem
# This is the list of currently supported config keys. Any other keys not
    # defined in this list will be taken as strings only.
self.key_list = ['TEST_SCRIPT',
'TIMEOUT',
'ROOT_ACCESS',
'EXPECTED_RETURN',
'CONCURRENT',
'NFS',
'VERSION',
'SIZE',
'COMMENTS',
'FLAGS',
'ERROR',
]
@DEBUG
def Default(self):
"""Return a config dictionary with default value populated.
Returns:
A dictionary of configuration with default value populated.
The default keys are:
'TEST_SCRIPT'
'TIMEOUT'
'ROOT_ACCESS'
'EXPECTED_RETURN'
'CONCURRENT'
'NFS'
'VERSION'
'SIZE'
'COMMENTS'
'FLAGS'
'ERROR'
"""
test_case_config = {}
test_case_config['TEST_SCRIPT'] = ''
test_case_config['TIMEOUT'] = 600
test_case_config['ROOT_ACCESS'] = False
test_case_config['EXPECTED_RETURN'] = 0
test_case_config['CONCURRENT'] = True
test_case_config['NFS'] = False
test_case_config['VERSION'] = '1.0'
test_case_config['SIZE'] = 'SMALL'
test_case_config['COMMENTS'] = None
test_case_config['FLAGS'] = None
test_case_config['ERROR'] = 255
return test_case_config
def _ParseLine(self, line):
"""Assistant method to parse a line of config.
    The line has to start with '#' (not '##') and have a key-value pair separated
by a '=', otherwise the line will be ignored.
Args:
line: a line of config file.
Returns:
A dictionary has one pair of key, value corresponding to the line.
Raises:
      ValueError: if ROOT_ACCESS, CONCURRENT, NFS are given invalid boolean
        values or TIMEOUT, EXPECTED_RETURN, ERROR are given non-integer values.
"""
temp_dict = {}
if (not line.startswith('#') or
line.startswith('##') or
len(line.split('=')) != 2):
return temp_dict
key, value = line[1:].split('=', 1)
key = key.strip().upper()
value = value.strip().strip('"').strip("'")
if key in ['TIMEOUT', 'EXPECTED_RETURN', 'ERROR']:
try:
temp_dict[key] = int(value)
      except ValueError:
raise ValueError('Invalid integer %s for key:%s' % (value, key))
elif key in ['ROOT_ACCESS', 'CONCURRENT', 'NFS']:
if value.lower().startswith('false'):
temp_dict[key] = False
elif value.lower().startswith('true'):
temp_dict[key] = True
else:
raise ValueError('Invalid boolean value %s for key:%s' % (value, key))
else:
# Otherwise, just store it as string value
temp_dict[key] = value
return temp_dict
@DEBUG
def ParseList(self, lines, populate_default=True):
"""Parser a list of lines and return a dictionary of configs.
The list of lines can come from readlines() of a file or a user defined
list of info.
Args:
lines: a list of lines
      populate_default: boolean value; whether missing values should be
        populated with default values.
Returns:
the dictionary of configuration
Raises:
ValueError: if missing end of config sign: END_SIGN.
"""
    # Reset test_case_config each time a new ParseList starts.
test_case_config = {}
for i in range(len(lines)):
one_line = lines[i].strip()
if one_line.startswith(START_SIGN):
# Start of PyreRing config section
for j in range(i+1, len(lines)):
config_line = lines[j].strip()
if config_line.startswith(END_SIGN):
# End of the PyreRing config section
break
elif config_line.startswith('#'):
# This is a config line, parse it
test_case_config.update(self._ParseLine(config_line))
else:
          # If the inner loop never breaks, the config section is ill-formatted.
raise ValueError('Missing end of %s line' % END_SIGN)
break
if populate_default:
default = self.Default()
default.update(test_case_config)
logger.debug('exit PRConfigParser.ParseList with default')
return default
else:
logger.debug('exit PRConfigParser.ParseList with user settings')
return test_case_config
@DEBUG
def ParseFile(self, anyfile, populate_default=True):
"""Given a file parse out the config section.
Args:
anyfile: a file path
      populate_default: a boolean value; whether default values should be
        provided for keys that are not defined.
Returns:
a dictionary of the configuration
Raises:
ValueError: if the file has invalid configuration info
"""
    # If this file is a binary file, don't scan it; populate with default
    # values and return the config.
if os.path.splitext(anyfile)[1] in BINARY_SUFFIXES:
configs = self.Default()
configs['TEST_SCRIPT'] = anyfile
logger.debug('exit PRConfigParser.ParseFile with binary default')
return configs
config_file = self.filesystem.FileOpenForRead(anyfile)
try:
try:
configs = self.ParseList(config_file.readlines(), populate_default)
except ValueError:
err_msg = ('Exception[%s] on file: [%s].\n\tSTACK TRACE:\n%s' %
                   (sys.exc_info()[0], anyfile, traceback.format_exc()))
raise ValueError(err_msg)
finally:
self.filesystem.FileClose(config_file)
# This always overwrites whatever defined in configuration.
configs['TEST_SCRIPT'] = anyfile
return configs
@DEBUG
def ParseFiles(self, files, populate_default=True):
"""Parse a list of files.
Args:
files: a list of files
      populate_default: a boolean value; whether to populate undefined keys
        with default values.
Returns:
a list of dictionaries of configurations.
"""
config_list = []
for one_file in files:
config_list.append(self.ParseFile(one_file, populate_default))
return config_list
@DEBUG
def ParseSuite(self, suite_file, files, populate_default=True):
"""Parse a list of files for a suite.
    The suite file configuration will overwrite the individual files'
    configuration if defined.
Args:
      suite_file: a pyrering suite definition file path
files: a list of files
      populate_default: boolean value; whether to provide default values if
        some keys are not defined.
Returns:
      a list of config dictionaries, with the suite config overwriting each
        script's config.
"""
config_list = []
suite_config = self.ParseFile(suite_file, False)
# Remove the TEST_SCRIPT key, so suite config will not wipe out
# TEST_SCRIPT key value.
suite_config.pop('TEST_SCRIPT')
config_list = self.ParseFiles(files, populate_default)
for one_config in config_list:
one_config.update(suite_config)
return config_list
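# --- Illustrative usage (a minimal sketch, not part of the original module;
# the script path is hypothetical) ---
# A test script carrying an embedded PyreRing config section looks like:
#
#     #!/bin/bash
#     # PR_START
#     # TIMEOUT = 120
#     # ROOT_ACCESS = true
#     # CONCURRENT = false
#     # PR_END
#
# and can be parsed with:
#
#     parser = PRConfigParser()
#     config = parser.ParseFile('/path/to/test_script.sh')
#     config['TIMEOUT']   # -> 120; missing keys are filled from Default()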
|
|
from __future__ import absolute_import, unicode_literals
import collections
import logging
import pkg_resources
from mopidy import config as config_lib, exceptions
logger = logging.getLogger(__name__)
_extension_data_fields = ['extension', 'entry_point', 'config_schema',
'config_defaults', 'command']
ExtensionData = collections.namedtuple('ExtensionData', _extension_data_fields)
class Extension(object):
"""Base class for Mopidy extensions"""
dist_name = None
"""The extension's distribution name, as registered on PyPI
Example: ``Mopidy-Soundspot``
"""
ext_name = None
"""The extension's short name, as used in setup.py and as config section
name
Example: ``soundspot``
"""
version = None
"""The extension's version
Should match the :attr:`__version__` attribute on the extension's main
Python module and the version registered on PyPI.
"""
def get_default_config(self):
"""The extension's default config as a bytestring
:returns: bytes or unicode
"""
raise NotImplementedError(
'Add at least a config section with "enabled = true"')
def get_config_schema(self):
"""The extension's config validation schema
:returns: :class:`~mopidy.config.schema.ExtensionConfigSchema`
"""
schema = config_lib.ConfigSchema(self.ext_name)
schema['enabled'] = config_lib.Boolean()
return schema
def get_command(self):
"""Command to expose to command line users running ``mopidy``.
:returns:
Instance of a :class:`~mopidy.commands.Command` class.
"""
pass
def validate_environment(self):
"""Checks if the extension can run in the current environment.
Dependencies described by :file:`setup.py` are checked by Mopidy, so
you should not check their presence here.
If a problem is found, raise :exc:`~mopidy.exceptions.ExtensionError`
with a message explaining the issue.
:raises: :exc:`~mopidy.exceptions.ExtensionError`
:returns: :class:`None`
"""
pass
def setup(self, registry):
"""
Register the extension's components in the extension :class:`Registry`.
For example, to register a backend::
def setup(self, registry):
from .backend import SoundspotBackend
registry.add('backend', SoundspotBackend)
See :class:`Registry` for a list of registry keys with a special
meaning. Mopidy will instantiate and start any classes registered under
the ``frontend`` and ``backend`` registry keys.
This method can also be used for other setup tasks not involving the
extension registry.
:param registry: the extension registry
:type registry: :class:`Registry`
"""
raise NotImplementedError
class Registry(collections.Mapping):
"""Registry of components provided by Mopidy extensions.
Passed to the :meth:`~Extension.setup` method of all extensions. The
registry can be used like a dict of string keys and lists.
Some keys have a special meaning, including, but not limited to:
- ``backend`` is used for Mopidy backend classes.
- ``frontend`` is used for Mopidy frontend classes.
- ``local:library`` is used for Mopidy-Local libraries.
    Extensions can use the registry to allow others to extend the extension
    itself. For example, ``Mopidy-Local`` uses the ``local:library`` key to
allow other extensions to register library providers for ``Mopidy-Local``
to use. Extensions should namespace custom keys with the extension's
:attr:`~Extension.ext_name`, e.g. ``local:foo`` or ``http:bar``.
"""
def __init__(self):
self._registry = {}
def add(self, name, cls):
"""Add a component to the registry.
Multiple classes can be registered to the same name.
"""
self._registry.setdefault(name, []).append(cls)
def __getitem__(self, name):
return self._registry.setdefault(name, [])
def __iter__(self):
return iter(self._registry)
def __len__(self):
return len(self._registry)
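# --- Illustrative usage (a minimal sketch; SoundspotBackend and
# AnotherBackend are hypothetical classes used only for the example) ---
#
#     registry = Registry()
#     registry.add('backend', SoundspotBackend)
#     registry.add('backend', AnotherBackend)
#     registry['backend']     # -> [SoundspotBackend, AnotherBackend]
#     registry['frontend']    # -> [] (missing keys default to empty lists)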
def load_extensions():
"""Find all installed extensions.
:returns: list of installed extensions
"""
installed_extensions = []
for entry_point in pkg_resources.iter_entry_points('mopidy.ext'):
logger.debug('Loading entry point: %s', entry_point)
extension_class = entry_point.load(require=False)
try:
if not issubclass(extension_class, Extension):
raise TypeError # issubclass raises TypeError on non-class
except TypeError:
            logger.error('Entry point %s did not contain a valid extension '
                         'class: %r', entry_point.name, extension_class)
continue
try:
extension = extension_class()
config_schema = extension.get_config_schema()
default_config = extension.get_default_config()
command = extension.get_command()
except Exception:
logger.exception('Setup of extension from entry point %s failed, '
'ignoring extension.', entry_point.name)
continue
installed_extensions.append(ExtensionData(
extension, entry_point, config_schema, default_config, command))
logger.debug(
'Loaded extension: %s %s', extension.dist_name, extension.version)
names = (ed.extension.ext_name for ed in installed_extensions)
logger.debug('Discovered extensions: %s', ', '.join(names))
return installed_extensions
def validate_extension_data(data):
"""Verify extension's dependencies and environment.
    :param data: the :class:`ExtensionData` to check
    :returns: whether the extension should be run
"""
logger.debug('Validating extension: %s', data.extension.ext_name)
if data.extension.ext_name != data.entry_point.name:
logger.warning(
'Disabled extension %(ep)s: entry point name (%(ep)s) '
'does not match extension name (%(ext)s)',
{'ep': data.entry_point.name, 'ext': data.extension.ext_name})
return False
try:
data.entry_point.require()
except pkg_resources.DistributionNotFound as ex:
logger.info(
'Disabled extension %s: Dependency %s not found',
data.extension.ext_name, ex)
return False
except pkg_resources.VersionConflict as ex:
if len(ex.args) == 2:
found, required = ex.args
logger.info(
'Disabled extension %s: %s required, but found %s at %s',
data.extension.ext_name, required, found, found.location)
else:
logger.info(
'Disabled extension %s: %s', data.extension.ext_name, ex)
return False
try:
data.extension.validate_environment()
except exceptions.ExtensionError as ex:
logger.info(
'Disabled extension %s: %s', data.extension.ext_name, ex.message)
return False
except Exception:
logger.exception('Validating extension %s failed with an exception.',
data.extension.ext_name)
return False
if not data.config_schema:
logger.error('Extension %s does not have a config schema, disabling.',
data.extension.ext_name)
return False
elif not isinstance(data.config_schema.get('enabled'), config_lib.Boolean):
logger.error('Extension %s does not have the required "enabled" config'
' option, disabling.', data.extension.ext_name)
return False
for key, value in data.config_schema.items():
if not isinstance(value, config_lib.ConfigValue):
logger.error('Extension %s config schema contains an invalid value'
' for the option "%s", disabling.',
data.extension.ext_name, key)
return False
if not data.config_defaults:
logger.error('Extension %s does not have a default config, disabling.',
data.extension.ext_name)
return False
return True
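# --- Illustrative usage (a minimal sketch, not part of the original module) ---
# A typical caller discovers installed extensions, keeps the ones that pass
# validation, and lets each register its components:
#
#     registry = Registry()
#     for data in load_extensions():
#         if validate_extension_data(data):
#             data.extension.setup(registry)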
|
|
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v3 User action implementations"""
import logging
import six
import sys
from cliff import command
from cliff import lister
from cliff import show
from openstackclient.common import utils
class CreateUser(show.ShowOne):
"""Create new user"""
log = logging.getLogger(__name__ + '.CreateUser')
def get_parser(self, prog_name):
parser = super(CreateUser, self).get_parser(prog_name)
parser.add_argument(
'name',
metavar='<user-name>',
help='New user name',
)
parser.add_argument(
'--password',
metavar='<user-password>',
help='New user password',
)
parser.add_argument(
'--password-prompt',
dest="password_prompt",
action="store_true",
help='Prompt interactively for password',
)
parser.add_argument(
'--email',
metavar='<user-email>',
help='New user email address',
)
parser.add_argument(
'--project',
metavar='<project>',
help='Set default project (name or ID)',
)
parser.add_argument(
'--domain',
metavar='<domain>',
help='New default domain name or ID',
)
parser.add_argument(
'--description',
metavar='<description>',
help='Description for new user',
)
enable_group = parser.add_mutually_exclusive_group()
enable_group.add_argument(
'--enable',
action='store_true',
help='Enable user (default)',
)
enable_group.add_argument(
'--disable',
action='store_true',
help='Disable user',
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
identity_client = self.app.client_manager.identity
if parsed_args.project:
project_id = utils.find_resource(
identity_client.projects,
parsed_args.project,
).id
else:
project_id = None
if parsed_args.domain:
domain_id = utils.find_resource(
identity_client.domains, parsed_args.domain).id
else:
domain_id = None
enabled = True
if parsed_args.disable:
enabled = False
if parsed_args.password_prompt:
parsed_args.password = utils.get_password(self.app.stdin)
user = identity_client.users.create(
parsed_args.name,
domain=domain_id,
default_project=project_id,
password=parsed_args.password,
email=parsed_args.email,
description=parsed_args.description,
enabled=enabled
)
info = {}
info.update(user._info)
return zip(*sorted(six.iteritems(info)))
class DeleteUser(command.Command):
"""Delete user"""
log = logging.getLogger(__name__ + '.DeleteUser')
def get_parser(self, prog_name):
parser = super(DeleteUser, self).get_parser(prog_name)
parser.add_argument(
'user',
metavar='<user>',
help='User to delete (name or ID)',
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
identity_client = self.app.client_manager.identity
user = utils.find_resource(
identity_client.users,
parsed_args.user,
)
identity_client.users.delete(user.id)
return
class ListUser(lister.Lister):
"""List users and optionally roles assigned to users"""
log = logging.getLogger(__name__ + '.ListUser')
def get_parser(self, prog_name):
parser = super(ListUser, self).get_parser(prog_name)
parser.add_argument(
'user',
metavar='<user>',
nargs='?',
help='Name or ID of user to list [required with --role]',
)
parser.add_argument(
'--role',
action='store_true',
default=False,
help='List the roles assigned to <user>',
)
domain_or_project = parser.add_mutually_exclusive_group()
domain_or_project.add_argument(
'--domain',
metavar='<domain>',
help='Filter list by <domain> [Only valid with --role]',
)
domain_or_project.add_argument(
'--project',
metavar='<project>',
help='Filter list by <project> [Only valid with --role]',
)
parser.add_argument(
'--long',
action='store_true',
default=False,
help='List additional fields in output',
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
identity_client = self.app.client_manager.identity
if parsed_args.role:
# List roles belonging to user
# User is required here, bail if it is not supplied
if not parsed_args.user:
                sys.stderr.write('Error: User must be specified\n')
return ([], [])
user = utils.find_resource(
identity_client.users,
parsed_args.user,
)
# List a user's roles
if not parsed_args.domain and not parsed_args.project:
columns = ('ID', 'Name')
data = identity_client.roles.list(
user=user,
domain='default',
)
# List a user's roles on a domain
elif parsed_args.user and parsed_args.domain:
columns = ('ID', 'Name', 'Domain', 'User')
domain = utils.find_resource(
identity_client.domains,
parsed_args.domain,
)
data = identity_client.roles.list(
user=user,
domain=domain,
)
for user_role in data:
user_role.user = user.name
user_role.domain = domain.name
# List a user's roles on a project
elif parsed_args.user and parsed_args.project:
columns = ('ID', 'Name', 'Project', 'User')
project = utils.find_resource(
identity_client.projects,
parsed_args.project,
)
data = identity_client.roles.list(
user=user,
project=project,
)
for user_role in data:
user_role.user = user.name
user_role.project = project.name
else:
# TODO(dtroyer): raise exception here, this really is an error
sys.stderr.write("Error: Must specify --domain or --project "
"with --role\n")
return ([], [])
else:
# List users
if parsed_args.long:
columns = ('ID', 'Name', 'Project Id', 'Domain Id',
'Description', 'Email', 'Enabled')
else:
columns = ('ID', 'Name')
data = self.app.client_manager.identity.users.list()
return (columns,
(utils.get_item_properties(
s, columns,
formatters={},
) for s in data))
class SetUser(command.Command):
"""Set user properties"""
log = logging.getLogger(__name__ + '.SetUser')
def get_parser(self, prog_name):
parser = super(SetUser, self).get_parser(prog_name)
parser.add_argument(
'user',
metavar='<user>',
help='User to change (name or ID)',
)
parser.add_argument(
'--name',
metavar='<new-user-name>',
help='New user name',
)
parser.add_argument(
'--password',
metavar='<user-password>',
help='New user password',
)
parser.add_argument(
'--password-prompt',
dest="password_prompt",
action="store_true",
help='Prompt interactively for password',
)
parser.add_argument(
'--email',
metavar='<user-email>',
help='New user email address',
)
parser.add_argument(
'--domain',
metavar='<domain>',
help='New domain name or ID',
)
parser.add_argument(
'--project',
metavar='<project>',
help='New project name or ID',
)
parser.add_argument(
'--description',
metavar='<description>',
help='New description',
)
enable_group = parser.add_mutually_exclusive_group()
enable_group.add_argument(
'--enable',
action='store_true',
help='Enable user (default)',
)
enable_group.add_argument(
'--disable',
action='store_true',
help='Disable user',
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
identity_client = self.app.client_manager.identity
if parsed_args.password_prompt:
parsed_args.password = utils.get_password(self.app.stdin)
        if (not parsed_args.name
and not parsed_args.password
and not parsed_args.email
and not parsed_args.domain
and not parsed_args.project
and not parsed_args.description
and not parsed_args.enable
and not parsed_args.disable):
return
user = utils.find_resource(
identity_client.users,
parsed_args.user,
)
kwargs = {}
if parsed_args.name:
kwargs['name'] = parsed_args.name
if parsed_args.email:
kwargs['email'] = parsed_args.email
if parsed_args.password:
kwargs['password'] = parsed_args.password
if parsed_args.description:
kwargs['description'] = parsed_args.description
if parsed_args.project:
project_id = utils.find_resource(
identity_client.projects, parsed_args.project).id
kwargs['projectId'] = project_id
if parsed_args.domain:
domain_id = utils.find_resource(
identity_client.domains, parsed_args.domain).id
kwargs['domainId'] = domain_id
kwargs['enabled'] = user.enabled
if parsed_args.enable:
kwargs['enabled'] = True
if parsed_args.disable:
kwargs['enabled'] = False
identity_client.users.update(user.id, **kwargs)
return
class ShowUser(show.ShowOne):
"""Show user details"""
log = logging.getLogger(__name__ + '.ShowUser')
def get_parser(self, prog_name):
parser = super(ShowUser, self).get_parser(prog_name)
parser.add_argument(
'user',
metavar='<user>',
help='User to display (name or ID)',
)
return parser
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
identity_client = self.app.client_manager.identity
user = utils.find_resource(
identity_client.users,
parsed_args.user,
)
info = {}
info.update(user._info)
return zip(*sorted(six.iteritems(info)))
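# --- Illustrative CLI usage (a minimal sketch; it assumes these classes are
# wired to the usual ``openstack user ...`` subcommands via cliff entry
# points, which are not shown in this file) ---
#
#     openstack user create --password-prompt --email [email protected] alice
#     openstack user list --long
#     openstack user set --description "QA account" alice
#     openstack user show alice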
|
|
# DatabaseStorage for django.
# 2011 (c) Mike Mueller <[email protected]>
# 2009 (c) GameKeeper Gambling Ltd, Ivanov E.
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.core.files.storage import Storage
from django.core.files import File
from django.db import connection, transaction
import base64
import StringIO
import urlparse
class DatabaseStorage(Storage):
"""
Implements the Django Storage API for storing files in the database,
rather than on the filesystem. Uses the Django database layer, so any
database supported by Django should theoretically work.
Usage: Create an instance of DatabaseStorage and pass it as the storage
parameter of your FileField, ImageField, etc.::
image = models.ImageField(
null=True,
blank=True,
upload_to='attachments/',
storage=DatabaseStorage(options=DBS_OPTIONS),
)
Files submitted using this field will be saved into the default Django
database, using the options specified in the constructor. The upload_to
path will be prepended to uploads, so that the file 'bar.png' would be
retrieved later as 'attachments/bar.png' in this example.
Uses the default get_available_name strategy, so duplicate filenames will
be silently renamed to foo_1.jpg, foo_2.jpg, etc.
You are responsible for creating a table in the database with the
following columns:
filename VARCHAR(256) NOT NULL PRIMARY KEY,
data TEXT NOT NULL,
size INTEGER NOT NULL,
The best place to do this is probably in your_app/sql/foo.sql, which will
run during syncdb. The length 256 is up to you, you can also pass a
max_length parameter to FileFields to be consistent with your column here.
On SQL Server, you should probably use nvarchar to support unicode.
Remember, this is not designed for huge objects. It is probably best used
on files under 1MB in size. All files are base64-encoded before being
stored, so they will use 1.33x the storage of the original file.
Here's an example view to serve files stored in the database.
def image_view(request, filename):
# Read file from database
storage = DatabaseStorage(options=DBS_OPTIONS)
image_file = storage.open(filename, 'rb')
if not image_file:
raise Http404
file_content = image_file.read()
# Prepare response
content_type, content_encoding = mimetypes.guess_type(filename)
response = HttpResponse(content=file_content, mimetype=content_type)
response['Content-Disposition'] = 'inline; filename=%s' % filename
if content_encoding:
response['Content-Encoding'] = content_encoding
return response
"""
def __init__(self, options):
"""
Create a DatabaseStorage object with the specified options dictionary.
Required options:
'table': The name of the database table for file storage.
'base_url': The base URL where database files should be found.
This is used to construct URLs for FileFields and
you will need to define a view that handles requests
at this location (example given above).
Allowed options:
'name_column': Name of the filename column (default: 'filename')
'data_column': Name of the data column (default: 'data')
'size_column': Name of the size column (default: 'size')
"""
required_keys = [
'table',
'base_url',
]
allowed_keys = [
'name_column',
'data_column',
'size_column',
]
for key in required_keys:
if key not in options:
raise ImproperlyConfigured(
'DatabaseStorage missing required option: ' + key)
for key in options:
if key not in required_keys and key not in allowed_keys:
raise ImproperlyConfigured(
'Unrecognized DatabaseStorage option: ' + key)
# Note: These fields are used as keys in string substitutions
# throughout this class. If you change a name here, be sure to update
# all the affected format strings.
self.table = options['table']
self.base_url = options['base_url']
self.name_column = options.get('name_column', 'filename')
self.data_column = options.get('data_column', 'data')
self.size_column = options.get('size_column', 'size')
def _open(self, name, mode='rb'):
"""
Open a file stored in the database. name should be the full name of
the file, including the upload_to path that may have been used.
Path separator should always be '/'. mode should always be 'rb'.
Returns a Django File object if found, otherwise None.
"""
assert mode == 'rb', "DatabaseStorage open mode must be 'rb'."
query = 'SELECT %(data_column)s FROM %(table)s ' + \
'WHERE %(name_column)s = %%s'
query %= self.__dict__
cursor = connection.cursor()
cursor.execute(query, [name])
row = cursor.fetchone()
if row is None:
return None
inMemFile = StringIO.StringIO(base64.b64decode(row[0]))
inMemFile.name = name
inMemFile.mode = mode
return File(inMemFile)
def _save(self, name, content):
"""
Save the given content as a file with the specified name. Backslashes
in the name will be converted to forward slashes ('/').
"""
name = name.replace('\\', '/')
binary = content.read()
size = len(binary)
encoded = base64.b64encode(binary)
cursor = connection.cursor()
if self.exists(name):
query = 'UPDATE %(table)s SET %(data_column)s = %%s, ' + \
'%(size_column)s = %%s WHERE %(name_column)s = %%s'
query %= self.__dict__
cursor.execute(query, [encoded, size, name])
else:
query = 'INSERT INTO %(table)s (%(name_column)s, ' + \
'%(data_column)s, %(size_column)s) VALUES (%%s, %%s, %%s)'
query %= self.__dict__
cursor.execute(query, (name, encoded, size))
transaction.commit_unless_managed(using='default')
return name
def exists(self, name):
query = 'SELECT COUNT(*) FROM %(table)s WHERE %(name_column)s = %%s'
query %= self.__dict__
cursor = connection.cursor()
cursor.execute(query, [name])
row = cursor.fetchone()
return int(row[0]) > 0
def delete(self, name):
if self.exists(name):
query = 'DELETE FROM %(table)s WHERE %(name_column)s = %%s'
query %= self.__dict__
connection.cursor().execute(query, [name])
transaction.commit_unless_managed(using='default')
def path(self, name):
raise NotImplementedError('DatabaseStorage does not support path().')
def url(self, name):
if self.base_url is None:
raise ValueError("This file is not accessible via a URL.")
result = urlparse.urljoin(self.base_url, name).replace('\\', '/')
return result
def size(self, name):
"Get the size of the given filename or raise ObjectDoesNotExist."
query = 'SELECT %(size_column)s FROM %(table)s ' + \
'WHERE %(name_column)s = %%s'
query %= self.__dict__
cursor = connection.cursor()
cursor.execute(query, [name])
row = cursor.fetchone()
if not row:
raise ObjectDoesNotExist(
"DatabaseStorage file not found: %s" % name)
return int(row[0])
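# --- Example configuration (a hedged sketch, not part of the original code) ---
# The class docstring refers to a DBS_OPTIONS dictionary without defining it.
# A minimal settings-level dictionary could look like the one below; the table
# name 'upload_data' and the '/media-db/' prefix are illustrative assumptions,
# and only 'table' and 'base_url' are required by DatabaseStorage:
#
# DBS_OPTIONS = {
#     'table': 'upload_data',      # required
#     'base_url': '/media-db/',    # required; a view must serve this prefix
#     'name_column': 'filename',   # optional, shown with its default
#     'data_column': 'data',       # optional, shown with its default
#     'size_column': 'size',       # optional, shown with its default
# }
#
# A urlconf entry would then route requests under base_url to the image_view
# shown in the class docstring, e.g.
# url(r'^media-db/(?P<filename>.+)$', image_view)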
|
|
import collections
import itertools
import json
import click
import numpy
from .lattice import Atom
from .lattice import Vertex
from .lattice import Lattice
from .lattice import NanoParticle
from .lattice import DepletedLattice
from .material import Material
Geometry = collections.namedtuple(
'Geometry', ['sites', 'links', 'axes', 'patch']
)
def compute_geometry(lattice, material):
locator = material.locator()
lsites = []
linteractions = []
axes = []
# Patch the ids according to the removed material
new_ids = {}
for idx, site in enumerate(lattice.sites()):
lsites.append(site)
new_ids[lattice.index(site)] = idx
axis = numpy.array([0.0, 0.0, 0.0])
for interaction in lattice.interactions_for(site):
linteractions.append(interaction)
axis += locator.locate(interaction.target) - locator.locate(site)
axes.append(axis)
return Geometry(lsites, linteractions, axes, new_ids)
def echo_header(material, sites, links):
atom_kinds = material.atom_kinds()
click.echo('{}\t{}\t{}\t{}'.format(
len(sites), len(links), len(atom_kinds), 0))
click.echo('\n'.join(atom_kinds))
def echo_sites(material, lattice, sites, axes, patch=None):
patch = patch or {}
locator = material.locator()
anisotropy = material.anisotropy
for site, axis in zip(sites, axes):
px, py, pz = tuple(locator.locate(site))
index = lattice.index(site)
val, axis = anisotropy.from_axis(axis)
ax, ay, az = axis
click.echo(
'{uuid}\t{px}\t{py}\t{pz}\t{spin}\t'
'{ax}\t{ay}\t{az}\t{k}\t{kind}'.format(
uuid=patch.get(index, index),
px=px, py=py, pz=pz,
ax=ax, ay=ay, az=az, k=val,
spin=material.spin(site.atom.kind),
kind=site.atom.kind
))
def echo_interactions(material, lattice, interactions, patch=None):
patch = patch or {}
for source, target, vertex in interactions:
sindex = lattice.index(source)
tindex = lattice.index(target)
click.echo('{source}\t{target}\t{exchange}'.format(
source=patch.get(sindex, sindex),
target=patch.get(tindex, tindex),
exchange=material.exchange(vertex.kind)
))
@click.group()
def cli():
'''
Generators for grid-graph-based nanostructures. This program generates
samples from descriptor files using linear-time algorithms, which are much
faster than approaches based on checking distances between points.
pass
@cli.command()
@click.argument('descriptor', type=click.File('r'))
@click.option('--shape', default=(1, 1, 1),
help='shape of the lattice')
@click.option('--pbc', default=(True, True, True),
help='use periodic boundary conditions')
def bulk(descriptor, shape, pbc):
'''
Generates a bulk out of the given descriptor. The shape of the bulk in
unit cells, as well as the periodic boundary conditions, can be passed in
as options.
The descriptor file is a simple json file: specify a list of atoms under
an "atoms" key and a list of interactions under the "interactions" key;
"spins" and "exchanges" are also required under the "material" key.
Furthermore, under the "material" key, you can specify an "anisotropy" key
with anisotropy information as well as a "unitcell" key with geometric
information.
See the example files under docs/ for more information.
'''
data = json.load(descriptor)
atoms = [Atom(**kw) for kw in data['atoms']]
vertices = [Vertex(**kw) for kw in data['interactions']]
material = Material(data['material'])
latt = Lattice(atoms, shape, pbc, vertices)
lsites, linteractions, axes, _ = compute_geometry(latt, material)
echo_header(material, lsites, linteractions)
echo_sites(material, latt, lsites, axes)
echo_interactions(material, latt, linteractions)
@cli.command()
@click.argument('descriptor', type=click.File('r'))
@click.option('--diameter', default=5,
help='Diameter of the nanoparticle')
def nanoparticle(descriptor, diameter):
'''
Generates a nanoparticle out of the given descriptor, the diameter of the
nanoparticle in unit cells can be passed in as an option.
The descriptor file format is a simple json format, you should specify a
list of atoms under an "atoms" key as well as a list of interactions under
the "interactions" key, "spins" and "exchanges" are also required under the
"material" key. Furthermore, under the "material" key, you can specify an
"anisotropy" key with anisotropy information as well as an "unitcell" key
with geometric information.
See the example files under docs/ for more information.
'''
data = json.load(descriptor)
atoms = [Atom(**kw) for kw in data['atoms']]
vertices = [Vertex(**kw) for kw in data['interactions']]
material = Material(data['material'])
shape = (diameter, ) * 3
pbc = (False, ) * 3
scale = numpy.amax(material.parameters)
locator = material.locator()
latt = NanoParticle(locator, diameter*scale/2, atoms, shape, pbc, vertices)
lsites, linteractions, axes, patch = compute_geometry(latt, material)
echo_header(material, lsites, linteractions)
echo_sites(material, latt, lsites, axes, patch=patch)
echo_interactions(material, latt, linteractions, patch=patch)
@cli.command()
@click.argument('descriptor', type=click.File('r'))
@click.argument('probability', type=float)
@click.option('--shape', default=(1, 1, 1),
help='shape of the lattice')
@click.option('--pbc', default=(True, True, True),
help='use periodic boundary conditions')
def depleted(descriptor, probability, shape, pbc):
'''
Generates a lattice with some depletion probability.
The descriptor file is a simple json file: specify a list of atoms under
an "atoms" key and a list of interactions under the "interactions" key;
"spins" and "exchanges" are also required under the "material" key.
Furthermore, under the "material" key, you can specify an "anisotropy" key
with anisotropy information as well as a "unitcell" key with geometric
information.
See the example files under docs/ for more information.
'''
data = json.load(descriptor)
atoms = [Atom(**kw) for kw in data['atoms']]
vertices = [Vertex(**kw) for kw in data['interactions']]
material = Material(data['material'])
latt = DepletedLattice(probability, atoms, shape, pbc, vertices)
lsites, linteractions, axes, patch = compute_geometry(latt, material)
echo_header(material, lsites, linteractions)
echo_sites(material, latt, lsites, axes, patch=patch)
echo_interactions(material, latt, linteractions, patch=patch)
@cli.command()
@click.argument('sites', type=click.File('r'))
@click.argument('descriptor', type=click.File('w'), default='-')
@click.option('--lattice-params', default=(1.0, 1.0, 1.0),
help='Shape of the unitcell (default: a=b=c=1)')
@click.option('--cut', default=1.0,
help='Neighbor cutoff radius (default: 1.0)')
def describe(sites, descriptor, lattice_params, cut):
'''
Generates a descriptor file from a site list file. The site list should be
a plain text file with the following format:
<x> <y> <z> <kind>
The kind parameter is taken in as a string, and the identifiers of the sites
are generated according to the order in which they appear in this file.
Furthermore, you should include the lattice parameters in x, y and z for
the unit cell.
If DESCRIPTOR is specified and writable, the json representation of the
descriptor file will be written there, otherwise the standard output will
be used.
'''
points = numpy.loadtxt(sites, usecols=(0, 1, 2))
# Rewind the file handle
sites.seek(0, 0)
labels = numpy.loadtxt(sites, usecols=(3, ), dtype=str)
labels = [l[2:-1] for l in labels]
images = map(numpy.array, itertools.product((-1, 0, 1), repeat=3))
spins = {label: None for label in labels}
exchanges = {
label + other: None
for label, other in itertools.product(spins.keys(), repeat=2)}
expanded = []
for image in images:
imaged = points + lattice_params * image
for site, kind, uuid in zip(imaged, labels, itertools.count(0)):
expanded.append({
'site': site,
'kind': kind,
'image': image,
'id': uuid,
})
norm = numpy.linalg.norm
interactions = []
for uuid, site in enumerate(points):
for other in expanded:
dis = norm(site - other['site'])
is_real = (other['image'] == (0, 0, 0, )).all()
if dis < cut and (not is_real or uuid != other['id']):
interactions.append(collections.OrderedDict([
('source', uuid),
('target', other['id']),
('delta', [int(i) for i in other['image']]),
('kind', labels[uuid] + other['kind']),
]))
atoms = [{'coords': list(site), 'kind': kind, 'id': uuid}
for site, kind, uuid in zip(points, labels, itertools.count(0))]
data = collections.OrderedDict([
('material', {'spins': spins, 'exchanges': exchanges, }),
('atoms', atoms),
('interactions', interactions)
])
json.dump(data, descriptor, indent=2)
if __name__ == '__main__':
cli()
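# --- Example descriptor (a hedged sketch, not shipped with this module) ---
# The command docstrings above describe the JSON layout without showing one.
# Based on the structure emitted by the `describe` command, a minimal
# single-species descriptor could look roughly like this; all values are
# illustrative, and the exact keys accepted by Atom, Vertex and Material
# depend on their constructors in .lattice and .material. Optional
# "anisotropy" and "unitcell" entries may be added under "material".
#
# {
#   "material": {
#     "spins": {"Fe": 2.0},
#     "exchanges": {"FeFe": 1.0}
#   },
#   "atoms": [
#     {"coords": [0.0, 0.0, 0.0], "kind": "Fe", "id": 0}
#   ],
#   "interactions": [
#     {"source": 0, "target": 0, "delta": [1, 0, 0], "kind": "FeFe"},
#     {"source": 0, "target": 0, "delta": [0, 1, 0], "kind": "FeFe"},
#     {"source": 0, "target": 0, "delta": [0, 0, 1], "kind": "FeFe"}
#   ]
# }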
|
|
import json
import math
import pdb
import random
import numpy as np
import sys
import time
sys.path.extend(['.', '..'])
from itertools import chain, repeat, izip
from collections import defaultdict
from operator import mul, and_, or_
from scipy.optimize import fsolve
from matplotlib import pyplot as plt
from ..util import *
from ..bottomup.bounding_box import *
from ..bottomup.cluster import *
from frontier import *
from rangemerger import RangeMerger2
_logger = get_logger()
class StreamRangeMerger(RangeMerger2):
"""
Streaming version of the range merger. This lets Scorpion
overlap the partitioning algorithm with the merging algorithm.
"""
def __init__(self, *args, **kwargs):
super(StreamRangeMerger, self).__init__(*args, **kwargs)
self.valid_cluster_f = kwargs.get('valid_cluster_f', lambda c: True)
# idx -> clusters to expand -- different than clusters on frontier!!
self.tasks = defaultdict(list)
# all values for each dimension
self.all_cont_vals = defaultdict(set) # idx -> values
# attribute name -> { attr val -> [sum of influence value at c=0.1, count] }
self.all_disc_vals = defaultdict(lambda: defaultdict(lambda: [0,0]))
# name -> { val -> # times failed }
self.failed_disc_vals = defaultdict(lambda: defaultdict(lambda:0))
# stores the frontier after each iteration
self.added = set()
self.seen = set()
self.frontiers = []
self.adj_graph = None
self.K = 2
self.nblocks = 50
if len(self.learner.full_table) < 40000:
self.K = 2
self.nblocks = 60
if len(self.learner.full_table) < 10000:
self.nblocks = 100
self.get_frontier = CheapFrontier(self.c_range, K=self.K, nblocks=self.nblocks, learner=self.learner)
self.get_frontier.stats = self.stats
if self.DEBUG:
self.renderer = InfRenderer('/tmp/merger.pdf', c_range=self.c_range)
def close(self):
if self.DEBUG:
self.renderer.close()
def get_frontier_obj(self, version):
while version >= len(self.frontiers):
frontier = CheapFrontier(self.c_range, K=self.K, nblocks=self.nblocks, learner=self.learner)
frontier.stats = self.stats
self.frontiers.append(frontier)
return self.frontiers[version]
@property
def frontier_iter(self):
return list(self.frontiers)
@instrument
def setup_stats(self, clusters):
all_inf = lambda l: all([abs(v) == float('inf') for v in l])
clusters = filter(lambda c: c.bound_hash not in self.added, clusters)
clusters = filter(lambda c: not all_inf(c.inf_state[0]), clusters)
clusters = filter(lambda c: len(c.inf_state[2]) == 0 or not all_inf(c.inf_state[2]), clusters)
self.added.update([c.bound_hash for c in clusters])
super(StreamRangeMerger, self).setup_stats(clusters)
start = time.time()
if not self.adj_graph:
self.adj_graph = self.make_adjacency([], True)
self.adj_graph.insert(clusters)
self.adj_graph.sync()
self.stats['adj_sync'][0] += time.time() - start
self.stats['adj_sync'][1] += 1
for c in clusters:
for idx in xrange(len(c.cols)):
self.all_cont_vals[idx].add(c.bbox[0][idx])
self.all_cont_vals[idx].add(c.bbox[1][idx])
for disc, vals in c.discretes.iteritems():
if len(vals) < 3:
vals = [(v,) for v in vals]
else:
vals = [tuple(vals)]
for v in vals:
self.all_disc_vals[disc][v][0] += c.inf_func(0.1)
self.all_disc_vals[disc][v][1] += 1
#self.all_disc_vals[disc].update(vals)
return clusters
@instrument
def best_so_far(self, prune=False):
clusters = set()
for frontier in self.frontier_iter:
clusters.update(frontier.frontier)
if prune:
for c in clusters:
c.c_range = list(self.c_range)
clusters = self.get_frontier(clusters)[0]
clusters = filter(lambda c: r_vol(c.c_range), clusters)
if self.DEBUG:
self.renderer.new_page()
self.renderer.set_title('best so far')
self.renderer.plot_active_inf_curves(clusters)
return clusters
@instrument
def best_at_c(self, c_val, K=6):
clusters = set()
for frontier in self.frontier_iter:
clusters.update(frontier.seen_clusters)
rm_dups(clusters, key=lambda c: str(c.rule))
clusters = sorted(clusters, key=lambda c: c.inf_func(c_val), reverse=True)[:K]
return clusters
@instrument
def add_clusters(self, clusters, idx=0):
"""
Return list of new clusters that are on the frontier
"""
if not clusters: return []
if self.DEBUG:
print "add_clusters"
self.print_clusters(clusters)
self.renderer.new_page()
self.renderer.set_title("add_clusters %d clusters" % len(clusters))
for f in self.frontier_iter:
self.renderer.plot_inf_curves(f.frontier, color='grey')
self.renderer.plot_inf_curves(clusters, color='green')
clusters = self.setup_stats(clusters)
base_frontier = self.get_frontier_obj(idx)
clusters, _ = base_frontier.update(clusters)
if self.DEBUG:
print "base_frontier"
self.print_clusters(clusters)
self.renderer.plot_active_inf_curves(clusters, color='red')
# clear out current tasks
self.tasks[idx] = filter(base_frontier.__contains__, self.tasks[idx])
self.tasks[idx].extend(clusters)
# remove non-frontier-based expansions from future expansion
for tidx in self.tasks.keys():
if tidx <= idx: continue
checker = lambda c: not any(map(base_frontier.__contains__, c.ancestors))
self.tasks[tidx] = filter(checker, self.tasks[tidx])
if clusters:
_logger.debug("merger:\tadded %d clusters\t%d tasks left", len(clusters), self.ntasks)
return clusters
@property
def ntasks(self):
if len(self.tasks) == 0: return 0
return sum(map(len, self.tasks.values()))
def has_next_task(self):
if not self.tasks: return False
return self.ntasks > 0
def next_tasks(self, n=1):
ret = []
for tkey in reversed(self.tasks.keys()):
tasks = self.tasks[tkey]
while len(ret) < n and tasks:
idx = random.randint(0, len(tasks)-1)
ret.append((idx, tasks.pop(idx)))
return ret
@instrument
def __call__(self, n=2):
"""
Return any successfully expanded clusters (improvements)
"""
nmerged = self.nmerged
start = time.time()
tasks = self.next_tasks(n)
improvements = set()
for idx, cluster in tasks:
cur_frontier = self.get_frontier_obj(idx)
next_frontier = self.get_frontier_obj(idx+1)
new_clusters = self.run_task(idx, cluster, cur_frontier, next_frontier)
debug = self.DEBUG
self.DEBUG = False
self.add_clusters(new_clusters, idx+1)
self.DEBUG = debug
improvements.update(new_clusters)
_logger.debug("merger\ttook %.1f sec\t%d improved\t%d tried\t%d tasks left",
time.time()-start, len(improvements), (self.nmerged-nmerged), self.ntasks)
return improvements
def run_task(self, idx, cluster, cur_frontier, next_frontier):
if not (idx == 0 or self.valid_cluster_f(cluster)):
_logger.debug("merger\tbelow thresh skipping\t %s" % cluster)
return []
if self.DEBUG:
self.renderer.new_page()
self.renderer.set_title("expand %s" % str(cluster.rule))
self.renderer.plot_inf_curves([cluster], color='grey')
self.rejected_disc_vals = defaultdict(list)
self.rejected_cont_vals = defaultdict(set)
expanded = self.greedy_expansion(cluster, self.seen, idx, cur_frontier)
expanded = [c for c in expanded if c.bound_hash != cluster.bound_hash]
if self.DEBUG:
self.renderer.plot_inf_curves(expanded, color='green')
cur_expanded, rms = cur_frontier.update(expanded)
next_expanded, rms2 = next_frontier.update(cur_expanded)
f = lambda c: c.bound_hash != cluster.bound_hash
improved_clusters = set(filter(f, next_expanded))
to_hash = lambda cs: set([c.bound_hash for c in cs])
exp_bounds = to_hash(expanded)
cur_bounds = to_hash(cur_expanded)
next_bounds = to_hash(next_expanded)
for c in chain(cur_expanded, rms):
_logger.debug("merger\texpanded\tcur_idx(%s)\tnext_idx(%s)\t%.3f-%.3f\t%s",
(c.bound_hash in exp_bounds),
(c.bound_hash in next_bounds),
c.c_range[0], c.c_range[1],
c.rule.simplify())
if self.DEBUG:
self.renderer.plot_active_inf_curves(cur_frontier.frontier, color='blue')
self.renderer.plot_active_inf_curves(next_frontier.frontier, color='red')
return improved_clusters
@instrument
def dims_to_expand(self, cluster, seen, version=None):
for idx in xrange(len(cluster.cols)):
vals = np.array(list(self.all_cont_vals[idx]))
smaller = vals[(vals < cluster.bbox[0][idx])]
bigger = vals[(vals > cluster.bbox[1][idx])]
yield idx, 'dec', smaller.tolist()
yield idx, 'inc', bigger.tolist()
for name, vals in cluster.discretes.iteritems():
ret = []
maxval = (len(vals) > 1) and max(vals) or None
vals2infs = self.all_disc_vals[name].items()
vals2infs.sort(key=lambda p: p[1][0] / float(p[1][1]+1.), reverse=True)
for disc_vals, score in vals2infs:
subset = set(disc_vals).difference(vals)
subset.difference_update([v for v in subset if self.failed_disc_vals[name][str(v)] > 1])
if maxval:
subset = set(filter(lambda v: v >= maxval, subset))
ret.append(subset)
ret = filter(bool, ret)
if ret:
yield name, 'disc', ret
return
p = np.arange(len(ret), 0, -1).astype(float)
p /= p.sum()
ret = np.random.choice(ret, min(len(ret), 10), p=p, replace=False)
yield name, 'disc', ret
@instrument
def check_direction(self, cluster, dim, direction, vals):
key = cluster.bound_hash
if direction == 'disc':
for subset in self.rejected_disc_vals[dim]:
if subset.issubset(vals):
return []
if direction == 'inc':
cont_vals = self.rejected_cont_vals[(dim, direction)]
if cont_vals:
vals = filter(lambda v: v > max(cont_vals), vals)
if direction == 'dec':
cont_vals = self.rejected_cont_vals[(dim, direction)]
if cont_vals:
vals = filter(lambda v: v < min(cont_vals), vals)
return vals
@instrument
def update_rejected_directions(self, cluster, dim, direction, val):
if direction == 'disc':
if not hasattr(val, '__iter__'):
val = [val]
for v in list(val):
self.rejected_disc_vals[dim].append(set([v]))
self.failed_disc_vals[dim][str(v)] += 1
if direction == 'inc':
self.rejected_cont_vals[(dim, direction)].add(round(val, 1))
if direction == 'dec':
self.rejected_cont_vals[(dim, direction)].add(round(val, 1))
@instrument
def greedy_expansion(self, cluster, seen, version=None, frontier=None):
_logger.debug("merger\tgreedy_expand\t%s", cluster.rule.simplify())
if frontier is None:
frontier = CheapFrontier(self.c_range, K=1, nblocks=15, learner=self.learner)
frontier.stats = self.stats
frontier.update([cluster])
cols = cluster.cols
for dim, direction, vals in self.dims_to_expand(cluster, seen, version=version):
if len(vals) == 0: continue
attrname = isinstance(dim, basestring) and dim or cols[dim]
vals = self.check_direction(cluster, dim, direction, vals)
realvals = self.pick_expansion_vals(cluster, dim, direction, vals)
nfails = 0
for v in realvals:
tmp = None
if direction == 'inc':
tmp = self.dim_merge(cluster, dim, None, v, seen)
elif direction == 'dec':
tmp = self.dim_merge(cluster, dim, v, None, seen)
else:
tmp = self.disc_merge(cluster, dim, v)
if not tmp:
_logger.debug("merger\tnoexpand\t%s\t%s\t%s options", attrname[:15], direction, len(vals))
continue
improvements = frontier.improvement(tmp)
if improvements.max() > 0:
print str(tmp)
print "\t", [round(v,2) for v in improvements]
frontier.update([tmp])
isbetter = tmp in frontier
_logger.debug("merger\tcand\t%s\t%s\t%s\t%s", attrname[:15], direction, isbetter, v)
seen.add(tmp.bound_hash)
if not isbetter:
self.update_rejected_directions(cluster, dim, direction, v)
if direction != 'disc':
nfails += 1
if nfails > 1:
break
if direction != 'disc':
cluster = tmp
return frontier.frontier
class PartitionedStreamRangeMerger(StreamRangeMerger):
"""
Partitions the merger based on user-defined labels so that frontier curves from
one partition do not suppress curves in another partition.
MR labels based on dimensionality; BDT labels as leaf/non-leaf.
"""
def __init__(self, *args, **kwargs):
super(PartitionedStreamRangeMerger, self).__init__(*args, **kwargs)
self.frontiers = defaultdict(list)
self.tasks = defaultdict(list)
def get_frontier_obj(self, version, partitionkey):
frontiers = self.frontiers[partitionkey]
while version >= len(frontiers):
frontier = CheapFrontier(self.c_range, K=self.K, nblocks=self.nblocks, learner=self.learner)
frontier.stats = self.stats
frontiers.append(frontier)
return frontiers[version]
@property
def frontier_iter(self):
return chain(*self.frontiers.values())
@instrument
def add_clusters(self,
clusters, idx=0, partitionkey=None, skip_frontier=False):
"""
Return list of new clusters that are on the frontier
"""
if partitionkey is None:
raise RuntimeError('addclusters partitionkey cannot be none')
if not clusters: return []
print "add %d clusters" % len(clusters)
if self.DEBUG:
self.renderer.new_page()
self.renderer.set_title("add_clusters %d clusters" % len(clusters))
for f in self.frontier_iter:
self.renderer.plot_inf_curves(f.frontier, color='grey')
self.renderer.plot_inf_curves(clusters, color='green')
nclusters = len(clusters)
clusters = self.setup_stats(clusters)
frontier = self.get_frontier_obj(idx, partitionkey)
if not skip_frontier:
clusters, _ = frontier.update(clusters)
# XXX: new cluster should be better than _all_ frontiers
#for f in self.frontier_iter:
#clusters, _ = f.update(clusters)
if not clusters:
return clusters
if self.DEBUG and not skip_frontier:
print "base_frontier"
self.print_clusters(clusters)
if self.DEBUG:
self.renderer.plot_active_inf_curves(clusters, color='red')
# clear out current tasks
tkey = (partitionkey, idx)
self.tasks[tkey] = filter(frontier.__contains__, self.tasks[tkey])
self.tasks[tkey].extend(clusters)
# remove non-frontier-based expansions from future expansion
for (pkey, tidx) in self.tasks.keys():
if pkey != partitionkey: continue
if tidx <= idx: continue
checker = lambda c: not any(map(frontier.__contains__, c.ancestors))
self.tasks[(pkey, tidx)] = filter(checker, self.tasks[(pkey, tidx)])
_logger.debug("merger\t%s\tadd %d of %d clusters\t%d idx\t%d tasks left", partitionkey, len(clusters), nclusters, idx, self.ntasks)
return clusters
def next_tasks(self, n=1):
ret = []
for tkey in reversed(self.tasks.keys()):
if len(ret) >= n: break
tasks = self.tasks[tkey]
ntasks = len(tasks)
if not ntasks: continue
idxs = np.random.choice(ntasks, min(ntasks, n-len(ret)), replace=False).tolist()
for idx in sorted(idxs, reverse=True):
ret.append((tkey[0], tkey[1], tasks.pop(idx)))
return ret
def __call__(self, n=2):
nmerged = self.nmerged
start = time.time()
tasks = self.next_tasks(n)
improvements = set()
for pkey, idx, cluster in tasks:
cur_frontier = self.get_frontier_obj(idx, pkey)
next_frontier = self.get_frontier_obj(idx+1, pkey)
new_clusters = self.run_task(idx, cluster, cur_frontier, next_frontier)
self.add_clusters(new_clusters, idx=idx+1, partitionkey=pkey, skip_frontier=True)
improvements.update(new_clusters)
_logger.debug("merger\t%s\ttook %.1f sec\t%d improved\t%d tried\t%d tasks left",
pkey, time.time()-start, len(improvements), (self.nmerged-nmerged), self.ntasks)
return improvements
|
|
#!/usr/bin/env python
"""Generic script for processing large data sets in small batches.
Reads events from one datasource and commits them into another one,
either one by one or in batches.
Config template::
[data_maintainer3]
job_name = dm_remove_expired_services
# if source is database, you need to specify dbread and sql_get_pk_list
dbread = dbname=sourcedb_test
sql_get_pk_list =
select username
from user_service
where expire_date < now();
# if source is csv file, you need to specify fileread and optionally csv_delimiter and csv_quotechar
#fileread = data.csv
#csv_delimiter = ,
#csv_quotechar = "
dbwrite = dbname=destdb port=1234 host=dbhost.com user=guest password=secret
dbbefore = dbname=destdb_test
dbafter = dbname=destdb_test
dbcrash = dbname=destdb_test
dbthrottle = dbname=queuedb_test
# It is good practice to include the same where condition on the target side as on the read side,
# to ensure that you are actually changing the same data you think you are,
# especially when reading from a replica database or when processing takes days.
sql_modify =
delete from user_service
where username = %%(username)s
and expire_date < now();
# This will be run before executing the sql_get_pk_list query (optional)
#sql_before_run =
# select * from somefunction1(%(job_name)s);
# This will be run when the DM finishes (optional)
#sql_after_run =
# select * from somefunction2(%(job_name)s);
# Determines whether the sql_after_run query will be run in case the pk list query returns no rows
#after_zero_rows = 1
# This will be run if the DM crashes (optional)
#sql_on_crash =
# select * from somefunction3(%(job_name)s);
# This may be used to control throttling of the DM (optional)
#sql_throttle =
# select lag>'5 minutes'::interval from pgq.get_consumer_info('failoverconsumer');
# materialize the query so that a transaction does not have to stay open while processing it (only used when the source is a database)
#with_hold = 1
# how many records to fetch at once; if batch processing is used, this is
# also how many records are processed in one commit
#fetch_count = 100
# by default commit after each row (safe when behind plproxy, bouncer or whatever)
# can be turned off for better performance when connected directly to database
#autocommit = 1
# just for tuning to throttle how much load we let onto write database
#commit_delay = 0.0
# quite often data_maintainer is run from crontab and then loop delay is not needed
# in case it has to be run as daemon set loop delay in seconds
#loop_delay = 1
logfile = ~/log/%(job_name)s.log
pidfile = ~/pid/%(job_name)s.pid
use_skylog = 0
"""
import csv
import datetime
import os.path
import sys
import time
import pkgloader
pkgloader.require('skytools', '3.0')
import skytools
class DataSource (object):
def __init__(self, log):
self.log = log
def open(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
def fetch(self, count):
raise NotImplementedError
class DBDataSource (DataSource):
def __init__(self, log, db, query, bres = None, with_hold = False):
super(DBDataSource, self).__init__(log)
self.db = db
if with_hold:
self.query = "DECLARE data_maint_cur NO SCROLL CURSOR WITH HOLD FOR %s" % query
else:
self.query = "DECLARE data_maint_cur NO SCROLL CURSOR FOR %s" % query
self.bres = bres
self.with_hold = with_hold
def _run_query(self, query, params = None):
self.cur.execute(query, params)
self.log.debug(self.cur.query)
self.log.debug(self.cur.statusmessage)
def open(self):
self.cur = self.db.cursor()
self._run_query(self.query, self.bres) # pass results from before_query into sql_pk
def close(self):
self.cur.execute("CLOSE data_maint_cur")
if not self.with_hold:
self.db.rollback()
def fetch(self, count):
self._run_query("FETCH FORWARD %i FROM data_maint_cur" % count)
return self.cur.fetchall()
class CSVDataSource (DataSource):
def __init__(self, log, filename, delimiter, quotechar):
super(CSVDataSource, self).__init__(log)
self.filename = filename
self.delimiter = delimiter
self.quotechar = quotechar
def open(self):
self.fp = open(self.filename, 'rb')
self.reader = csv.DictReader(self.fp, delimiter = self.delimiter, quotechar = self.quotechar)
def close(self):
self.fp.close()
def fetch(self, count):
ret = []
for row in self.reader:
ret.append(row)
count -= 1
if count <= 0:
break
return ret
class DataMaintainer (skytools.DBScript):
__doc__ = __doc__
loop_delay = -1
def __init__(self, args):
super(DataMaintainer, self).__init__("data_maintainer3", args)
# source file
self.fileread = self.cf.get("fileread", "")
if self.fileread:
self.fileread = os.path.expanduser(self.fileread)
self.set_single_loop(True) # force single run if source is file
self.csv_delimiter = self.cf.get("csv_delimiter", ',')
self.csv_quotechar = self.cf.get("csv_quotechar", '"')
# query for fetching the PK-s of the data set to be maintained
self.sql_pk = self.cf.get("sql_get_pk_list", "")
if (int(bool(self.sql_pk)) + int(bool(self.fileread))) in (0,2):
raise skytools.UsageError("Either fileread or sql_get_pk_list must be specified in the configuration file")
# query for changing data tuple ( autocommit )
self.sql_modify = self.cf.get("sql_modify")
# query to be run before starting the data maintainer,
# useful for retrieving initialization parameters of the query
self.sql_before = self.cf.get("sql_before_run", "")
# query to be run after finishing the data maintainer
self.sql_after = self.cf.get("sql_after_run", "")
# whether to run the sql_after query in case of 0 rows
self.after_zero_rows = self.cf.getint("after_zero_rows", 1)
# query to be run if the process crashes
self.sql_crash = self.cf.get("sql_on_crash", "")
# query for checking if / how much to throttle
self.sql_throttle = self.cf.get("sql_throttle", "")
# how many records to fetch at once
self.fetchcnt = self.cf.getint("fetchcnt", 100)
self.fetchcnt = self.cf.getint("fetch_count", self.fetchcnt)
# specifies if non-transactional cursor should be created (0 -> without hold)
self.withhold = self.cf.getint("with_hold", 1)
# execution mode (0 -> whole batch is committed / 1 -> autocommit)
self.autocommit = self.cf.getint("autocommit", 1)
# delay in seconds after each commit
self.commit_delay = self.cf.getfloat("commit_delay", 0.0)
def work(self):
self.log.info('Starting..')
self.started = self.lap_time = time.time()
self.total_count = 0
bres = {}
if self.sql_before:
bdb = self.get_database("dbbefore", autocommit=1)
bcur = bdb.cursor()
bcur.execute(self.sql_before)
if bcur.statusmessage.startswith('SELECT'):
res = bcur.fetchall()
assert len(res)==1, "Result of a 'before' query must be 1 row"
bres = res[0].copy()
if self.sql_throttle:
dbt = self.get_database("dbthrottle", autocommit=1)
tcur = dbt.cursor()
if self.autocommit:
self.log.info("Autocommit after each modify")
dbw = self.get_database("dbwrite", autocommit=1)
else:
self.log.info("Commit in %i record batches", self.fetchcnt)
dbw = self.get_database("dbwrite", autocommit=0)
if self.fileread:
self.datasource = CSVDataSource(self.log, self.fileread, self.csv_delimiter, self.csv_quotechar)
else:
if self.withhold:
dbr = self.get_database("dbread", autocommit=1)
else:
dbr = self.get_database("dbread", autocommit=0)
self.datasource = DBDataSource(self.log, dbr, self.sql_pk, bres, self.withhold)
self.datasource.open()
mcur = dbw.cursor()
while True: # loop while fetch returns fetch_count rows
self.fetch_started = time.time()
res = self.datasource.fetch(self.fetchcnt)
count, lastitem = self.process_batch(res, mcur, bres)
self.total_count += count
if not self.autocommit:
dbw.commit()
self.stat_put("duration", time.time() - self.fetch_started)
self.send_stats()
if len(res) < self.fetchcnt or self.last_sigint:
break
if self.commit_delay > 0.0:
time.sleep(self.commit_delay)
if self.sql_throttle:
self.throttle(tcur)
self._print_count("--- Running count: %s duration: %s ---")
if self.last_sigint:
self.log.info("Exiting on user request")
self.datasource.close()
self.log.info("--- Total count: %s duration: %s ---",
self.total_count, datetime.timedelta(0, round(time.time() - self.started)))
if self.sql_after and (self.after_zero_rows > 0 or self.total_count > 0):
adb = self.get_database("dbafter", autocommit=1)
acur = adb.cursor()
acur.execute(self.sql_after, lastitem)
def process_batch(self, res, mcur, bres):
""" Process events in autocommit mode reading results back and trying to make some sense out of them
"""
try:
count = 0
item = bres.copy()
for i in res: # for each row in read query result
item.update(i)
mcur.execute(self.sql_modify, item)
self.log.debug(mcur.query)
if mcur.statusmessage.startswith('SELECT'): # if select was used we can expect some result
mres = mcur.fetchall()
for r in mres:
if 'stats' in r: # if specially handled column 'stats' is present
for k, v in skytools.db_urldecode(r['stats'] or '').items():
self.stat_increase(k, int(v))
self.log.debug(r)
else:
self.stat_increase('processed', mcur.rowcount)
self.log.debug(mcur.statusmessage)
if 'cnt' in item:
count += item['cnt']
self.stat_increase("count", item['cnt'])
else:
count += 1
self.stat_increase("count")
if self.last_sigint:
break
return count, item
except: # process has crashed, run sql_crash and re-raise the exception
if self.sql_crash:
dbc = self.get_database("dbcrash", autocommit=1)
ccur = dbc.cursor()
ccur.execute(self.sql_crash, item)
raise
def throttle(self, tcur):
while not self.last_sigint:
tcur.execute(self.sql_throttle)
_r = tcur.fetchall()
assert len(_r) == 1 and len(_r[0]) == 1, "Result of 'throttle' query must be 1 value"
throttle = _r[0][0]
if isinstance(throttle, bool):
tt = float(throttle and 30)
elif isinstance(throttle, (int, float)):
tt = float(throttle)
else:
self.log.warn("Result of 'throttle' query must be boolean or numeric")
break
if tt > 0.0:
self.log.debug("sleeping %f s", tt)
time.sleep(tt)
else:
break
self._print_count("--- Waiting count: %s duration: %s ---")
def _print_count(self, text):
if time.time() - self.lap_time > 60.0: # if one minute has passed print running totals
self.log.info(text, self.total_count, datetime.timedelta(0, round(time.time() - self.started)))
self.lap_time = time.time()
def shutdown(self):
super(DataMaintainer, self).shutdown()
self.log.info("Script finished, exiting")
if __name__ == '__main__':
script = DataMaintainer(sys.argv[1:])
script.start()
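# --- Example invocation (a hedged sketch) ---
# Like other skytools.DBScript based tools, this script is normally started
# with an ini file containing the [data_maintainer3] section shown in the
# module docstring, for example:
#
#   python data_maintainer3.py dm_remove_expired_services.ini
#
# Daemon/verbosity flags follow the usual skytools.DBScript conventions; the
# file name used above is an assumption, not something mandated by this code.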
|
|
import seldon.fileutil as fu
import os.path
import logging
import shutil
from sklearn.externals import joblib
import logging
import random
from sklearn.base import BaseEstimator
logger = logging.getLogger(__name__)
class Recommender(BaseEstimator):
"""
General recommendation interface
"""
def recommend(self,user,ids,recent_interactions,client,limit):
"""
Recommend items
Parameters
----------
user : long
user id
ids : list(long)
item ids to score
recent_interactions : list(long)
recent items the user has interacted with
client : str
name of client to recommend for (business group, company, product..)
limit : int
number of recommendations to return
Returns
-------
list of pairs of (item_id,score)
"""
return []
def save(self,folder):
"""
Save the recommender model. Allows more fine-grained control over model state saving than pickling would allow. The method should save only those objects that can't be pickled.
Parameters
----------
folder : str
local folder to save model
"""
pass
def load(self,folder):
"""
Load the model into the recommender. Allows more complex models than can easily be handled via pickling.
Parameters
----------
folder : str
local folder to load model
"""
return self
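# A minimal illustrative subclass (a hedged sketch, not part of the original
# API): it ranks the candidate ids by how recently the user interacted with
# them, mainly to show the expected list-of-(item_id, score) return shape.
class RecencyRecommender(Recommender):
    def recommend(self, user, ids, recent_interactions, client, limit):
        # position 0 == most recent interaction
        recency = {item: pos for pos, item in enumerate(reversed(recent_interactions))}
        default = len(recency)
        scored = [(i, float(default - recency.get(i, default))) for i in ids]
        scored.sort(key=lambda pair: pair[1], reverse=True)
        return scored[:limit]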
class Recommender_wrapper(object):
"""
Wrapper to allow recommenders to be easily saved and loaded
"""
def __init__(self,work_folder="/tmp",aws_key=None,aws_secret=None):
self.work_folder=work_folder
self.aws_key=aws_key
self.aws_secret=aws_secret
def get_work_folder(self):
return self.work_folder
def create_work_folder(self):
if not os.path.exists(self.work_folder):
logger.info("creating %s",self.work_folder)
os.makedirs(self.work_folder)
def save_recommender(self,recommender,location):
"""
Save recommender to external location
Parameters
----------
recommender : Recommender
recommender to be saved
location : str
external folder to save recommender
"""
self.create_work_folder()
rint = random.randint(1,999999)
recommender_folder = self.work_folder+"/recommender_tmp"+str(rint)
if not os.path.exists(recommender_folder):
logger.info("creating folder %s",recommender_folder)
os.makedirs(recommender_folder)
tmp_file = recommender_folder+"/rec"
joblib.dump(recommender,tmp_file)
recommender.save(recommender_folder)
futil = fu.FileUtil(aws_key=self.aws_key,aws_secret=self.aws_secret)
futil.copy(recommender_folder,location)
def load_recommender(self,recommender_folder):
"""
Load scikit learn recommender from external folder
Parameters
----------
recommender_folder : str
external folder holding recommender
"""
self.create_work_folder()
rint = random.randint(1,999999)
local_recommender_folder = self.work_folder+"/recommender_tmp"+str(rint)
if not os.path.exists(local_recommender_folder):
logger.info("creating folder %s",local_recommender_folder)
os.makedirs(local_recommender_folder)
futil = fu.FileUtil(aws_key=self.aws_key,aws_secret=self.aws_secret)
futil.copy(recommender_folder,local_recommender_folder)
recommender = joblib.load(local_recommender_folder+"/rec")
recommender.load(local_recommender_folder)
return recommender
class Extension(object):
"""
Generic function that takes dict input and return JSON
"""
def predict(self,input={}):
return {}
def save(self,folder):
"""
Save the extension model. Allows more fine-grained control over model state saving than pickling would allow. The method should save only those objects that can't be pickled.
Parameters
----------
folder : str
local folder to save model
"""
pass
def load(self,folder):
"""
Load the model into the extension. Allows more complex models than can easily be handled via pickling.
Parameters
----------
folder : str
local folder to load model
"""
return self
class Extension_wrapper(object):
def __init__(self,work_folder="/tmp",aws_key=None,aws_secret=None):
self.work_folder=work_folder
self.aws_key=aws_key
self.aws_secret=aws_secret
def get_work_folder(self):
return self.work_folder
def create_work_folder(self):
if not os.path.exists(self.work_folder):
logger.info("creating %s",self.work_folder)
os.makedirs(self.work_folder)
def load_extension(self,extension_folder):
self.create_work_folder()
rint = random.randint(1,999999)
local_extension_folder = self.work_folder+"/extension_tmp"+str(rint)
if not os.path.exists(local_extension_folder):
logger.info("creating folder %s",local_extension_folder)
os.makedirs(local_extension_folder)
futil = fu.FileUtil(aws_key=self.aws_key,aws_secret=self.aws_secret)
futil.copy(extension_folder,local_extension_folder)
extension = joblib.load(local_extension_folder+"/ext")
extension.load(local_extension_folder)
return extension
def save_extension(self,extension,location):
self.create_work_folder()
rint = random.randint(1,999999)
extension_folder = self.work_folder+"/extension_tmp"+str(rint)
if not os.path.exists(extension_folder):
logger.info("creating folder %s",extension_folder)
os.makedirs(extension_folder)
tmp_file = extension_folder+"/ext"
joblib.dump(extension,tmp_file)
extension.save(extension_folder)
futil = fu.FileUtil(aws_key=self.aws_key,aws_secret=self.aws_secret)
futil.copy(extension_folder,location)
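# --- Example round trip (a hedged sketch) ---
# Typical use of the wrappers above: persist a trained recommender to an
# external location and load it back later. The paths are illustrative; the
# location only has to be reachable by seldon.fileutil.FileUtil.
#
# wrapper = Recommender_wrapper(work_folder="/tmp")
# wrapper.save_recommender(my_recommender, "/models/recommender/v1")
# restored = wrapper.load_recommender("/models/recommender/v1")
# restored.recommend(user=1, ids=[10, 11], recent_interactions=[11],
#                    client="acme", limit=2)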
|
|
import argparse
import gym
import numpy as np
import os
import tensorflow as tf
import tempfile
import time
import json
import baselines.common.tf_util as U
from baselines import logger
from baselines import deepq
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
from baselines.common.misc_util import (
boolean_flag,
pickle_load,
pretty_eta,
relatively_safe_pickle_dump,
set_global_seeds,
RunningAvg,
)
from baselines.common.schedules import LinearSchedule, PiecewiseSchedule
from baselines import bench
from baselines.common.atari_wrappers_deprecated import wrap_dqn
from baselines.common.azure_utils import Container
from .model import model, dueling_model
def parse_args():
parser = argparse.ArgumentParser("DQN experiments for Atari games")
# Environment
parser.add_argument("--env", type=str, default="Pong", help="name of the game")
parser.add_argument("--seed", type=int, default=42, help="which seed to use")
# Core DQN parameters
parser.add_argument("--replay-buffer-size", type=int, default=int(1e6), help="replay buffer size")
parser.add_argument("--lr", type=float, default=1e-4, help="learning rate for Adam optimizer")
parser.add_argument("--num-steps", type=int, default=int(2e8), help="total number of steps to run the environment for")
parser.add_argument("--batch-size", type=int, default=32, help="number of transitions to optimize at the same time")
parser.add_argument("--learning-freq", type=int, default=4, help="number of iterations between every optimization step")
parser.add_argument("--target-update-freq", type=int, default=40000, help="number of iterations between every target network update")
parser.add_argument("--param-noise-update-freq", type=int, default=50, help="number of iterations between every re-scaling of the parameter noise")
parser.add_argument("--param-noise-reset-freq", type=int, default=10000, help="maximum number of steps to take per episode before re-perturbing the exploration policy")
# Bells and whistles
boolean_flag(parser, "double-q", default=True, help="whether or not to use double q learning")
boolean_flag(parser, "dueling", default=False, help="whether or not to use dueling model")
boolean_flag(parser, "prioritized", default=False, help="whether or not to use prioritized replay buffer")
boolean_flag(parser, "param-noise", default=False, help="whether or not to use parameter space noise for exploration")
boolean_flag(parser, "layer-norm", default=False, help="whether or not to use layer norm (should be True if param_noise is used)")
boolean_flag(parser, "gym-monitor", default=False, help="whether or not to use a OpenAI Gym monitor (results in slower training due to video recording)")
parser.add_argument("--prioritized-alpha", type=float, default=0.6, help="alpha parameter for prioritized replay buffer")
parser.add_argument("--prioritized-beta0", type=float, default=0.4, help="initial value of beta parameters for prioritized replay")
parser.add_argument("--prioritized-eps", type=float, default=1e-6, help="eps parameter for prioritized replay buffer")
# Checkpointing
parser.add_argument("--save-dir", type=str, default=None, help="directory in which training state and model should be saved.")
parser.add_argument("--save-azure-container", type=str, default=None,
help="It present data will saved/loaded from Azure. Should be in format ACCOUNT_NAME:ACCOUNT_KEY:CONTAINER")
parser.add_argument("--save-freq", type=int, default=1e6, help="save model once every time this many iterations are completed")
boolean_flag(parser, "load-on-start", default=True, help="if true and model was previously saved then training will be resumed")
return parser.parse_args()
def make_env(game_name):
env = gym.make(game_name + "NoFrameskip-v4")
monitored_env = bench.Monitor(env, logger.get_dir()) # puts rewards and number of steps in info, before environment is wrapped
env = wrap_dqn(monitored_env) # applies a bunch of modifications to simplify the observation space (downsample, make b/w)
return env, monitored_env
def maybe_save_model(savedir, container, state):
"""This function checkpoints the model and state of the training algorithm."""
if savedir is None:
return
start_time = time.time()
model_dir = "model-{}".format(state["num_iters"])
U.save_state(os.path.join(savedir, model_dir, "saved"))
if container is not None:
container.put(os.path.join(savedir, model_dir), model_dir)
relatively_safe_pickle_dump(state, os.path.join(savedir, 'training_state.pkl.zip'), compression=True)
if container is not None:
container.put(os.path.join(savedir, 'training_state.pkl.zip'), 'training_state.pkl.zip')
relatively_safe_pickle_dump(state["monitor_state"], os.path.join(savedir, 'monitor_state.pkl'))
if container is not None:
container.put(os.path.join(savedir, 'monitor_state.pkl'), 'monitor_state.pkl')
logger.log("Saved model in {} seconds\n".format(time.time() - start_time))
def maybe_load_model(savedir, container):
"""Load model if present at the specified path."""
if savedir is None:
return
state_path = os.path.join(os.path.join(savedir, 'training_state.pkl.zip'))
if container is not None:
logger.log("Attempting to download model from Azure")
found_model = container.get(savedir, 'training_state.pkl.zip')
else:
found_model = os.path.exists(state_path)
if found_model:
state = pickle_load(state_path, compression=True)
model_dir = "model-{}".format(state["num_iters"])
if container is not None:
container.get(savedir, model_dir)
U.load_state(os.path.join(savedir, model_dir, "saved"))
logger.log("Loaded models checkpoint at {} iterations".format(state["num_iters"]))
return state
if __name__ == '__main__':
args = parse_args()
# Parse savedir and azure container.
savedir = args.save_dir
if savedir is None:
savedir = os.getenv('OPENAI_LOGDIR', None)
if args.save_azure_container is not None:
account_name, account_key, container_name = args.save_azure_container.split(":")
container = Container(account_name=account_name,
account_key=account_key,
container_name=container_name,
maybe_create=True)
if savedir is None:
# Careful! This will not get cleaned up. Docker spoils the developers.
savedir = tempfile.TemporaryDirectory().name
else:
container = None
# Create and seed the env.
env, monitored_env = make_env(args.env)
if args.seed > 0:
set_global_seeds(args.seed)
env.unwrapped.seed(args.seed)
if args.gym_monitor and savedir:
env = gym.wrappers.Monitor(env, os.path.join(savedir, 'gym_monitor'), force=True)
if savedir:
with open(os.path.join(savedir, 'args.json'), 'w') as f:
json.dump(vars(args), f)
with U.make_session(4) as sess:
# Create training graph and replay buffer
def model_wrapper(img_in, num_actions, scope, **kwargs):
actual_model = dueling_model if args.dueling else model
return actual_model(img_in, num_actions, scope, layer_norm=args.layer_norm, **kwargs)
act, train, update_target, debug = deepq.build_train(
make_obs_ph=lambda name: U.Uint8Input(env.observation_space.shape, name=name),
q_func=model_wrapper,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=args.lr, epsilon=1e-4),
gamma=0.99,
grad_norm_clipping=10,
double_q=args.double_q,
param_noise=args.param_noise
)
approximate_num_iters = args.num_steps / 4
exploration = PiecewiseSchedule([
(0, 1.0),
(approximate_num_iters / 50, 0.1),
(approximate_num_iters / 5, 0.01)
], outside_value=0.01)
if args.prioritized:
replay_buffer = PrioritizedReplayBuffer(args.replay_buffer_size, args.prioritized_alpha)
beta_schedule = LinearSchedule(approximate_num_iters, initial_p=args.prioritized_beta0, final_p=1.0)
else:
replay_buffer = ReplayBuffer(args.replay_buffer_size)
U.initialize()
update_target()
num_iters = 0
# Load the model
state = maybe_load_model(savedir, container)
if state is not None:
num_iters, replay_buffer = state["num_iters"], state["replay_buffer"]
monitored_env.set_state(state["monitor_state"])
start_time, start_steps = None, None
steps_per_iter = RunningAvg(0.999)
iteration_time_est = RunningAvg(0.999)
obs = env.reset()
num_iters_since_reset = 0
reset = True
# Main training loop
while True:
num_iters += 1
num_iters_since_reset += 1
# Take action and store transition in the replay buffer.
kwargs = {}
if not args.param_noise:
update_eps = exploration.value(num_iters)
update_param_noise_threshold = 0.
else:
if args.param_noise_reset_freq > 0 and num_iters_since_reset > args.param_noise_reset_freq:
# Reset param noise policy since we have exceeded the maximum number of steps without a reset.
reset = True
update_eps = 0.01 # ensures that we cannot get stuck completely
# Compute the threshold such that the KL divergence between perturbed and non-perturbed
# policy is comparable to eps-greedy exploration with eps = exploration.value(t).
# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
# for detailed explanation.
update_param_noise_threshold = -np.log(1. - exploration.value(num_iters) + exploration.value(num_iters) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = (num_iters % args.param_noise_update_freq == 0)
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
reset = False
new_obs, rew, done, info = env.step(action)
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
if done:
num_iters_since_reset = 0
obs = env.reset()
reset = True
if (num_iters > max(5 * args.batch_size, args.replay_buffer_size // 20) and
num_iters % args.learning_freq == 0):
# Sample a bunch of transitions from replay buffer
if args.prioritized:
experience = replay_buffer.sample(args.batch_size, beta=beta_schedule.value(num_iters))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(args.batch_size)
weights = np.ones_like(rewards)
# Minimize the error in Bellman's equation and compute TD-error
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
# Update the priorities in the replay buffer
if args.prioritized:
new_priorities = np.abs(td_errors) + args.prioritized_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
# Update target network.
if num_iters % args.target_update_freq == 0:
update_target()
if start_time is not None:
steps_per_iter.update(info['steps'] - start_steps)
iteration_time_est.update(time.time() - start_time)
start_time, start_steps = time.time(), info["steps"]
# Save the model and training state.
if num_iters > 0 and (num_iters % args.save_freq == 0 or info["steps"] > args.num_steps):
maybe_save_model(savedir, container, {
'replay_buffer': replay_buffer,
'num_iters': num_iters,
'monitor_state': monitored_env.get_state(),
})
if info["steps"] > args.num_steps:
break
if done:
steps_left = args.num_steps - info["steps"]
completion = np.round(info["steps"] / args.num_steps, 1)
logger.record_tabular("% completion", completion)
logger.record_tabular("steps", info["steps"])
logger.record_tabular("iters", num_iters)
logger.record_tabular("episodes", len(info["rewards"]))
logger.record_tabular("reward (100 epi mean)", np.mean(info["rewards"][-100:]))
logger.record_tabular("exploration", exploration.value(num_iters))
if args.prioritized:
logger.record_tabular("max priority", replay_buffer._max_priority)
fps_estimate = (float(steps_per_iter) / (float(iteration_time_est) + 1e-6)
if steps_per_iter._value is not None else "calculating...")
logger.dump_tabular()
logger.log()
logger.log("ETA: " + pretty_eta(int(steps_left / fps_estimate)))
logger.log()
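# --- Example invocation (a hedged sketch) ---
# All flags below come from parse_args(); the script name used to launch the
# file is an assumption that depends on how this experiment is installed:
#
#   python train.py --env Breakout --dueling --prioritized \
#       --save-dir /tmp/breakout-run --save-freq 1000000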
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for handling Tensorflow checkpoints."""
import collections
import os
import re
from typing import Dict, List, Optional, Sequence, Text, Tuple
import tensorflow.compat.v1 as tf
def get_assignment_map_from_checkpoint(
variables,
ckpt_path,
variable_scope = "",
ckpt_variable_scope = "",
require_all_variables_initialized = False
):
"""Gets the mapping from checkpoint variable names to `variable` names.
Computes the *intersection* of `variables` (under `variable_scope`) and
checkpoint variables (under `ckpt_variable_scope`) and gets the name
mapping from the latter to the former.
Args:
variables: The list of Tensorflow variables one aims to initialize.
ckpt_path: Path to the checkpoint to load `variables` from.
variable_scope: The scope of `variables` to initialize. `Variables` outside
this scope will be ignored. If "", use all `variables`; otherwise it
should end with '/'.
ckpt_variable_scope: The scope of checkpoint variables to initialize from.
Checkpoint variables outside this scope will be ignored. If "", use all
`variables`; otherwise it should end with '/'.
require_all_variables_initialized: If True, a ValueError will be raised if
not all `variables` in the `variable_scope` can be mapped to the
corresponding checkpoint variables in the `ckpt_variable_scope`.
Returns:
assignment_map: Mapping from checkpoint variable names to `variable`.
Keys and values are matching variables under the `ckpt_variable_scope`
and `variable_scope` (sub-)trees.
initialized_variable_names: Names of `variables` that get matched to
checkpoint variables.
Raises:
ValueError if
(a) input scope name is not empty and doesn't end with "/"; or
(b) names of `variables` doesn't end with ':0' (unlikely to happen); or
(c) not all variables in variable_scope are initialized
(if `require_all_variables_initialized` is True).
Example
Input:
variables: ["a/aa/aaa:0", "a/c/cc/ccc:0", "d/dd:0"]
ckpt_variables: ["b/aa/aaa", "b/f"]
variable_scope: "a/"
ckpt_variable_scope: "b/"
Output:
assignment_map: {"b/aa/aaa": <tf.Variable "a/aa/aaa:0">}
initialized_variable_names: ["a/aa/aaa:0"]
"""
if variable_scope and not variable_scope.endswith("/"):
raise ValueError("{} should end with '/'.".format(variable_scope))
if ckpt_variable_scope and not ckpt_variable_scope.endswith("/"):
raise ValueError("{} should end with '/'.".format(ckpt_variable_scope))
variable_names_stripped = set()
for var in variables:
var_name = var.name
# Ignores `variables` outside scope.
# Note that all strings start with "".
if not var_name.startswith(variable_scope):
continue
# Names of variables from Tensorflow API all have the suffix of ":0"
# while those from checkpoint don't. Here we strip the suffix out.
m = re.match("^(.*):\\d+$", var_name)
if m is not None:
var_name = m.group(1)
else:
raise ValueError(
"Variable name doesn't end with ':0': {}".format(var_name))
# Strips the `variable_scope` prefix out.
var_name_stripped = var_name[len(variable_scope):]
if var_name_stripped:
variable_names_stripped.add(var_name_stripped)
var_name_to_variable = {var.name: var for var in variables}
assignment_map = collections.OrderedDict()
initialized_variable_names = []
for ckpt_var_name, _ in tf.train.list_variables(ckpt_path):
# Ignores checkpoint variables outside scope.
# Note that all strings start with "".
if not ckpt_var_name.startswith(ckpt_variable_scope):
continue
ckpt_var_name_stripped = ckpt_var_name[len(ckpt_variable_scope):]
if ckpt_var_name_stripped not in variable_names_stripped:
continue
variable_names_stripped.remove(ckpt_var_name_stripped)
var_name = variable_scope + ckpt_var_name_stripped + ":0"
assignment_map[ckpt_var_name] = var_name_to_variable[var_name]
initialized_variable_names.append(var_name)
if variable_names_stripped and require_all_variables_initialized:
raise ValueError(
f"The following variables in variable_scope cannot be mapped to any "
f"checkpoint variable in ckpt_variable_scope: "
f"{variable_names_stripped}.")
return (assignment_map, initialized_variable_names)
def _log_customized_initialization(
init_checkpoint,
variables,
global_variables,
initialized_variable_names = ()):
"""Logs customized initialization."""
if init_checkpoint:
tf.logging.info("Initialize from the ckpt %s", init_checkpoint)
else:
tf.logging.info("Random initialized.")
if global_variables:
tf.logging.info("**** Global Variables ****")
else:
tf.logging.info("**** Trainable Variables ****")
for var in variables:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
def get_scaffold_fn(
init_checkpoint,
global_vars = False,
variable_scope_pairs = (("", ""),)
): # pytype: disable=annotation-type-mismatch
"""Gets `scaffold_fn` for initializing model variables from checkpoint.
If the checkpoint ends with "latest", then load the latest checkpoint in the
directory of the `init_checkpoint`.
Args:
init_checkpoint: Text, the initial checkpoint.
global_vars: bool, whether or not to initialize global variables.
variable_scope_pairs: Sequence of (variable_scope, ckpt_variable_scope)
pairs, where `variable_scope` is the scope of variables to initialize, and
`ckpt_variable_scope` is the scope of checkpoint variables to initialize
from. The initializations from later pairs will overwrite those from
earlier pairs.
Returns:
The `scaffold_fn` for initializing model variables from checkpoint. If
`init_checkpoint` is None, return None.
"""
tvars = tf.global_variables() if global_vars else tf.trainable_variables()
if init_checkpoint is None:
_log_customized_initialization(init_checkpoint, tvars, global_vars)
return None
if init_checkpoint.endswith("latest"):
ckpt_dir = os.path.dirname(init_checkpoint)
init_checkpoint = tf.train.latest_checkpoint(ckpt_dir)
def scaffold_fn():
"""The TPU scaffold function."""
for variable_scope, ckpt_variable_scope in variable_scope_pairs:
if variable_scope and not variable_scope.endswith("/"):
variable_scope += "/"
if ckpt_variable_scope and not ckpt_variable_scope.endswith("/"):
ckpt_variable_scope += "/"
assignment_map, initialized_variable_names = (
get_assignment_map_from_checkpoint(tvars, init_checkpoint,
variable_scope,
ckpt_variable_scope))
_log_customized_initialization(init_checkpoint, tvars, global_vars,
initialized_variable_names)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
return scaffold_fn
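# Illustrative usage sketch (not part of the original module): one way
# `get_scaffold_fn` might be wired into a TF1 TPUEstimator `model_fn`. The
# checkpoint path and the "bert/" scope pair are hypothetical placeholders;
# pass (("", ""),) to map every trainable variable one-to-one. Assumes the
# TF 1.14+ `tf.estimator.tpu` API.
def _example_scaffold_fn_usage(features, labels, mode, params):
  del features, labels, params  # a real model_fn would build the graph here
  loss = tf.constant(0.0)  # stand-in loss so the sketch stays self-contained
  scaffold_fn = get_scaffold_fn(
      init_checkpoint="/tmp/pretrained/model.ckpt",  # hypothetical path
      global_vars=False,
      variable_scope_pairs=(("bert/", "bert/"),))
  return tf.estimator.tpu.TPUEstimatorSpec(
      mode=mode, loss=loss, scaffold_fn=scaffold_fn)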
|
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import socket
import string
import base64
try:
import http.client as httplib
from urllib import request as url_request
from urllib import parse
except ImportError: # above is available in py3+, below is py2.7
import httplib as httplib
import urllib2 as url_request
import urlparse as parse
from selenium.webdriver.common import utils as common_utils
from .command import Command
from .errorhandler import ErrorCode
from . import utils
LOGGER = logging.getLogger(__name__)
class Request(url_request.Request):
"""
Extends the url_request.Request to support all HTTP request types.
"""
def __init__(self, url, data=None, method=None):
"""
Initialise a new HTTP request.
:Args:
- url - String for the URL to send the request to.
- data - Data to send with the request.
- method - The HTTP method to use; defaults to 'POST' when data is given, otherwise 'GET'.
"""
if method is None:
method = data is not None and 'POST' or 'GET'
elif method != 'POST' and method != 'PUT':
data = None
self._method = method
url_request.Request.__init__(self, url, data=data)
def get_method(self):
"""
Returns the HTTP method used by this request.
"""
return self._method
class Response(object):
"""
Represents an HTTP response.
"""
def __init__(self, fp, code, headers, url):
"""
Initialise a new Response.
:Args:
- fp - The response body file object.
- code - The HTTP status code returned by the server.
- headers - A dictionary of headers returned by the server.
- url - URL of the retrieved resource represented by this Response.
"""
self.fp = fp
self.read = fp.read
self.code = code
self.headers = headers
self.url = url
def close(self):
"""
Close the response body file object.
"""
self.read = None
self.fp = None
def info(self):
"""
Returns the response headers.
"""
return self.headers
def geturl(self):
"""
Returns the URL for the resource returned in this response.
"""
return self.url
class HttpErrorHandler(url_request.HTTPDefaultErrorHandler):
"""
A custom HTTP error handler.
Used to return Response objects instead of raising an HTTPError exception.
"""
def http_error_default(self, req, fp, code, msg, headers):
"""
Default HTTP error handler.
:Args:
- req - The original Request object.
- fp - The response body file object.
- code - The HTTP status code returned by the server.
- msg - The HTTP status message returned by the server.
- headers - The response headers.
:Returns:
A new Response object.
"""
return Response(fp, code, headers, req.get_full_url())
class RemoteConnection(object):
"""A connection with the Remote WebDriver server.
Communicates with the server using the WebDriver wire protocol:
https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol"""
_timeout = socket._GLOBAL_DEFAULT_TIMEOUT
@classmethod
def get_timeout(cls):
"""
:Returns:
Timeout value in seconds for all http requests made to the Remote Connection
"""
return None if cls._timeout == socket._GLOBAL_DEFAULT_TIMEOUT else cls._timeout
@classmethod
def set_timeout(cls, timeout):
"""
Override the default timeout
:Args:
- timeout - timeout value for http requests in seconds
"""
cls._timeout = timeout
@classmethod
def reset_timeout(cls):
"""
Reset the http request timeout to socket._GLOBAL_DEFAULT_TIMEOUT
"""
cls._timeout = socket._GLOBAL_DEFAULT_TIMEOUT
def __init__(self, remote_server_addr, keep_alive=False, resolve_ip=True):
# Attempt to resolve the hostname and get an IP address.
self.keep_alive = keep_alive
parsed_url = parse.urlparse(remote_server_addr)
addr = parsed_url.hostname
if parsed_url.hostname and resolve_ip:
port = parsed_url.port or None
if parsed_url.scheme == "https":
ip = parsed_url.hostname
else:
ip = common_utils.find_connectable_ip(parsed_url.hostname,
port=port)
if ip:
netloc = ip
addr = netloc
if parsed_url.port:
netloc = common_utils.join_host_port(netloc,
parsed_url.port)
if parsed_url.username:
auth = parsed_url.username
if parsed_url.password:
auth += ':%s' % parsed_url.password
netloc = '%s@%s' % (auth, netloc)
remote_server_addr = parse.urlunparse(
(parsed_url.scheme, netloc, parsed_url.path,
parsed_url.params, parsed_url.query, parsed_url.fragment))
else:
LOGGER.info('Could not get IP address for host: %s' %
parsed_url.hostname)
self._url = remote_server_addr
if keep_alive:
self._conn = httplib.HTTPConnection(
str(addr), str(parsed_url.port), timeout=self._timeout)
self._commands = {
Command.STATUS: ('GET', '/status'),
Command.NEW_SESSION: ('POST', '/session'),
Command.GET_ALL_SESSIONS: ('GET', '/sessions'),
Command.QUIT: ('DELETE', '/session/$sessionId'),
Command.GET_CURRENT_WINDOW_HANDLE:
('GET', '/session/$sessionId/window_handle'),
Command.GET_WINDOW_HANDLES:
('GET', '/session/$sessionId/window_handles'),
Command.GET: ('POST', '/session/$sessionId/url'),
Command.GO_FORWARD: ('POST', '/session/$sessionId/forward'),
Command.GO_BACK: ('POST', '/session/$sessionId/back'),
Command.REFRESH: ('POST', '/session/$sessionId/refresh'),
Command.EXECUTE_SCRIPT: ('POST', '/session/$sessionId/execute'),
Command.GET_CURRENT_URL: ('GET', '/session/$sessionId/url'),
Command.GET_TITLE: ('GET', '/session/$sessionId/title'),
Command.GET_PAGE_SOURCE: ('GET', '/session/$sessionId/source'),
Command.SCREENSHOT: ('GET', '/session/$sessionId/screenshot'),
Command.ELEMENT_SCREENSHOT: ('GET', '/session/$sessionId/element/$id/screenshot'),
Command.FIND_ELEMENT: ('POST', '/session/$sessionId/element'),
Command.FIND_ELEMENTS: ('POST', '/session/$sessionId/elements'),
Command.W3C_GET_ACTIVE_ELEMENT: ('GET', '/session/$sessionId/element/active'),
Command.GET_ACTIVE_ELEMENT:
('POST', '/session/$sessionId/element/active'),
Command.FIND_CHILD_ELEMENT:
('POST', '/session/$sessionId/element/$id/element'),
Command.FIND_CHILD_ELEMENTS:
('POST', '/session/$sessionId/element/$id/elements'),
Command.CLICK_ELEMENT: ('POST', '/session/$sessionId/element/$id/click'),
Command.CLEAR_ELEMENT: ('POST', '/session/$sessionId/element/$id/clear'),
Command.SUBMIT_ELEMENT: ('POST', '/session/$sessionId/element/$id/submit'),
Command.GET_ELEMENT_TEXT: ('GET', '/session/$sessionId/element/$id/text'),
Command.SEND_KEYS_TO_ELEMENT:
('POST', '/session/$sessionId/element/$id/value'),
Command.SEND_KEYS_TO_ACTIVE_ELEMENT:
('POST', '/session/$sessionId/keys'),
Command.UPLOAD_FILE: ('POST', "/session/$sessionId/file"),
Command.GET_ELEMENT_VALUE:
('GET', '/session/$sessionId/element/$id/value'),
Command.GET_ELEMENT_TAG_NAME:
('GET', '/session/$sessionId/element/$id/name'),
Command.IS_ELEMENT_SELECTED:
('GET', '/session/$sessionId/element/$id/selected'),
Command.SET_ELEMENT_SELECTED:
('POST', '/session/$sessionId/element/$id/selected'),
Command.IS_ELEMENT_ENABLED:
('GET', '/session/$sessionId/element/$id/enabled'),
Command.IS_ELEMENT_DISPLAYED:
('GET', '/session/$sessionId/element/$id/displayed'),
Command.GET_ELEMENT_LOCATION:
('GET', '/session/$sessionId/element/$id/location'),
Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW:
('GET', '/session/$sessionId/element/$id/location_in_view'),
Command.GET_ELEMENT_SIZE:
('GET', '/session/$sessionId/element/$id/size'),
Command.GET_ELEMENT_RECT:
('GET', '/session/$sessionId/element/$id/rect'),
Command.GET_ELEMENT_ATTRIBUTE:
('GET', '/session/$sessionId/element/$id/attribute/$name'),
Command.GET_ELEMENT_PROPERTY:
('GET', '/session/$sessionId/element/$id/property/$name'),
Command.ELEMENT_EQUALS:
('GET', '/session/$sessionId/element/$id/equals/$other'),
Command.GET_ALL_COOKIES: ('GET', '/session/$sessionId/cookie'),
Command.ADD_COOKIE: ('POST', '/session/$sessionId/cookie'),
Command.DELETE_ALL_COOKIES:
('DELETE', '/session/$sessionId/cookie'),
Command.DELETE_COOKIE:
('DELETE', '/session/$sessionId/cookie/$name'),
Command.SWITCH_TO_FRAME: ('POST', '/session/$sessionId/frame'),
Command.SWITCH_TO_PARENT_FRAME: ('POST', '/session/$sessionId/frame/parent'),
Command.SWITCH_TO_WINDOW: ('POST', '/session/$sessionId/window'),
Command.CLOSE: ('DELETE', '/session/$sessionId/window'),
Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY:
('GET', '/session/$sessionId/element/$id/css/$propertyName'),
Command.IMPLICIT_WAIT:
('POST', '/session/$sessionId/timeouts/implicit_wait'),
Command.EXECUTE_ASYNC_SCRIPT: ('POST', '/session/$sessionId/execute_async'),
Command.SET_SCRIPT_TIMEOUT:
('POST', '/session/$sessionId/timeouts/async_script'),
Command.SET_TIMEOUTS:
('POST', '/session/$sessionId/timeouts'),
Command.DISMISS_ALERT:
('POST', '/session/$sessionId/dismiss_alert'),
Command.ACCEPT_ALERT:
('POST', '/session/$sessionId/accept_alert'),
Command.SET_ALERT_VALUE:
('POST', '/session/$sessionId/alert_text'),
Command.GET_ALERT_TEXT:
('GET', '/session/$sessionId/alert_text'),
Command.SET_ALERT_CREDENTIALS:
('POST', '/session/$sessionId/alert/credentials'),
Command.CLICK:
('POST', '/session/$sessionId/click'),
Command.DOUBLE_CLICK:
('POST', '/session/$sessionId/doubleclick'),
Command.MOUSE_DOWN:
('POST', '/session/$sessionId/buttondown'),
Command.MOUSE_UP:
('POST', '/session/$sessionId/buttonup'),
Command.MOVE_TO:
('POST', '/session/$sessionId/moveto'),
Command.GET_WINDOW_SIZE:
('GET', '/session/$sessionId/window/$windowHandle/size'),
Command.W3C_GET_WINDOW_SIZE:
('GET', '/session/$sessionId/window/size'),
Command.SET_WINDOW_SIZE:
('POST', '/session/$sessionId/window/$windowHandle/size'),
Command.W3C_SET_WINDOW_SIZE:
('POST', '/session/$sessionId/window/size'),
Command.GET_WINDOW_POSITION:
('GET', '/session/$sessionId/window/$windowHandle/position'),
Command.SET_WINDOW_POSITION:
('POST', '/session/$sessionId/window/$windowHandle/position'),
Command.W3C_GET_WINDOW_POSITION:
('GET', '/session/$sessionId/window/position'),
Command.W3C_SET_WINDOW_POSITION:
('POST', '/session/$sessionId/window/position'),
Command.MAXIMIZE_WINDOW:
('POST', '/session/$sessionId/window/$windowHandle/maximize'),
Command.W3C_MAXIMIZE_WINDOW:
('POST', '/session/$sessionId/window/maximize'),
Command.SET_SCREEN_ORIENTATION:
('POST', '/session/$sessionId/orientation'),
Command.GET_SCREEN_ORIENTATION:
('GET', '/session/$sessionId/orientation'),
Command.SINGLE_TAP:
('POST', '/session/$sessionId/touch/click'),
Command.TOUCH_DOWN:
('POST', '/session/$sessionId/touch/down'),
Command.TOUCH_UP:
('POST', '/session/$sessionId/touch/up'),
Command.TOUCH_MOVE:
('POST', '/session/$sessionId/touch/move'),
Command.TOUCH_SCROLL:
('POST', '/session/$sessionId/touch/scroll'),
Command.DOUBLE_TAP:
('POST', '/session/$sessionId/touch/doubleclick'),
Command.LONG_PRESS:
('POST', '/session/$sessionId/touch/longclick'),
Command.FLICK:
('POST', '/session/$sessionId/touch/flick'),
Command.EXECUTE_SQL:
('POST', '/session/$sessionId/execute_sql'),
Command.GET_LOCATION:
('GET', '/session/$sessionId/location'),
Command.SET_LOCATION:
('POST', '/session/$sessionId/location'),
Command.GET_APP_CACHE:
('GET', '/session/$sessionId/application_cache'),
Command.GET_APP_CACHE_STATUS:
('GET', '/session/$sessionId/application_cache/status'),
Command.CLEAR_APP_CACHE:
('DELETE', '/session/$sessionId/application_cache/clear'),
Command.GET_NETWORK_CONNECTION:
('GET', '/session/$sessionId/network_connection'),
Command.SET_NETWORK_CONNECTION:
('POST', '/session/$sessionId/network_connection'),
Command.GET_LOCAL_STORAGE_ITEM:
('GET', '/session/$sessionId/local_storage/key/$key'),
Command.REMOVE_LOCAL_STORAGE_ITEM:
('DELETE', '/session/$sessionId/local_storage/key/$key'),
Command.GET_LOCAL_STORAGE_KEYS:
('GET', '/session/$sessionId/local_storage'),
Command.SET_LOCAL_STORAGE_ITEM:
('POST', '/session/$sessionId/local_storage'),
Command.CLEAR_LOCAL_STORAGE:
('DELETE', '/session/$sessionId/local_storage'),
Command.GET_LOCAL_STORAGE_SIZE:
('GET', '/session/$sessionId/local_storage/size'),
Command.GET_SESSION_STORAGE_ITEM:
('GET', '/session/$sessionId/session_storage/key/$key'),
Command.REMOVE_SESSION_STORAGE_ITEM:
('DELETE', '/session/$sessionId/session_storage/key/$key'),
Command.GET_SESSION_STORAGE_KEYS:
('GET', '/session/$sessionId/session_storage'),
Command.SET_SESSION_STORAGE_ITEM:
('POST', '/session/$sessionId/session_storage'),
Command.CLEAR_SESSION_STORAGE:
('DELETE', '/session/$sessionId/session_storage'),
Command.GET_SESSION_STORAGE_SIZE:
('GET', '/session/$sessionId/session_storage/size'),
Command.GET_LOG:
('POST', '/session/$sessionId/log'),
Command.GET_AVAILABLE_LOG_TYPES:
('GET', '/session/$sessionId/log/types'),
Command.CURRENT_CONTEXT_HANDLE:
('GET', '/session/$sessionId/context'),
Command.CONTEXT_HANDLES:
('GET', '/session/$sessionId/contexts'),
Command.SWITCH_TO_CONTEXT:
('POST', '/session/$sessionId/context'),
}
def execute(self, command, params):
"""
Send a command to the remote server.
Any path substitutions required for the URL mapped to the command should be
included in the command parameters.
:Args:
- command - A string specifying the command to execute.
- params - A dictionary of named parameters to send with the command as
its JSON payload.
"""
command_info = self._commands[command]
assert command_info is not None, 'Unrecognised command %s' % command
data = utils.dump_json(params)
path = string.Template(command_info[1]).substitute(params)
url = '%s%s' % (self._url, path)
return self._request(command_info[0], url, body=data)
def _request(self, method, url, body=None):
"""
Send an HTTP request to the remote server.
:Args:
- method - A string for the HTTP method to send the request with.
- url - A string for the URL to send the request to.
- body - A string for request body. Ignored unless method is POST or PUT.
:Returns:
A dictionary with the server's parsed JSON response.
"""
LOGGER.debug('%s %s %s' % (method, url, body))
parsed_url = parse.urlparse(url)
if self.keep_alive:
headers = {"Connection": 'keep-alive', method: parsed_url.path,
"User-Agent": "Python http auth",
"Content-type": "application/json;charset=\"UTF-8\"",
"Accept": "application/json"}
if parsed_url.username:
auth = base64.standard_b64encode(('%s:%s' % (
parsed_url.username,
parsed_url.password)).encode('ascii')).decode('ascii').replace('\n', '')
headers["Authorization"] = "Basic %s" % auth
if body and method != 'POST' and method != 'PUT':
body = None
try:
self._conn.request(method, parsed_url.path, body, headers)
resp = self._conn.getresponse()
except (httplib.HTTPException, socket.error):
self._conn.close()
raise
statuscode = resp.status
else:
password_manager = None
if parsed_url.username:
netloc = parsed_url.hostname
if parsed_url.port:
netloc += ":%s" % parsed_url.port
cleaned_url = parse.urlunparse((
parsed_url.scheme,
netloc,
parsed_url.path,
parsed_url.params,
parsed_url.query,
parsed_url.fragment))
password_manager = url_request.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None,
"%s://%s" % (parsed_url.scheme, netloc),
parsed_url.username,
parsed_url.password)
request = Request(cleaned_url, data=body.encode('utf-8'), method=method)
else:
request = Request(url, data=body.encode('utf-8'), method=method)
request.add_header('Accept', 'application/json')
request.add_header('Content-Type', 'application/json;charset=UTF-8')
if parsed_url.username:
base64string = base64.b64encode('%s:%s' % (parsed_url.username, parsed_url.password))
request.add_header("Authorization", "Basic %s" % base64string)
if password_manager:
opener = url_request.build_opener(url_request.HTTPRedirectHandler(),
HttpErrorHandler(),
url_request.HTTPBasicAuthHandler(password_manager))
else:
opener = url_request.build_opener(url_request.HTTPRedirectHandler(),
HttpErrorHandler())
resp = opener.open(request, timeout=self._timeout)
statuscode = resp.code
if not hasattr(resp, 'getheader'):
if hasattr(resp.headers, 'getheader'):
resp.getheader = lambda x: resp.headers.getheader(x)
elif hasattr(resp.headers, 'get'):
resp.getheader = lambda x: resp.headers.get(x)
data = resp.read()
try:
if 300 <= statuscode < 304:
return self._request('GET', resp.getheader('location'))
body = data.decode('utf-8').replace('\x00', '').strip()
if 399 < statuscode <= 500:
return {'status': statuscode, 'value': body}
content_type = []
if resp.getheader('Content-Type') is not None:
content_type = resp.getheader('Content-Type').split(';')
if not any([x.startswith('image/png') for x in content_type]):
try:
data = utils.load_json(body.strip())
except ValueError:
if 199 < statuscode < 300:
status = ErrorCode.SUCCESS
else:
status = ErrorCode.UNKNOWN_ERROR
return {'status': status, 'value': body.strip()}
assert type(data) is dict, (
'Invalid server response body: %s' % body)
# Some of the drivers incorrectly return a response
# with no 'value' field when they should return null.
if 'value' not in data:
data['value'] = None
return data
else:
data = {'status': 0, 'value': body.strip()}
return data
finally:
LOGGER.debug("Finished Request")
resp.close()
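# Illustrative sketch (not part of the original module): driving a remote
# WebDriver server directly through RemoteConnection. The server URL is
# hypothetical, and the exact shape of the new-session response depends on
# the server (legacy JSON wire protocol vs. W3C); normally selenium.webdriver
# creates and manages the session for you.
def _example_remote_connection_usage():
    conn = RemoteConnection('http://127.0.0.1:4444/wd/hub', keep_alive=False)
    # NEW_SESSION maps to ('POST', '/session'); params become the JSON payload.
    result = conn.execute(Command.NEW_SESSION,
                          {'desiredCapabilities': {'browserName': 'firefox'}})
    session_id = result.get('sessionId') or result.get('value', {}).get('sessionId')
    # '$sessionId' in the command template is filled in from the params dict
    # by string.Template.substitute() before the request is sent.
    conn.execute(Command.GET,
                 {'sessionId': session_id, 'url': 'http://example.com'})
    return conn.execute(Command.QUIT, {'sessionId': session_id})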
|
|
# Copyright The IETF Trust 2007, All Rights Reserved
from email.Utils import make_msgid, formatdate, formataddr, parseaddr, getaddresses
from email.MIMEText import MIMEText
from email.MIMEMessage import MIMEMessage
from email.MIMEMultipart import MIMEMultipart
from email.header import Header
from email import message_from_string
from email import charset as Charset
import smtplib
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import ImproperlyConfigured
from django.template.loader import render_to_string
from django.template import Context,RequestContext
import ietf
from ietf.utils.log import log
import sys
import time
import copy
import textwrap
import traceback
import datetime
# Testing mode:
# import ietf.utils.mail
# ietf.utils.mail.test_mode = True
# ... send some mail ...
# ... inspect ietf.utils.mail.outbox ...
# ... call ietf.utils.mail.empty_outbox() ...
test_mode = False
outbox = []
SMTP_ADDR = { 'ip4':settings.EMAIL_HOST, 'port':settings.EMAIL_PORT}
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
Charset.add_charset('utf-8', Charset.SHORTEST, None, 'utf-8')
def empty_outbox():
outbox[:] = []
def add_headers(msg):
if not(msg.has_key('Message-ID')):
msg['Message-ID'] = make_msgid('idtracker')
if not(msg.has_key('Date')):
msg['Date'] = formatdate(time.time(), True)
if not(msg.has_key('From')):
msg['From'] = settings.DEFAULT_FROM_EMAIL
return msg
class SMTPSomeRefusedRecipients(smtplib.SMTPException):
def __init__(self, message, original_msg, refusals):
smtplib.SMTPException.__init__(self, message)
self.original_msg = original_msg
self.refusals = refusals
def detailed_refusals(self):
details = "The following recipients were refused:\n"
for recipient in self.refusals:
details += "\n%s: %s" % (recipient,self.refusals[recipient])
return details
def summary_refusals(self):
return ", ".join(["%s (%s)"%(x,self.refusals[x][0]) for x in self.refusals])
def send_smtp(msg, bcc=None):
'''
Send a Message via SMTP, based on the django email server settings.
The destination list will be taken from the To:/Cc: headers in the
Message. The From address will be used if present or will default
to the django setting DEFAULT_FROM_EMAIL
If someone has set test_mode=True, then append the msg to
the outbox.
'''
add_headers(msg)
(fname, frm) = parseaddr(msg.get('From'))
addrlist = msg.get_all('To') + msg.get_all('Cc', [])
if bcc:
addrlist += [bcc]
to = [addr for name, addr in getaddresses(addrlist) if ( addr != '' and not addr.startswith('unknown-email-') )]
if not to:
log("No addressees for email from '%s', subject '%s'. Nothing sent." % (frm, msg.get('Subject', '[no subject]')))
else:
if test_mode:
outbox.append(msg)
server = None
try:
server = smtplib.SMTP()
#log("SMTP server: %s" % repr(server))
#if settings.DEBUG:
# server.set_debuglevel(1)
conn_code, conn_msg = server.connect(SMTP_ADDR['ip4'], SMTP_ADDR['port'])
#log("SMTP connect: code: %s; msg: %s" % (conn_code, conn_msg))
if settings.EMAIL_HOST_USER and settings.EMAIL_HOST_PASSWORD:
server.ehlo()
if 'starttls' not in server.esmtp_features:
raise ImproperlyConfigured('password configured but starttls not supported')
(retval, retmsg) = server.starttls()
if retval != 220:
raise ImproperlyConfigured('password configured but tls failed: %d %s' % ( retval, retmsg ))
# Send a new EHLO, since without TLS the server might not
# advertise the AUTH capability.
server.ehlo()
server.login(settings.EMAIL_HOST_USER, settings.EMAIL_HOST_PASSWORD)
unhandled = server.sendmail(frm, to, msg.as_string())
if unhandled != {}:
raise SMTPSomeRefusedRecipients(message="%d addresses were refused"%len(unhandled),original_msg=msg,refusals=unhandled)
except Exception as e:
# need to improve log message
log("Exception while trying to send email from '%s' to %s subject '%s'" % (frm, to, msg.get('Subject', '[no subject]')))
if isinstance(e, smtplib.SMTPException):
e.original_msg=msg
raise
else:
raise smtplib.SMTPException({'really': sys.exc_info()[0], 'value': sys.exc_info()[1], 'tb': traceback.format_tb(sys.exc_info()[2])})
finally:
try:
server.quit()
except smtplib.SMTPServerDisconnected:
pass
log("sent email from '%s' to %s subject '%s'" % (frm, to, msg.get('Subject', '[no subject]')))
def copy_email(msg, to, toUser=False, originalBcc=None):
'''
Send a copy of the given email message to the given recipient.
'''
add_headers(msg)
new = MIMEMultipart()
# get info for first part.
# Mode: if it's production, then "copy of a message", otherwise
# "this is a message that would have been sent from"
# hostname?
# django settings if debugging?
# Should this be a template?
if settings.SERVER_MODE == 'production':
explanation = "This is a copy of a message sent from the I-D tracker."
elif settings.SERVER_MODE == 'test' and toUser:
explanation = "The attached message was generated by an instance of the tracker\nin test mode. It is being sent to you because you, or someone acting\non your behalf, is testing the system. If you do not recognize\nthis action, please accept our apologies and do not be concerned as\nthe action is being taken in a test context."
else:
explanation = "The attached message would have been sent, but the tracker is in %s mode.\nIt was not sent to anybody." % settings.SERVER_MODE
if originalBcc:
explanation += ("\nIn addition to the destinations derived from the header below, the message would have been sent Bcc to %s" % originalBcc)
new.attach(MIMEText(explanation + "\n\n"))
new.attach(MIMEMessage(msg))
# Overwrite the From: header, so that the copy from a development or
# test server doesn't look like spam.
new['From'] = settings.DEFAULT_FROM_EMAIL
new['Subject'] = '[Django %s] %s' % (settings.SERVER_MODE, msg.get('Subject', '[no subject]'))
new['To'] = to
send_smtp(new)
def mail_context(request):
if request:
return RequestContext(request)
else:
return Context()
def send_mail_subj(request, to, frm, stemplate, template, context, *args, **kwargs):
'''
Send an email message, exactly as send_mail(), but the
subject field is a template.
'''
subject = render_to_string(stemplate, context, context_instance=mail_context(request)).replace("\n"," ").strip()
return send_mail(request, to, frm, subject, template, context, *args, **kwargs)
def send_mail(request, to, frm, subject, template, context, *args, **kwargs):
'''
Send an email to the destination [list], with the given return
address (or "None" to use the default in settings.py).
The body is a text/plain rendering of the template with the context.
extra is a dict of extra headers to add.
'''
txt = render_to_string(template, context, context_instance=mail_context(request))
return send_mail_text(request, to, frm, subject, txt, *args, **kwargs)
def encode_message(txt):
if isinstance(txt, unicode):
msg = MIMEText(txt.encode('utf-8'), 'plain', 'UTF-8')
else:
msg = MIMEText(txt)
return msg
def send_mail_text(request, to, frm, subject, txt, cc=None, extra=None, toUser=False, bcc=None):
"""Send plain text message."""
msg = encode_message(txt)
send_mail_mime(request, to, frm, subject, msg, cc, extra, toUser, bcc)
def condition_message(to, frm, subject, msg, cc, extra):
if isinstance(frm, tuple):
frm = formataddr(frm)
if isinstance(to, list) or isinstance(to, tuple):
to = ", ".join([isinstance(addr, tuple) and formataddr(addr) or addr for addr in to if addr])
if isinstance(cc, list) or isinstance(cc, tuple):
cc = ", ".join([isinstance(addr, tuple) and formataddr(addr) or addr for addr in cc if addr])
if frm:
msg['From'] = frm
# The following is a hack to avoid an issue with how the email module (as of version 4.0.3)
# breaks lines when encoding header fields with anything other than the us-ascii codec.
# This allows the Header implementation to encode each display name as a separate chunk.
# The resulting encode produces a string that is us-ascii and has a good density of
# "higher-level syntactic breaks"
to_hdr = Header(header_name='To')
for name, addr in getaddresses([to]):
if addr != '' and not addr.startswith('unknown-email-'):
if name:
to_hdr.append('"%s"' % name)
to_hdr.append("<%s>," % addr)
to_str = to_hdr.encode()
if to_str and to_str[-1] == ',':
to_str=to_str[:-1]
# It's important to use this string, and not assign the Header object.
# Code downstream from this assumes that the msg['To'] will return a string, not an instance
msg['To'] = to_str
if cc:
msg['Cc'] = cc
msg['Subject'] = subject
msg['X-Test-IDTracker'] = (settings.SERVER_MODE == 'production') and 'no' or 'yes'
msg['X-IETF-IDTracker'] = ietf.__version__
msg['Auto-Submitted'] = "auto-generated"
msg['Precedence'] = "bulk"
if extra:
for k, v in extra.items():
if v:
msg[k] = v
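# Illustrative sketch (not part of the original module): the per-chunk Header
# construction used in condition_message() above, shown in isolation. The
# addresses are hypothetical. Appending each display name and angle-addr as a
# separate chunk keeps the encoded result us-ascii with breaks that the email
# module can fold on safely.
def _example_to_header():
    to_hdr = Header(header_name='To')
    for name, addr in getaddresses(['"J. Random Hacker" <[email protected]>, [email protected]']):
        if name:
            to_hdr.append('"%s"' % name)
        to_hdr.append("<%s>," % addr)
    return to_hdr.encode().rstrip(',')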
def show_that_mail_was_sent(request,leadline,msg,bcc):
if request and request.user:
from ietf.ietfauth.utils import has_role
if has_role(request.user,['Area Director','Secretariat','IANA','RFC Editor','ISE','IAD','IRTF Chair','WG Chair','RG Chair','WG Secretary','RG Secretary']):
info = "%s at %s %s\n" % (leadline,datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),settings.TIME_ZONE)
info += "Subject: %s\n" % msg.get('Subject','[no subject]')
info += "To: %s\n" % msg.get('To','[no to]')
if msg.get('Cc'):
info += "Cc: %s\n" % msg.get('Cc')
if bcc:
info += "Bcc: %s\n" % bcc
messages.info(request,info,extra_tags='preformatted',fail_silently=True)
def send_mail_mime(request, to, frm, subject, msg, cc=None, extra=None, toUser=False, bcc=None):
"""Send MIME message with content already filled in."""
condition_message(to, frm, subject, msg, cc, extra)
# start debug server with python -m smtpd -n -c DebuggingServer localhost:2025
# then put USING_DEBUG_EMAIL_SERVER=True and EMAIL_HOST='localhost'
# and EMAIL_PORT=2025 in settings_local.py
debugging = getattr(settings, "USING_DEBUG_EMAIL_SERVER", False) and settings.EMAIL_HOST == 'localhost' and settings.EMAIL_PORT == 2025
if settings.SERVER_MODE == 'development':
show_that_mail_was_sent(request,'In production, email would have been sent',msg,bcc)
if test_mode or debugging or settings.SERVER_MODE == 'production':
try:
send_smtp(msg,bcc)
except smtplib.SMTPException as e:
log_smtp_exception(e)
build_warning_message(request, e)
send_error_email(e)
show_that_mail_was_sent(request,'Email was sent',msg,bcc)
elif settings.SERVER_MODE == 'test':
if toUser:
copy_email(msg, to, toUser=True, originalBcc=bcc)
elif request and request.COOKIES.has_key( 'testmailcc' ):
copy_email(msg, request.COOKIES[ 'testmailcc' ],originalBcc=bcc)
try:
copy_to = settings.EMAIL_COPY_TO
except AttributeError:
copy_to = "ietf.tracker.archive+%[email protected]" % settings.SERVER_MODE
if copy_to and not test_mode and not debugging: # if we're running automated tests, this copy is just annoying
if bcc:
msg['X-Tracker-Bcc']=bcc
try:
copy_email(msg, copy_to, originalBcc=bcc)
except smtplib.SMTPException as e:
log_smtp_exception(e)
build_warning_message(request, e)
send_error_email(e)
def parse_preformatted(preformatted, extra={}, override={}):
"""Parse preformatted string containing mail with From:, To:, ...,"""
msg = message_from_string(preformatted.encode("utf-8"))
for k, v in override.iteritems():
if k in msg:
del msg[k]
msg[k] = v
headers = copy.copy(msg)
for key in ['To', 'From', 'Subject', 'Bcc']:
del headers[key]
for k, v in extra.iteritems():
if k in headers:
del headers[k]
headers[k] = v
bcc = msg['Bcc']
del msg['Bcc']
return (msg, headers, bcc)
def send_mail_preformatted(request, preformatted, extra={}, override={}):
"""Parse preformatted string containing mail with From:, To:, ...,
and send it through the standard IETF mail interface (inserting
extra headers as needed)."""
(msg,headers,bcc) = parse_preformatted(preformatted, extra, override)
send_mail_text(request, msg['To'], msg["From"], msg["Subject"], msg.get_payload(), extra=headers, bcc=bcc)
return msg
def send_mail_message(request, message, extra={}):
"""Send a Message object."""
# note that this doesn't handle MIME messages at the moment
e = extra.copy()
if message.reply_to:
e['Reply-to'] = message.reply_to
send_mail_text(request, message.to, message.frm, message.subject,
message.body, cc=message.cc, bcc=message.bcc, extra=e)
def exception_components(e):
# See if it's a non-smtplib exception that we faked
if len(e.args)==1 and isinstance(e.args[0],dict) and e.args[0].has_key('really'):
orig = e.args[0]
extype = orig['really']
tb = orig['tb']
value = orig['value']
else:
extype = sys.exc_info()[0]
value = sys.exc_info()[1]
tb = traceback.format_tb(sys.exc_info()[2])
return (extype, value, tb)
def log_smtp_exception(e):
(extype, value, tb) = exception_components(e)
log("SMTP Exception: %s : %s" % (extype,value))
if isinstance(e,SMTPSomeRefusedRecipients):
log(" SomeRefused: %s"%(e.summary_refusals()))
log(" Traceback: %s" % tb)
def build_warning_message(request, e):
(extype, value, tb) = exception_components(e)
if request:
warning = "An error occured while sending email:\n"
if getattr(e,'original_msg',None):
warning += "Subject: %s\n" % e.original_msg.get('Subject','[no subject]')
warning += "To: %s\n" % e.original_msg.get('To','[no to]')
warning += "Cc: %s\n" % e.original_msg.get('Cc','[no cc]')
if isinstance(e,SMTPSomeRefusedRecipients):
warning += e.detailed_refusals()
else:
warning += "SMTP Exception: %s\n"%extype
warning += "Error Message: %s\n\n"%value
warning += "The message was not delivered to anyone."
messages.warning(request,warning,extra_tags='preformatted',fail_silently=True)
def send_error_email(e):
(extype, value, tb) = exception_components(e)
msg = MIMEMultipart()
msg['To'] = '<[email protected]>'
msg['From'] = settings.SERVER_EMAIL
if isinstance(e,SMTPSomeRefusedRecipients):
msg['Subject'] = 'Subject: Some recipients were refused while sending mail with Subject: %s' % e.original_msg.get('Subject','[no subject]')
textpart = textwrap.dedent("""\
This is a message from the datatracker to IETF-Action about an email
delivery failure, when sending email from the datatracker.
%s
""") % e.detailed_refusals()
else:
msg['Subject'] = 'Datatracker error while sending email'
textpart = textwrap.dedent("""\
This is a message from the datatracker to IETF-Action about an email
delivery failure, when sending email from the datatracker.
The original message was not delivered to anyone.
SMTP Exception: %s
Error Message: %s
""") % (extype,value)
if hasattr(e,'original_msg'):
textpart += "The original message follows:\n"
msg.attach(MIMEText(textpart,_charset='utf-8'))
if hasattr(e,'original_msg'):
msg.attach(MIMEMessage(e.original_msg))
send_error_to_secretariat(msg)
def send_error_to_secretariat(msg):
debugging = getattr(settings, "USING_DEBUG_EMAIL_SERVER", False) and settings.EMAIL_HOST == 'localhost' and settings.EMAIL_PORT == 2025
try:
if test_mode or debugging or settings.SERVER_MODE == 'production':
send_smtp(msg, bcc=None)
try:
copy_to = settings.EMAIL_COPY_TO
except AttributeError:
copy_to = "ietf.tracker.archive+%[email protected]" % settings.SERVER_MODE
if copy_to and not test_mode and not debugging: # if we're running automated tests, this copy is just annoying
copy_email(msg, copy_to,originalBcc=None)
except smtplib.SMTPException:
log("Exception encountered while sending a ticket to the secretariat")
(extype,value) = sys.exc_info()[:2]
log("SMTP Exception: %s : %s" % (extype,value))
|
|
#! /usr/bin/env python
"""
Notes
-----
Calculations are carried out with numpy.float64 precision.
This Python implementation is not optimized for speed.
Angles are in radians unless specified otherwise.
Quaternions ix+jy+kz+w are represented as [x, y, z, w].
"""
import rospy
# Messages
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from baxter_core_msgs.msg import EndpointState
from omni_msgs.msg import OmniState, OmniFeedback, OmniButtonEvent
from geometry_msgs.msg import Vector3, Point, PoseStamped, Quaternion, Wrench, Transform, PoseStamped
from visualization_msgs.msg import Marker
from std_msgs.msg import Bool
# State Machine
import smach
import smach_ros
from smach import CBState
# Math
from math import pi, exp, sin, sqrt
import numpy as np
import tf.transformations as tr
# Quaternions tools
import PyKDL
import time
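# Illustrative sketch (not part of the original node): the [x, y, z, w]
# quaternion convention stated in the module docstring, checked against
# tf.transformations. A rotation of pi/2 about the z axis should give
# [0, 0, sin(pi/4), cos(pi/4)], i.e. w is the last component.
def _example_quaternion_convention():
  q = tr.quaternion_about_axis(pi / 2.0, (0, 0, 1))
  return np.allclose(q, [0.0, 0.0, np.sin(pi / 4.0), np.cos(pi / 4.0)])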
GREY_BUTTON = 0
WHITE_BUTTON = 1
class TextColors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def disable(self):
self.HEADER = ''
self.OKBLUE = ''
self.OKGREEN = ''
self.WARNING = ''
self.FAIL = ''
self.ENDC = ''
class RatePositionController:
STATES = ['GO_TO_CENTER', 'POSITION_CONTROL', 'VIBRATORY_PHASE', 'RATE_CONTROL', 'RATE_COLLISION']
def __init__(self):
# Create a SMACH state machine
self.sm = smach.StateMachine(outcomes=['succeeded', 'aborted'])
with self.sm:
# Add states to the state machine
smach.StateMachine.add('GO_TO_CENTER', CBState(self.go_to_center, cb_args=[self]),
transitions={'lock': 'GO_TO_CENTER', 'succeeded': 'POSITION_CONTROL', 'aborted': 'aborted'})
smach.StateMachine.add('POSITION_CONTROL', CBState(self.position_control, cb_args=[self]),
transitions={'stay': 'POSITION_CONTROL', 'leave': 'VIBRATORY_PHASE', 'aborted': 'aborted'})
smach.StateMachine.add('VIBRATORY_PHASE', CBState(self.vibratory_phase, cb_args=[self]),
transitions={'vibrate': 'VIBRATORY_PHASE', 'succeeded': 'RATE_CONTROL', 'aborted': 'aborted'})
smach.StateMachine.add('RATE_CONTROL', CBState(self.rate_control, cb_args=[self]),
transitions={'stay': 'RATE_CONTROL', 'leave': 'GO_TO_CENTER', 'collision': 'RATE_COLLISION', 'aborted': 'aborted'})
smach.StateMachine.add('RATE_COLLISION', CBState(self.rate_collision, cb_args=[self]),
transitions={'succeeded': 'GO_TO_CENTER', 'aborted': 'aborted'})
# Read all the parameters from the parameter server
# Topics to interact
master_name = self.read_parameter('~master_name', 'phantom')
slave_name = self.read_parameter('~slave_name', 'grips')
self.master_state_topic = '/%s/state' % master_name
#~ self.feedback_topic = '/%s/force_feedback' % master_name ##
self.slave_state_topic = '/%s/state' % slave_name
self.ik_mc_topic = '/%s/ik_command' % slave_name
self.gripper_topic = '/%s/GRIP/command' % slave_name
self.button_topic = '/%s/button' % master_name
self.slave_collision_topic = '/%s/collision' % slave_name
self.sm_control_topic = '/sm_control'
#~ self.ext_forces_topic = '/%s/external_forces' % slave_name ##
# Workspace definition
self.units = self.read_parameter('~units', 'mm')
width = self.read_parameter('~workspace/width', 140.0)
height = self.read_parameter('~workspace/height', 100.0)
depth = self.read_parameter('~workspace/depth', 55.0)
self.center_pos = self.read_parameter('~workspace/center', [0, 0 ,0])
self.workspace = np.array([width, depth, height])
self.hysteresis = self.read_parameter('~hysteresis', 3.0)
self.pivot_dist = self.read_parameter('~pivot_dist', 5.0)
# Force feedback parameters
self.k_center = self.read_parameter('~k_center', 0.1)
self.b_center = self.read_parameter('~b_center', 0.003)
self.k_rate = self.read_parameter('~k_rate', 0.05)
self.b_rate = self.read_parameter('~b_rate', 0.003)
# Position parameters
self.hysteresis = self.read_parameter('~hysteresis', 3.0)
self.pivot_dist = self.read_parameter('~pivot_dist', 5.0)
self.publish_frequency = self.read_parameter('~publish_rate', 1000.0)
self.position_ratio = self.read_parameter('~position_ratio', 250)
self.axes_rotation_1 = self.read_parameter('~axes_rotation_1', [0, 0, 0])
self.angle_rotation_1 = self.read_parameter('~angle_rotation_1',0.0)
self.axes_rotation_2 = self.read_parameter('~axes_rotation_2', [0, 0, 0])
self.angle_rotation_2 = self.read_parameter('~angle_rotation_2', 0.0)
self.axes_rotation_3 = self.read_parameter('~axes_rotation_3', [0, 0, 0])
self.angle_rotation_3 = self.read_parameter('~angle_rotation_3', 0.0)
self.position_axes = [0, 1, 2]
self.position_sign = np.array([1.0, 1.0, 1.0])
self.axes_mapping = self.read_parameter('~axes_mapping', ['x', 'y' ,'z'])
rospy.logwarn('axes_mapping[0] -> %s' % self.axes_mapping[0])
rospy.logwarn('axes_mapping[1] -> %s' % self.axes_mapping[1])
rospy.logwarn('axes_mapping[2] -> %s' % self.axes_mapping[2])
if len(self.axes_mapping) != 3:
rospy.logwarn('Invalid number of values in [axes_mapping]: expected 3, received %d' % len(self.axes_mapping))
for i, axis in enumerate(self.axes_mapping):
axis = axis.lower()
if '-' == axis[0]:
axis = axis[1:]
self.position_sign[i] = -1.0
if axis not in ('x','y','z'):
rospy.logwarn('Invalid axis %s given in [axes_mapping]' % axis)
self.position_axes[i] = ['x','y','z'].index(axis)
# Vibration parameters
self.vib_a = self.read_parameter('~vibration/a', 2.0) # Amplitude (mm)
self.vib_c = self.read_parameter('~vibration/c', 5.0) # Damping
self.vib_freq = self.read_parameter('~vibration/frequency', 30.0) # Frequency (Hz)
self.vib_time = self.read_parameter('~vibration/duration', 0.3) # Duration (s)
self.vib_start_time = 0.0
# Rate parameters
self.rate_pivot = np.zeros(3)
self.rate_gain = self.read_parameter('~rate_gain', 1.0)
# Initial values
self.center_pos = np.array([0, 0, 0])
self.frame_id = self.read_parameter('~reference_frame', 'world')
self.colors = TextColors()
self.gripper_cmd = 0.0
self.master_pos = None
self.master_rot = np.array([0, 0, 0, 1])
self.master_vel = np.zeros(3)
self.master_dir = np.zeros(3)
self.slave_pos = None
self.slave_rot = np.array([0, 0, 0, 1])
self.slave_collision = False
self.timer = None
#~ self.force_feedback = np.zeros(3) ##
#~ self.ext_forces = np.zeros(3) ##
self.gripper_value = 0.5
self.button_states =np.array([0, 0])
self.time_counter = time.clock()
# Synch
self.slave_synch_pos = np.zeros(3)
self.slave_synch_rot = np.array([0, 0, 0, 1])
self.master_synch_rot = np.array([0, 0, 0, 1])
# Button
self.prev_buttons = [0] * 2
self.buttons = [False] * 2
self.buttons[WHITE_BUTTON] = True
# Setup Subscribers/Publishers
#~ self.feedback_pub = rospy.Publisher(self.feedback_topic, OmniFeedback)
self.sm_control_pub = rospy.Publisher(self.sm_control_topic, Float64)
self.ik_mc_pub = rospy.Publisher(self.ik_mc_topic, PoseStamped)
self.gripper_pub = rospy.Publisher(self.gripper_topic, Float64)
self.vis_pub = rospy.Publisher('visualization_marker', Marker)
rospy.Subscriber(self.master_state_topic, OmniState, self.cb_master_state)
rospy.Subscriber(self.slave_state_topic, PoseStamped, self.cb_slave_state)
rospy.Subscriber(self.slave_collision_topic, Bool, self.cb_slave_collision)
#~ rospy.Subscriber(self.ext_forces_topic, OmniFeedback, self.cb_ext_forces) ##
rospy.Subscriber(self.button_topic, OmniButtonEvent, self.buttons_cb)
self.loginfo('Waiting for [%s] and [%s] topics' % (self.master_state_topic, self.slave_state_topic))
while not rospy.is_shutdown():
if (self.slave_pos is None) or (self.master_pos is None):
rospy.sleep(0.01)
else:
self.loginfo('Rate position controller running')
# Register rospy shutdown hook
rospy.on_shutdown(self.shutdown_hook)
break
# Make sure the first command sent to the slave is equal to its current position6D
self.command_pos = np.array(self.slave_pos)
self.command_rot = np.array(self.slave_rot)
# Start the timer that will publish the ik commands
self.loginfo('Publisher frequency: [%f]' % self.publish_frequency)
self.timer = rospy.Timer(rospy.Duration(1.0/self.publish_frequency), self.publish_command)
self.loginfo('State machine state: GO_TO_CENTER')
@smach.cb_interface(outcomes=['lock', 'succeeded', 'aborted'])
def go_to_center(user_data, self):
if not np.allclose(np.zeros(3), self.master_pos, atol=self.hysteresis):
#~ self.force_feedback = (self.k_center * self.master_pos + self.b_center * self.master_vel) * -1.0
self.sm_control_pub.publish(4.0)
return 'lock'
else:
self.sm_control_pub.publish(1.0)
#~ self.force_feedback = np.zeros(3)
self.slave_synch_pos = np.array(self.slave_pos)
self.slave_synch_rot = np.array(self.slave_rot)
self.master_synch_rot = np.array(self.master_rot)
self.command_pos = np.array(self.slave_pos)
self.command_rot = np.array(self.slave_rot)
self.draw_position_region(self.slave_synch_pos)
self.loginfo('State machine transitioning: GO_TO_CENTER:succeeded-->POSITION_CONTROL')
return 'succeeded'
@smach.cb_interface(outcomes=['stay', 'leave', 'aborted'])
def position_control(user_data, self):
if self.inside_workspace(self.master_pos):
self.command_pos = self.slave_synch_pos + self.master_pos / self.position_ratio
self.command_rot = np.array(self.master_rot)
self.sm_control_pub.publish(1.0)
#~ self.force_feedback = self.ext_forces ##
return 'stay'
else:
self.sm_control_pub.publish(3.0)
#~ self.force_feedback = np.zeros(3)
self.command_pos = np.array(self.slave_pos)
self.command_rot = np.array(self.slave_rot)
self.vib_start_time = rospy.get_time()
self.loginfo('State machine transitioning: POSITION_CONTROL:leave-->VIBRATORY_PHASE')
return 'leave'
@smach.cb_interface(outcomes=['vibrate', 'succeeded', 'aborted'])
def vibratory_phase(user_data, self):
if rospy.get_time() < self.vib_start_time + self.vib_time:
self.sm_control_pub.publish(3.0)
#~ t = rospy.get_time() - self.vib_start_time
#~ amplitude = -self.vib_a*exp(-self.vib_c*t)*sin(2*pi*self.vib_freq*t);
#~ self.force_feedback = amplitude * self.master_dir
return 'vibrate'
else:
self.sm_control_pub.publish(2.0)
# The pivot point should lie inside the position area, but using the center works better
#~ self.rate_pivot = self.master_pos - self.pivot_dist * self.normalize_vector(self.master_pos)
#~ self.force_feedback = np.zeros(3)
self.rate_pivot = np.array(self.master_pos)
self.loginfo('State machine transitioning: VIBRATORY_PHASE:succeeded-->RATE_CONTROL')
return 'succeeded'
@smach.cb_interface(outcomes=['stay', 'leave', 'collision', 'aborted'])
def rate_control(user_data, self):
if not self.slave_collision:
if not self.inside_workspace(self.master_pos):
# Send the force feedback to the master
#~ self.force_feedback = (self.k_rate * self.master_pos + self.b_rate * self.master_vel) * -1.0 ##
# Send the rate command to the slave
distance = sqrt(np.sum((self.master_pos - self.rate_pivot) ** 2)) / self.position_ratio
self.command_pos += (self.rate_gain * distance * self.normalize_vector(self.master_pos)) / self.position_ratio
self.command_rot = np.array(self.master_rot)
self.sm_control_pub.publish(2.0)
return 'stay'
else:
self.command_pos = np.array(self.slave_pos)
self.command_rot = np.array(self.slave_rot)
#~ self.force_feedback = np.zeros(3)
self.sm_control_pub.publish(4.0)
self.loginfo('State machine transitioning: RATE_CONTROL:leave-->GO_TO_CENTER')
return 'leave'
else:
self.command_pos = np.array(self.slave_pos)
self.command_rot = np.array(self.slave_rot)
self.sm_control_pub.publish(0.0)
#~ self.force_feedback = np.zeros(3)
self.loginfo('State machine transitioning: RATE_CONTROL:collision-->RATE_COLLISION')
return 'collision'
@smach.cb_interface(outcomes=['succeeded', 'aborted'])
def rate_collision(user_data, self):
self.loginfo('State machine transitioning: RATE_COLLISION:succeeded-->GO_TO_CENTER')
return 'succeeded'
def execute(self):
self.sm.execute()
def shutdown_hook(self):
# Stop the state machine
self.sm.request_preempt()
# Stop the publisher timer
self.timer.shutdown()
def read_parameter(self, name, default):
if not rospy.has_param(name):
rospy.logwarn('Parameter [%s] not found, using default: %s' % (name, default))
return rospy.get_param(name, default)
def loginfo(self, msg):
rospy.logwarn(self.colors.OKBLUE + str(msg) + self.colors.ENDC)
def inside_workspace(self, point):
# The workspace as an ellipsoid: http://en.wikipedia.org/wiki/Ellipsoid
return np.sum(np.divide(point**2, self.workspace**2)) < 1
def normalize_vector(self, v):
result = np.array(v)
norm = np.sqrt(np.sum((result ** 2)))
if norm:
result /= norm
return result
def change_axes(self, array, index=None, sign=None):
if index is None:
index = self.position_axes
if sign is None:
sign = self.position_sign
result = np.zeros(len(array))
for i, idx in enumerate(index):
result[i] = array[idx] * sign[idx]
return result
def change_force_axes(self, array, index=None, sign=None):
if index is None:
index = self.position_axes
if sign is None:
sign = self.position_sign
result = np.zeros(len(array))
for i, idx in enumerate(index):
result[i] = array[idx] * sign[i] #~ ??
return result
#~ def send_feedback(self):
#~
#~ feedback_msg = OmniFeedback()
#~ force = self.change_force_axes(self.force_feedback)
#~ pos = self.change_axes(self.center_pos)
#~ feedback_msg.force = Vector3(*force)
#~ feedback_msg.position = Vector3(*pos)
#~ self.feedback_pub.publish(feedback_msg)
# DO NOT print to the console within this function
def cb_master_state(self, msg):
self.master_real_pos = np.array([msg.pose.position.x, msg.pose.position.y, msg.pose.position.z])
pos = np.array([msg.pose.position.x, msg.pose.position.y, msg.pose.position.z]) - self.center_pos
vel = np.array([msg.velocity.x, msg.velocity.y, msg.velocity.z])
self.master_pos = self.change_axes(pos)
self.master_vel = self.change_axes(vel)
# Rotate to use the same axes orientation for both master and slave
real_rot = np.array([msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w])
q_1 = tr.quaternion_about_axis(self.angle_rotation_1, self.axes_rotation_1)
aux_rot = tr.quaternion_multiply(q_1, real_rot)
q_2 = tr.quaternion_about_axis(self.angle_rotation_2, self.axes_rotation_2)
aux_rot_2 = tr.quaternion_multiply(q_2, aux_rot)
q_3 = tr.quaternion_about_axis(self.angle_rotation_3, self.axes_rotation_3)
self.master_rot = tr.quaternion_multiply(q_3, aux_rot_2)
# Normalize velocity
self.master_dir = self.normalize_vector(self.master_vel)
def cb_slave_state(self, msg):
self.slave_pos = np.array([msg.pose.position.x, msg.pose.position.y, msg.pose.position.z])
self.slave_rot = np.array([msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w])
def cb_slave_collision(self, msg):
self.slave_collision = msg.data
#~ def cb_ext_forces(self, msg): ##
#~ self.ext_forces = np.array([msg.force.x, msg.force.y, msg.force.z]) ##
def buttons_cb(self, msg):
#self.loginfo('PRESSED')
self.button_states = [msg.grey_button, msg.white_button]
# Check that any button was pressed / released
#~ for i, previous in enumerate(self.prev_buttons):
#~ if (previous != button_states[i]) and button_states[i] == 1:
#~ self.buttons[i] = not self.buttons[i]
# Open or close the gripper
def publish_command(self, event):
position, orientation = self.command_pos, self.command_rot
ik_mc_msg = PoseStamped()
ik_mc_msg.header.frame_id = self.frame_id
ik_mc_msg.header.stamp = rospy.Time.now()
ik_mc_msg.pose.position = Point(*position)
ik_mc_msg.pose.orientation = Quaternion(*orientation)
#Button selection
if (self.button_states[GREY_BUTTON]==1 and self.button_states[WHITE_BUTTON]==0): # Close
self.gripper_value += 0.0001
if self.gripper_value > 1.0:
self.gripper_value = 1.0
elif (self.button_states[GREY_BUTTON]==0 and self.button_states[WHITE_BUTTON]==1 ): # Open
self.gripper_value -= 0.0001
if self.gripper_value < 0.0:
self.gripper_value = 0.0
try:
self.gripper_pub.publish(Float64(self.gripper_value))
self.ik_mc_pub.publish(ik_mc_msg)
#~ t1 = time.time()
#~ t2 =time.time()
except rospy.exceptions.ROSException:
pass
#~ dif_time = t2-t1
#~ self.loginfo('Difference of time %0.3f ms' % (dif_time * 1000.0))
#~ self.time_counter = time.clock()
def draw_position_region(self, center_pos):
marker = Marker()
marker.header.frame_id = self.frame_id
marker.header.stamp = rospy.Time.now()
marker.id = 0
marker.type = marker.SPHERE
marker.ns = 'position_region'
marker.action = marker.ADD
marker.pose.position.x = center_pos[0]
marker.pose.position.y = center_pos[1]
marker.pose.position.z = center_pos[2]
#~ Workspace ellipsoid: self.workspace
marker.scale.x = 2 * self.workspace[0]/self.position_ratio
marker.scale.y = 2 * self.workspace[1]/self.position_ratio
marker.scale.z = 2 * self.workspace[2]/self.position_ratio
marker.color.a = 0.5
marker.color.r = 1.0
marker.color.g = 1.0
marker.color.b = 0.2
#~ Publish
self.vis_pub.publish(marker)
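# Illustrative sketch (not part of the original node): the ellipsoid membership
# test used by inside_workspace(), shown standalone. A point p lies inside an
# axis-aligned ellipsoid with semi-axes (a, b, c) when
# (px/a)**2 + (py/b)**2 + (pz/c)**2 < 1. The numbers below are hypothetical and
# use the default workspace of [width, depth, height] = [140, 55, 100] mm.
def _example_inside_ellipsoid():
  workspace = np.array([140.0, 55.0, 100.0])
  point = np.array([50.0, 10.0, 20.0])
  return np.sum(np.divide(point ** 2, workspace ** 2)) < 1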
if __name__ == '__main__':
rospy.init_node('rate_position_controller', log_level=rospy.WARN)
try:
controller = RatePositionController()
controller.execute()
except rospy.exceptions.ROSInterruptException:
pass
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
try:
import pathlib
except ImportError:
import pathlib2 as pathlib # py2 compat
import pytest
import pyarrow as pa
from pyarrow.tests.test_io import gzip_compress, gzip_decompress
from pyarrow.fs import (FileType, Selector, FileSystem, LocalFileSystem,
SubTreeFileSystem)
@pytest.fixture
def localfs(request, tempdir):
return dict(
fs=LocalFileSystem(),
pathfn=lambda p: (tempdir / p).as_posix(),
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.fixture
def subtree_localfs(request, tempdir, localfs):
prefix = 'subtree/prefix/'
(tempdir / prefix).mkdir(parents=True)
return dict(
fs=SubTreeFileSystem(prefix, localfs['fs']),
pathfn=prefix.__add__,
allow_move_dir=True,
allow_append_to_file=True,
)
@pytest.mark.s3
@pytest.fixture
def s3fs(request, minio_server):
from pyarrow.s3fs import S3Options, S3FileSystem
address, access_key, secret_key = minio_server
bucket = 'pyarrow-filesystem/'
options = S3Options(
endpoint_override=address,
access_key=access_key,
secret_key=secret_key,
scheme='http'
)
fs = S3FileSystem(options)
fs.create_dir(bucket)
return dict(
fs=fs,
pathfn=bucket.__add__,
allow_move_dir=False,
allow_append_to_file=False,
)
@pytest.fixture
def subtree_s3fs(request, s3fs):
prefix = 'pyarrow-filesystem/prefix/'
return dict(
fs=SubTreeFileSystem(prefix, s3fs['fs']),
pathfn=prefix.__add__,
allow_move_dir=False,
allow_append_to_file=False,
)
@pytest.fixture(params=[
pytest.param(
pytest.lazy_fixture('localfs'),
id='LocalFileSystem()'
),
pytest.param(
pytest.lazy_fixture('subtree_localfs'),
id='SubTreeFileSystem(LocalFileSystem())'
),
pytest.param(
pytest.lazy_fixture('s3fs'),
id='S3FileSystem'
),
pytest.param(
pytest.lazy_fixture('subtree_s3fs'),
id='SubTreeFileSystem(S3FileSystem())'
)
])
def filesystem_config(request):
return request.param
@pytest.fixture
def fs(request, filesystem_config):
return filesystem_config['fs']
@pytest.fixture
def pathfn(request, filesystem_config):
return filesystem_config['pathfn']
@pytest.fixture
def allow_move_dir(request, filesystem_config):
return filesystem_config['allow_move_dir']
@pytest.fixture
def allow_append_to_file(request, filesystem_config):
return filesystem_config['allow_append_to_file']
def test_cannot_instantiate_base_filesystem():
with pytest.raises(TypeError):
FileSystem()
def test_non_path_like_input_raises(fs):
class Path:
pass
invalid_paths = [1, 1.1, Path(), tuple(), {}, [], lambda: 1,
pathlib.Path()]
for path in invalid_paths:
with pytest.raises(TypeError):
fs.create_dir(path)
def test_get_target_stats(fs, pathfn):
aaa = pathfn('a/aa/aaa/')
bb = pathfn('a/bb')
c = pathfn('c.txt')
fs.create_dir(aaa)
with fs.open_output_stream(bb):
pass # touch
with fs.open_output_stream(c) as fp:
fp.write(b'test')
aaa_stat, bb_stat, c_stat = fs.get_target_stats([aaa, bb, c])
assert aaa_stat.path == aaa
assert 'aaa' in repr(aaa_stat)
assert aaa_stat.extension == ''
assert isinstance(aaa_stat.mtime, datetime)
assert bb_stat.path == str(bb)
assert bb_stat.base_name == 'bb'
assert bb_stat.extension == ''
assert bb_stat.type == FileType.File
assert bb_stat.size == 0
assert isinstance(bb_stat.mtime, datetime)
assert c_stat.path == str(c)
assert c_stat.base_name == 'c.txt'
assert c_stat.extension == 'txt'
assert c_stat.type == FileType.File
assert c_stat.size == 4
assert isinstance(c_stat.mtime, datetime)
def test_get_target_stats_with_selector(fs, pathfn):
base_dir = pathfn('selector-dir/')
file_a = pathfn('selector-dir/test_file_a')
file_b = pathfn('selector-dir/test_file_b')
dir_a = pathfn('selector-dir/test_dir_a')
try:
fs.create_dir(base_dir)
with fs.open_output_stream(file_a):
pass
with fs.open_output_stream(file_b):
pass
fs.create_dir(dir_a)
selector = Selector(base_dir, allow_non_existent=False, recursive=True)
assert selector.base_dir == base_dir
stats = fs.get_target_stats(selector)
assert len(stats) == 3
for st in stats:
if st.path.endswith(file_a):
assert st.type == FileType.File
elif st.path.endswith(file_b):
assert st.type == FileType.File
elif st.path.endswith(dir_a):
assert st.type == FileType.Directory
else:
raise ValueError('unexpected path {}'.format(st.path))
finally:
fs.delete_file(file_a)
fs.delete_file(file_b)
fs.delete_dir(dir_a)
fs.delete_dir(base_dir)
def test_create_dir(fs, pathfn):
d = pathfn('test-directory/')
with pytest.raises(pa.ArrowIOError):
fs.delete_dir(d)
fs.create_dir(d)
fs.delete_dir(d)
d = pathfn('deeply/nested/test-directory/')
fs.create_dir(d, recursive=True)
fs.delete_dir(d)
def test_delete_dir(fs, pathfn):
d = pathfn('directory/')
nd = pathfn('directory/nested/')
fs.create_dir(nd)
fs.delete_dir(nd)
fs.delete_dir(d)
with pytest.raises(pa.ArrowIOError):
fs.delete_dir(d)
def test_copy_file(fs, pathfn):
s = pathfn('test-copy-source-file')
t = pathfn('test-copy-target-file')
with fs.open_output_stream(s):
pass
fs.copy_file(s, t)
fs.delete_file(s)
fs.delete_file(t)
def test_move_directory(fs, pathfn, allow_move_dir):
# move directory (doesn't work with S3)
s = pathfn('source-dir/')
t = pathfn('target-dir/')
fs.create_dir(s)
if allow_move_dir:
fs.move(s, t)
with pytest.raises(pa.ArrowIOError):
fs.delete_dir(s)
fs.delete_dir(t)
else:
with pytest.raises(pa.ArrowIOError):
fs.move(s, t)
def test_move_file(fs, pathfn):
s = pathfn('test-move-source-file')
t = pathfn('test-move-target-file')
with fs.open_output_stream(s):
pass
fs.move(s, t)
with pytest.raises(pa.ArrowIOError):
fs.delete_file(s)
fs.delete_file(t)
def test_delete_file(fs, pathfn):
p = pathfn('test-delete-target-file')
with fs.open_output_stream(p):
pass
fs.delete_file(p)
with pytest.raises(pa.ArrowIOError):
fs.delete_file(p)
d = pathfn('test-delete-nested')
fs.create_dir(d)
f = pathfn('test-delete-nested/target-file')
with fs.open_output_stream(f) as s:
s.write(b'data')
fs.delete_dir(d)
def identity(v):
return v
@pytest.mark.parametrize(
('compression', 'buffer_size', 'compressor'),
[
(None, None, identity),
(None, 64, identity),
('gzip', None, gzip_compress),
('gzip', 256, gzip_compress),
]
)
def test_open_input_stream(fs, pathfn, compression, buffer_size, compressor):
p = pathfn('open-input-stream')
data = b'some data for reading\n' * 512
with fs.open_output_stream(p) as s:
s.write(compressor(data))
with fs.open_input_stream(p, compression, buffer_size) as s:
result = s.read()
assert result == data
def test_open_input_file(fs, pathfn):
p = pathfn('open-input-file')
data = b'some data' * 1024
with fs.open_output_stream(p) as s:
s.write(data)
read_from = len(b'some data') * 512
with fs.open_input_file(p) as f:
f.seek(read_from)
result = f.read()
assert result == data[read_from:]
@pytest.mark.parametrize(
('compression', 'buffer_size', 'decompressor'),
[
(None, None, identity),
(None, 64, identity),
('gzip', None, gzip_decompress),
('gzip', 256, gzip_decompress),
]
)
def test_open_output_stream(fs, pathfn, compression, buffer_size,
decompressor):
p = pathfn('open-output-stream')
data = b'some data for writing' * 1024
with fs.open_output_stream(p, compression, buffer_size) as f:
f.write(data)
with fs.open_input_stream(p, compression, buffer_size) as f:
assert f.read(len(data)) == data
@pytest.mark.parametrize(
('compression', 'buffer_size', 'compressor', 'decompressor'),
[
(None, None, identity, identity),
(None, 64, identity, identity),
('gzip', None, gzip_compress, gzip_decompress),
('gzip', 256, gzip_compress, gzip_decompress),
]
)
def test_open_append_stream(fs, pathfn, compression, buffer_size, compressor,
decompressor, allow_append_to_file):
p = pathfn('open-append-stream')
initial = compressor(b'already existing')
with fs.open_output_stream(p) as s:
s.write(initial)
if allow_append_to_file:
with fs.open_append_stream(p, compression, buffer_size) as f:
f.write(b'\nnewly added')
with fs.open_input_stream(p) as f:
result = f.read()
result = decompressor(result)
assert result == b'already existing\nnewly added'
else:
with pytest.raises(pa.ArrowNotImplementedError):
fs.open_append_stream(p, compression, buffer_size)
@pytest.mark.s3
def test_s3_options(minio_server):
from pyarrow.s3fs import S3Options
options = S3Options()
assert options.region == 'us-east-1'
options.region = 'us-west-1'
assert options.region == 'us-west-1'
assert options.scheme == 'https'
options.scheme = 'http'
assert options.scheme == 'http'
assert options.endpoint_override == ''
options.endpoint_override = 'localhost:8999'
assert options.endpoint_override == 'localhost:8999'
with pytest.raises(ValueError):
S3Options(access_key='access')
with pytest.raises(ValueError):
S3Options(secret_key='secret')
address, access_key, secret_key = minio_server
options = S3Options(
access_key=access_key,
secret_key=secret_key,
endpoint_override=address,
scheme='http'
)
assert options.scheme == 'http'
assert options.endpoint_override == address
|
|
from test.support import run_unittest
import cgi
import os
import sys
import tempfile
import unittest
from io import StringIO
from warnings import catch_warnings, filterwarnings
class HackedSysModule:
# The regression test will have real values in sys.argv, which
# will completely confuse the test of the cgi module
argv = []
stdin = sys.stdin
cgi.sys = HackedSysModule()
class ComparableException:
def __init__(self, err):
self.err = err
def __str__(self):
return str(self.err)
def __eq__(self, anExc):
if not isinstance(anExc, Exception):
return NotImplemented
return (self.err.__class__ == anExc.__class__ and
self.err.args == anExc.args)
def __getattr__(self, attr):
return getattr(self.err, attr)
def do_test(buf, method):
env = {}
if method == "GET":
fp = None
env['REQUEST_METHOD'] = 'GET'
env['QUERY_STRING'] = buf
elif method == "POST":
fp = StringIO(buf)
env['REQUEST_METHOD'] = 'POST'
env['CONTENT_TYPE'] = 'application/x-www-form-urlencoded'
env['CONTENT_LENGTH'] = str(len(buf))
else:
raise ValueError("unknown method: %s" % method)
try:
return cgi.parse(fp, env, strict_parsing=1)
except Exception as err:
return ComparableException(err)
parse_strict_test_cases = [
("", ValueError("bad query field: ''")),
("&", ValueError("bad query field: ''")),
("&&", ValueError("bad query field: ''")),
(";", ValueError("bad query field: ''")),
(";&;", ValueError("bad query field: ''")),
# Should the next few really be valid?
("=", {}),
("=&=", {}),
("=;=", {}),
    # The rest of these seem to make sense
("=a", {'': ['a']}),
("&=a", ValueError("bad query field: ''")),
("=a&", ValueError("bad query field: ''")),
("=&a", ValueError("bad query field: 'a'")),
("b=a", {'b': ['a']}),
("b+=a", {'b ': ['a']}),
("a=b=a", {'a': ['b=a']}),
("a=+b=a", {'a': [' b=a']}),
("&b=a", ValueError("bad query field: ''")),
("b&=a", ValueError("bad query field: 'b'")),
("a=a+b&b=b+c", {'a': ['a b'], 'b': ['b c']}),
("a=a+b&a=b+a", {'a': ['a b', 'b a']}),
("x=1&y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("x=1;y=2.0&z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("x=1;y=2.0;z=2-3.%2b0", {'x': ['1'], 'y': ['2.0'], 'z': ['2-3.+0']}),
("Hbc5161168c542333633315dee1182227:key_store_seqid=400006&cuyer=r&view=bustomer&order_id=0bb2e248638833d48cb7fed300000f1b&expire=964546263&lobale=en-US&kid=130003.300038&ss=env",
{'Hbc5161168c542333633315dee1182227:key_store_seqid': ['400006'],
'cuyer': ['r'],
'expire': ['964546263'],
'kid': ['130003.300038'],
'lobale': ['en-US'],
'order_id': ['0bb2e248638833d48cb7fed300000f1b'],
'ss': ['env'],
'view': ['bustomer'],
}),
("group_id=5470&set=custom&_assigned_to=31392&_status=1&_category=100&SUBMIT=Browse",
{'SUBMIT': ['Browse'],
'_assigned_to': ['31392'],
'_category': ['100'],
'_status': ['1'],
'group_id': ['5470'],
'set': ['custom'],
})
]
def norm(seq):
return sorted(seq, key=repr)
def first_elts(list):
return [p[0] for p in list]
def first_second_elts(list):
return [(p[0], p[1][0]) for p in list]
def gen_result(data, environ):
fake_stdin = StringIO(data)
fake_stdin.seek(0)
form = cgi.FieldStorage(fp=fake_stdin, environ=environ)
result = {}
for k, v in dict(form).items():
result[k] = type(v) is list and form.getlist(k) or v.value
return result
class CgiTests(unittest.TestCase):
def test_strict(self):
for orig, expect in parse_strict_test_cases:
# Test basic parsing
d = do_test(orig, "GET")
self.assertEqual(d, expect, "Error parsing %s" % repr(orig))
d = do_test(orig, "POST")
self.assertEqual(d, expect, "Error parsing %s" % repr(orig))
env = {'QUERY_STRING': orig}
fs = cgi.FieldStorage(environ=env)
if type(expect) == type({}):
# test dict interface
self.assertEqual(len(expect), len(fs))
self.assertEqual(norm(expect.keys()), norm(fs.keys()))
##self.assertEqual(norm(expect.values()), norm(fs.values()))
##self.assertEqual(norm(expect.items()), norm(fs.items()))
self.assertEqual(fs.getvalue("nonexistent field", "default"), "default")
# test individual fields
for key in expect.keys():
expect_val = expect[key]
self.assert_(key in fs)
if len(expect_val) > 1:
self.assertEqual(fs.getvalue(key), expect_val)
else:
self.assertEqual(fs.getvalue(key), expect_val[0])
def test_log(self):
cgi.log("Testing")
cgi.logfp = StringIO()
cgi.initlog("%s", "Testing initlog 1")
cgi.log("%s", "Testing log 2")
self.assertEqual(cgi.logfp.getvalue(), "Testing initlog 1\nTesting log 2\n")
if os.path.exists("/dev/null"):
cgi.logfp = None
cgi.logfile = "/dev/null"
cgi.initlog("%s", "Testing log 3")
cgi.log("Testing log 4")
def test_fieldstorage_readline(self):
# FieldStorage uses readline, which has the capacity to read all
# contents of the input file into memory; we use readline's size argument
# to prevent that for files that do not contain any newlines in
# non-GET/HEAD requests
class TestReadlineFile:
def __init__(self, file):
self.file = file
self.numcalls = 0
def readline(self, size=None):
self.numcalls += 1
if size:
return self.file.readline(size)
else:
return self.file.readline()
def __getattr__(self, name):
file = self.__dict__['file']
a = getattr(file, name)
if not isinstance(a, int):
setattr(self, name, a)
return a
f = TestReadlineFile(tempfile.TemporaryFile("w+"))
f.write('x' * 256 * 1024)
f.seek(0)
env = {'REQUEST_METHOD':'PUT'}
fs = cgi.FieldStorage(fp=f, environ=env)
# if we're not chunking properly, readline is only called twice
# (by read_binary); if we are chunking properly, it will be called 5 times
# as long as the chunksize is 1 << 16.
self.assert_(f.numcalls > 2)
def test_fieldstorage_multipart(self):
#Test basic FieldStorage multipart parsing
env = {'REQUEST_METHOD':'POST', 'CONTENT_TYPE':'multipart/form-data; boundary=---------------------------721837373350705526688164684', 'CONTENT_LENGTH':'558'}
postdata = """-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="id"
1234
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="title"
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="file"; filename="test.txt"
Content-Type: text/plain
Testing 123.
-----------------------------721837373350705526688164684
Content-Disposition: form-data; name="submit"
Add\x20
-----------------------------721837373350705526688164684--
"""
fs = cgi.FieldStorage(fp=StringIO(postdata), environ=env)
self.assertEquals(len(fs.list), 4)
expect = [{'name':'id', 'filename':None, 'value':'1234'},
{'name':'title', 'filename':None, 'value':''},
{'name':'file', 'filename':'test.txt', 'value':'Testing 123.'},
{'name':'submit', 'filename':None, 'value':' Add '}]
for x in range(len(fs.list)):
for k, exp in expect[x].items():
got = getattr(fs.list[x], k)
self.assertEquals(got, exp)
_qs_result = {
'key1': 'value1',
'key2': ['value2x', 'value2y'],
'key3': 'value3',
'key4': 'value4'
}
def testQSAndUrlEncode(self):
data = "key2=value2x&key3=value3&key4=value4"
environ = {
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'QUERY_STRING': 'key1=value1&key2=value2y',
'REQUEST_METHOD': 'POST',
}
v = gen_result(data, environ)
self.assertEqual(self._qs_result, v)
def testQSAndFormData(self):
data = """
---123
Content-Disposition: form-data; name="key2"
value2y
---123
Content-Disposition: form-data; name="key3"
value3
---123
Content-Disposition: form-data; name="key4"
value4
---123--
"""
environ = {
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': 'multipart/form-data; boundary=-123',
'QUERY_STRING': 'key1=value1&key2=value2x',
'REQUEST_METHOD': 'POST',
}
v = gen_result(data, environ)
self.assertEqual(self._qs_result, v)
def testQSAndFormDataFile(self):
data = """
---123
Content-Disposition: form-data; name="key2"
value2y
---123
Content-Disposition: form-data; name="key3"
value3
---123
Content-Disposition: form-data; name="key4"
value4
---123
Content-Disposition: form-data; name="upload"; filename="fake.txt"
Content-Type: text/plain
this is the content of the fake file
---123--
"""
environ = {
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': 'multipart/form-data; boundary=-123',
'QUERY_STRING': 'key1=value1&key2=value2x',
'REQUEST_METHOD': 'POST',
}
result = self._qs_result.copy()
result.update({
'upload': 'this is the content of the fake file'
})
v = gen_result(data, environ)
self.assertEqual(result, v)
def test_deprecated_parse_qs(self):
# this func is moved to urlparse, this is just a sanity check
with catch_warnings():
filterwarnings('ignore',
'cgi.parse_qs is deprecated, use urllib.parse.parse_qs instead',
DeprecationWarning)
self.assertEqual({'a': ['A1'], 'B': ['B3'], 'b': ['B2']},
cgi.parse_qs('a=A1&b=B2&B=B3'))
def test_deprecated_parse_qsl(self):
# this func is moved to urlparse, this is just a sanity check
with catch_warnings():
filterwarnings('ignore',
'cgi.parse_qsl is deprecated, use urllib.parse.parse_qsl instead',
DeprecationWarning)
self.assertEqual([('a', 'A1'), ('b', 'B2'), ('B', 'B3')],
cgi.parse_qsl('a=A1&b=B2&B=B3'))
def test_main():
run_unittest(CgiTests)
if __name__ == '__main__':
test_main()
|
|
#!/usr/bin/env python3
"""
dbsync: Database Sync: Command Line Interface
Python 3.x is required for consistent Unicode processing of the data stored in the DB.
This CLI tool allows scripting Database Syncing between Zones.
dbsync_server.py will call into the same functions, but is intended for interactive,
multi-stage, multi-user transactions. The CLI is meant for automation or
single-user interactive runs.
"""
__author__ = 'Geoff Howland <[email protected]>'
import sys
import os
import getopt
import traceback
import yaml
# # Custom script imports
# import zone_manager
import AbsoluteImport
AbsoluteImport.RegisterPathPrefix('procblock', '/Users/ghowland/projects/dropstar/dropstar/control/procblock/')
AbsoluteImport.RegisterPathPrefix('unidist', '/Users/ghowland/projects/dropstar/dropstar/control/unidist/')
AbsoluteImport.RegisterPathPrefix('dbsync', '/Users/ghowland/projects/dbsync/')
from AbsoluteImport import Import
log = Import('log', prefix='unidist').log
zone_manager = Import('zone_manager', prefix='dbsync')
print(zone_manager)
# Commands mapping to their descriptions, used to auto-build Usage info
#NOTE(g): Value Tuple = (args, description)
COMMANDS = {
'list':('', 'List Groups and their Zones'),
  'compare':('<database set> <source_zone> <target_zone> [instance]', 'Compare a Database Set source and target Zone Database Instances, optional DB Instance'),
  'sync':('<database set> <source_zone> <target_zone> [instance]', 'Sync a Database Set source and target Zone Database Instances, optional DB Instance'),
}
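# Illustrative CLI invocations (the zone/set names below are hypothetical; only the
# command shapes come from COMMANDS above):
#   ./dbsync.py list
#   ./dbsync.py compare billing prod_zone staging_zone
#   ./dbsync.py sync billing prod_zone staging_zone billing_01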
def GetDatabaseSetInstances(database_set, directory_prefix=None):
"""Returns dict of instances"""
database_set_data = zone_manager.DATABASE_SETS
yaml_path = '%s' % database_set_data[database_set]['data']
if directory_prefix:
yaml_path = '%s/%s' % (directory_prefix, yaml_path)
data = yaml.load(open(yaml_path))
return data
def ProcessCommand(command, args, options):
"""Process the command: routes to specific command functions
Args:
command: string, command to process - routing rule
args: list of strings, args for the command
options: dict of string/string, options for the command
"""
#print('Process command: %s: %s: %s' % (command, options, args))
#TODO(g): Whatever needs to be done gets set into result...
zones = zone_manager.ZONES
database_set_data = zone_manager.DATABASE_SETS
output = ''
# List available data...
if command == 'list':
result = '\nZones:\n\n'
# Loop over Zones
for (zone, database_sets) in zones.items():
# Loop over Database Sets in each Zone
for (database_set, database_data) in database_sets.items():
        # If this database set exists and has data, load it (based on Database Set conf data)
if database_set in database_set_data and 'data' in database_set_data[database_set]:
data = yaml.load(open(database_set_data[database_set]['data']))
data_str = ' Instances: %s' % len(data)
else:
data = None
data_str = ''
result += ' Zone: %-15s Set: %-15s Host: %-40s%s\n' % \
(zone, database_set, database_data['host'],
data_str)
# Compare the source and target zone database set instances
elif command == 'compare':
# Test all the argument cases for failures
if len(args) < 1:
Usage('Compare: Missing 3 arguments: <database set> <source zone> <target zone>')
elif len(args) < 2:
Usage('Compare: Missing 2 arguments: <source zone> <target zone>')
elif len(args) < 3:
Usage('Compare: Missing 1 argument: <target zone>')
elif len(args) > 4:
Usage('Compare: Too many arguments. 3 are required, the 4th is optional')
# Set the variables
database_set = args[0]
zone_source = args[1]
zone_target = args[2]
if len(args) > 3:
instance = args[3]
else:
instance = None
# Validate arguments
if database_set not in database_set_data:
Usage('Database set "%s" is not a valid dataset listed in: conf/database_sets.yaml' % database_set)
if zone_source not in zones:
Usage('Source Zone "%s" not a valid zone listed in: conf/zones.yaml' % zone_source)
if zone_target not in zones:
Usage('Target Zone "%s" not a valid zone listed in: conf/zones.yaml' % zone_target)
# Get the Database Set data
#yaml_path = '%s/%s' % (__file__, database_set_data[database_set]['data'])
yaml_path = '%s' % database_set_data[database_set]['data']
data = yaml.load(open(yaml_path))
if instance != None and instance not in data:
Usage('Instance "%s" not found in Database Set data: %s' % (instance, database_set_data[database_set]['data']))
output += 'Comparing: %s %s %s %s' % (database_set, zone_source, zone_target, instance)
#TEST: Attempt querying zone, naively...
schema_source = zone_manager.GetSchema(zone_source, database_set, instance)
schema_target = zone_manager.GetSchema(zone_target, database_set, instance)
# print('schema source:\n%s' % str(schema_source))
# print('\n\n');
# print('schema target:\n%s' % str(schema_target))
comparison = zone_manager.CompareSchemas(schema_source, schema_target)
output += '\n\nComparison: \n'
import pprint
output += pprint.pformat(comparison)
# Get the Schema Diff in a key-oriented dictionary format
schema_diff_keys = zone_manager.GenerateSchemaDiffKeyDictionary(zone_source, zone_target, database_set, instance, comparison)
output += '\n\nSchema Key Diff: \n'
import pprint
output += pprint.pformat(schema_diff_keys)
# Generate Forward and Reverse commands to sync the Target DB to Source DB
(forward_commands, reverse_commands) = zone_manager.GenerateSchemaSyncCommands(zone_source, \
zone_target, database_set, instance, \
comparison)
sql_comparison_forward = zone_manager.GetSql_DatabaseDiff(zone_source, zone_target, database_set, instance)
sql_comparison_reverse = zone_manager.GetSql_DatabaseDiff(zone_target, zone_source, database_set, instance)
output += '\n\nSQL Comparison: \n'
import pprint
output += pprint.pformat(sql_comparison_forward)
# Get the Schema Diff in a key-oriented dictionary format
data_diff_keys = zone_manager.GenerateDataDiffKeyDictionary(zone_source, zone_target, database_set, instance, sql_comparison_forward)
output += '\n\nData Key Diff: \n'
import pprint
output += pprint.pformat(data_diff_keys)
forward_commands += zone_manager.CreateSQLFromDataDiff(zone_source, zone_target, database_set, instance, sql_comparison_forward)
reverse_commands += zone_manager.CreateSQLFromDataDiff(zone_source, zone_target, database_set, instance, sql_comparison_reverse)
# If we have any commands
if forward_commands or reverse_commands:
output += '\n\nCompare completed: \n\nForward: %s\n\nReverse: %s\n\n' % (pprint.pformat(forward_commands), \
pprint.pformat(reverse_commands))
else:
output += '\n\nCompare completed: No work to do'
result = output
# Sync the source and target zone database set instances
elif command == 'sync':
# Test all the argument cases for failures
    if len(args) < 1:
      Usage('Sync: Missing 3 arguments: <database set> <source zone> <target zone>')
    elif len(args) < 2:
      Usage('Sync: Missing 2 arguments: <source zone> <target zone>')
    elif len(args) < 3:
      Usage('Sync: Missing 1 argument: <target zone>')
    elif len(args) > 4:
      Usage('Sync: Too many arguments. 3 are required, the 4th is optional')
# Set the variables
database_set = args[0]
zone_source = args[1]
zone_target = args[2]
if len(args) > 3:
instance = args[3]
else:
instance = None
# Validate arguments
if database_set not in database_set_data:
Usage('Database set "%s" is not a valid dataset listed in: conf/database_sets.yaml' % database_set)
if zone_source not in zones:
Usage('Source Zone "%s" not a valid zone listed in: conf/zones.yaml' % zone_source)
if zone_target not in zones:
Usage('Target Zone "%s" not a valid zone listed in: conf/zones.yaml' % zone_target)
# Get the Database Set data
data = yaml.load(open(database_set_data[database_set]['data']))
if instance != None and instance not in data:
Usage('Instance "%s" not found in Database Set data: %s' % (instance, database_set_data[database_set]['data']))
print('Syncing: %s %s %s %s' % (database_set, zone_source, zone_target, instance))
# #TEST: Attempt querying zone, naively...
schema_source = zone_manager.GetSchema(zone_source, database_set, instance)
schema_target = zone_manager.GetSchema(zone_target, database_set, instance)
# # print('schema source:\n%s' % str(schema_source))
# # print('\n\n');
# # print('schema target:\n%s' % str(schema_target))
comparison = zone_manager.CompareSchemas(schema_source, schema_target)
# #print('Comparison: \n')
# #import pprint
# #pprint.pprint(comparison)
# # Generate Forward and Reverse commands to sync the Target DB to Source DB
# (forward_commands, reverse_commands) = zone_manager.GenerateSchemaSyncCommands(comparison, zone_source, \
# zone_target, database_set, instance)
# schema_source = zone_manager.GetSchema(zone_source, database_set, instance)
# schema_target = zone_manager.GetSchema(zone_target, database_set, instance)
# print('schema source:\n%s' % str(schema_source))
# print('\n\n');
# print('schema target:\n%s' % str(schema_target))
# Generate Forward and Reverse commands to sync the Target DB to Source DB
(forward_commands, reverse_commands) = zone_manager.GenerateSchemaSyncCommands(zone_source, \
zone_target, database_set, instance, \
comparison)
sql_comparison_forward = zone_manager.GetSql_DatabaseDiff(zone_source, zone_target, database_set, instance)
sql_comparison_reverse = zone_manager.GetSql_DatabaseDiff(zone_target, zone_source, database_set, instance)
forward_commands += zone_manager.CreateSQLFromDataDiff(zone_source, zone_target, database_set, instance, sql_comparison_forward)
reverse_commands += zone_manager.CreateSQLFromDataDiff(zone_source, zone_target, database_set, instance, sql_comparison_reverse)
# # If we have any commands
# if forward_commands or reverse_commands:
# result = 'Compare completed: \n\nForward: %s\n\nReverse: %s\n\n' % (pprint.pformat(forward_commands), \
# pprint.pformat(reverse_commands))
# else:
# result = 'Compare completed: No work to do'
import pprint
print('Applying SQL:\n%s' % pprint.pformat(forward_commands))
# Sync the zone instances with the Forward commands
zone_manager.SyncTargetZone(zone_target, database_set, instance, forward_commands)
result = 'Success'
else:
#NOTE(g): Running from CLI will test for this, so this is for API usage
raise Exception('Unknown command: %s' % command)
# Return whatever the result of the command was, so it can be used or formatted
return result
def Usage(error=None, exit_code=None):
"""Print usage information, any errors, and exit.
If errors, exit code = 1, otherwise 0.
"""
if error:
print('\nerror: %s' % error)
if exit_code == None:
exit_code = 1
else:
if exit_code == None:
exit_code = 0
print()
print('usage: %s [options] <command>' % os.path.basename(sys.argv[0]))
print()
print('Commands:')
keys = list(COMMANDS.keys())
keys.sort()
for key in keys:
(args, description) = COMMANDS[key]
if args:
command_str = '%s %s' % (key, args)
print(' %s\n %s' % (command_str, description))
else:
print(' %-23s %s' % (key, description))
#print(' other Other commands go here')
print()
print('Options:')
print()
print(' -h, -?, --help This usage information')
print(' -v, --verbose Verbose output')
print()
sys.exit(exit_code)
def Main(args=None):
if not args:
args = []
long_options = ['help', 'verbose']
try:
(options, args) = getopt.getopt(args, '?hv', long_options)
except Exception as exc:
Usage(exc)
# Dictionary of command options, with defaults
command_options = {}
command_options['verbose'] = False
# Process out CLI options
for (option, value) in options:
# Help
if option in ('-h', '-?', '--help'):
Usage()
# Verbose output information
elif option in ('-v', '--verbose'):
command_options['verbose'] = True
# Invalid option
else:
Usage('Unknown option: %s' % option)
# Ensure we at least have a command, it's required
if len(args) < 1:
    Usage('No command specified')
# Get the command
command = args[0]
# If this is an unknown command, say so
if command not in COMMANDS:
Usage('Command "%s" unknown. Commands: %s' % (command, ', '.join(COMMANDS)))
# If there are any command args, get them
command_args = args[1:]
try:
result = ProcessCommand(command, command_args, command_options)
# Do something with the result...
print(result)
except Exception as exc:
error = 'Error:\n%s\n%s\n' % ('\n'.join(traceback.format_tb(exc.__traceback__)), str(exc))
#Log(error)
print(error)
sys.exit(1)
if __name__ == '__main__':
Main(sys.argv[1:])
|
|
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from rest_framework import status
from core.tests.base import BaseTestCase
class UserAPIViewTestCase(BaseTestCase):
def test_user_change_email(self):
test_user_email_url = reverse('api:user:email')
test_new_email = "[email protected]"
test_data = {
'new_email': test_new_email,
}
response = self.client.patch(
test_user_email_url,
test_data,
)
self.assertEqual(
response.status_code,
status.HTTP_201_CREATED,
)
self.assertEqual(
get_user_model().objects.last().email,
test_new_email,
)
def test_user_change_password(self):
test_user_password_url = reverse('api:user:password')
test_new_password = "test_new_password"
test_data = {
'current_password': self.test_password,
'new_password': test_new_password,
'confirm_new_password': test_new_password,
}
response = self.client.patch(
test_user_password_url,
test_data,
)
self.assertEqual(
response.status_code,
status.HTTP_201_CREATED,
)
self.assertTrue(
response.data.get("is_current_password_valid"),
)
self.assertTrue(
response.data.get("does_match_confirm_password"),
)
self.assertTrue(
get_user_model().objects.last().check_password(test_new_password),
)
def test_user_enter_wrong_current_password(self):
test_user_password_url = reverse('api:user:password')
test_wrong_password = "test_wrong_password"
test_new_password = "test_new_password"
test_data = {
'current_password': test_wrong_password,
'new_password': test_new_password,
'confirm_new_password': test_new_password,
}
response = self.client.patch(
test_user_password_url,
test_data,
)
self.assertEqual(
response.status_code,
status.HTTP_201_CREATED,
)
self.assertFalse(
response.data.get("is_current_password_valid"),
)
self.assertTrue(
response.data.get("does_match_confirm_password"),
)
self.assertFalse(
get_user_model().objects.last().check_password(test_new_password),
)
    def test_new_password_does_not_match_confirmation(self):
test_user_password_url = reverse('api:user:password')
test_new_password = "test_new_password"
test_data = {
'current_password': self.test_password,
'new_password': test_new_password,
'confirm_new_password': test_new_password+"!",
}
response = self.client.patch(
test_user_password_url,
test_data,
)
self.assertEqual(
response.status_code,
status.HTTP_201_CREATED,
)
self.assertTrue(
response.data.get("is_current_password_valid"),
)
self.assertFalse(
response.data.get("does_match_confirm_password"),
)
self.assertFalse(
get_user_model().objects.last().check_password(test_new_password),
)
    def test_user_enter_wrong_current_password_and_new_password_does_not_match_confirmation(self):
test_user_password_url = reverse('api:user:password')
test_wrong_password = "test_wrong_password"
test_new_password = "test_new_password"
test_data = {
'current_password': test_wrong_password,
'new_password': test_new_password,
'confirm_new_password': test_new_password+"!",
}
response = self.client.patch(
test_user_password_url,
test_data,
)
self.assertEqual(
response.status_code,
status.HTTP_201_CREATED,
)
self.assertFalse(
response.data.get("is_current_password_valid"),
)
self.assertFalse(
response.data.get("does_match_confirm_password"),
)
self.assertFalse(
get_user_model().objects.last().check_password(test_new_password),
)
def test_get_default_email_notification(self):
test_user_email_notification_url = reverse(
'api:user:email_notification',
)
response = self.client.get(
test_user_email_notification_url,
)
self.assertEqual(
response.status_code,
status.HTTP_201_CREATED,
)
self.assertTrue(
response.data.get("email_notification"),
)
def test_get_changed_email_notification(self):
test_user_email_notification_url = reverse(
'api:user:email_notification',
)
self.user.email_notification = False
self.user.save()
response = self.client.get(
test_user_email_notification_url,
)
self.assertEqual(
response.status_code,
status.HTTP_201_CREATED,
)
self.assertFalse(
response.data.get("email_notification"),
)
def test_user_set_email_notification_true(self):
        test_user_email_notification_url = reverse('api:user:email_notification')
self.user.email_notification = True
self.user.save()
test_data = {
'email_notification': 'off',
}
response = self.client.patch(
            test_user_email_notification_url,
test_data,
)
self.assertEqual(
response.status_code,
status.HTTP_201_CREATED,
)
self.assertFalse(
get_user_model().objects.last().email_notification,
)
def test_user_set_email_notification_false(self):
        test_user_email_notification_url = reverse('api:user:email_notification')
self.user.email_notification = False
self.user.save()
test_data = {
'email_notification': 'on',
}
response = self.client.patch(
            test_user_email_notification_url,
test_data,
)
self.assertEqual(
response.status_code,
status.HTTP_201_CREATED,
)
self.assertTrue(
get_user_model().objects.last().email_notification,
)
def test_delete_user(self):
test_user_delete_url = reverse('api:user:delete')
response = self.client.delete(
test_user_delete_url,
)
self.assertEqual(
response.status_code,
status.HTTP_201_CREATED,
)
self.assertEqual(
get_user_model().objects.count(),
0,
)
|
|
import requests
from itertools import chain
from time import sleep
import datetime
import calendar
import requests.compat
import six
class StackAPIError(Exception):
"""
The Exception that is thrown when ever there is an API error.
This utilizes the values returned by the API and described
here: http://api.stackexchange.com/docs/types/error
:param url: (string) The URL that was called and generated an error
:param error: (int) The `error_id` returned by the API (should be an int)
    :param code: (string) The `error_name` returned by the API
    :param message: (string) The human-friendly `error_message` returned by the API
"""
def __init__(self, url, error, code, message):
self.url = url
self.error = error
self.code = code
self.message = message
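# A minimal sketch (not part of the original library) of how a caller might surface
# these fields; `site` is assumed to be an already-constructed StackAPI instance:
#
#   try:
#       site.fetch('answers')
#   except StackAPIError as e:
#       print('API error {} ({}) at {}: {}'.format(e.error, e.code, e.url, e.message))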
class StackAPI(object):
def __init__(self, name=None, version="2.2", **kwargs):
"""
The object used to interact with the Stack Exchange API
:param name: (string) **(Required)** A valid ``api_site_parameter``
(available from http://api.stackexchange.com/docs/sites) which will
be used to connect to a particular site on the Stack Exchange
Network.
:param version: (float) **(Required)** The version of the API you are connecting to.
The default of ``2.2`` is the current version
:param proxy: (dict) (optional) A dictionary of http and https proxy locations
Example:
.. code-block:: python
{'http': 'http://example.com',
'https': 'https://example.com'}
By default, this is ``None``.
        :param max_pages: (int) (optional) The maximum number of pages to retrieve (Default: ``5``)
:param page_size: (int) (optional) The number of elements per page. The API limits this to
a maximum of 100 items on all end points except ``site``
:param key: (string) (optional) An API key
:param access_token: (string) (optional) An access token associated with an application and
a user, to grant more permissions (such as write access)
"""
if not name:
raise ValueError('No Site Name provided')
self.proxy = kwargs.get('proxy', None)
self.max_pages = kwargs.get('max_pages', 5)
self.page_size = kwargs.get('page_size', 100)
self.key = kwargs.get('key', None)
self.access_token = kwargs.get('access_token', None)
self._endpoint = None
self._api_key = None
self._name = None
self._version = version
self._previous_call = None
self._base_url = 'https://api.stackexchange.com/{}/'.format(version)
sites = self.fetch('sites', filter='!*L1*AY-85YllAr2)', pagesize=1000)
for s in sites['items']:
if name == s['api_site_parameter']:
self._name = s['name']
self._api_key = s['api_site_parameter']
break
if not self._name:
raise ValueError('Invalid Site Name provided')
def __repr__(self):
return "<{}> v:<{}> endpoint: {} Last URL: {}".format(self._name,
self._version,
self._endpoint,
self._previous_call)
def fetch(self, endpoint=None, page=1, key=None, filter='default', **kwargs):
"""Returns the results of an API call.
This is the main work horse of the class. It builds the API query
string and sends the request to Stack Exchange. If there are multiple
pages of results, and we've configured `max_pages` to be greater than
1, it will automatically paginate through the results and return a
single object.
Returned data will appear in the `items` key of the resulting
dictionary.
:param endpoint: (string) The API end point being called. Available endpoints are listed on
the official API documentation: http://api.stackexchange.com/docs
This can be as simple as ``fetch('answers')``, to call the answers
end point
If calling an end point that takes additional parameter, such as `id`s
pass the ids as a list to the `ids` key:
.. code-block:: python
                fetch('answers/{ids}', ids=[1,2,3])
This will attempt to retrieve the answers for the three listed ids.
If no end point is passed, a ``ValueError`` will be raised
:param page: (int) The page in the results to start at. By default, it will start on
the first page and automatically paginate until the result set
            reaches ``max_pages``.
:param key: (string) The site you are issuing queries to.
:param filter: (string) The filter to utilize when calling an endpoint. Different filters
will return different keys. The default is ``default`` and this will
still vary depending on what the API returns as default for a
particular endpoint
:param kwargs: Parameters accepted by individual endpoints. These parameters
**must** be named the same as described in the endpoint documentation
:rtype: (dictionary) A dictionary containing wrapper data regarding the API call
and the results of the call in the `items` key. If multiple
pages were received, all of the results will appear in the
``items`` tag.
"""
if not endpoint:
raise ValueError('No end point provided.')
self._endpoint = endpoint
params = {
"pagesize": self.page_size,
"page": page,
"filter": filter
}
if self.key:
params['key'] = self.key
if self.access_token:
params['access_token'] = self.access_token
        # This block will replace {ids} placeholders in end points
# converting .fetch('badges/{ids}', ids=[222, 1306, 99999]) to
# badges/222;1306;99999
for k, value in list(kwargs.items()):
if "{" + k + "}" in endpoint:
# using six for backwards compatibility
if isinstance(value, six.string_types):
endpoint = endpoint.replace("{" + k + "}", requests.compat.quote_plus(str(value)))
else:
# check if value is iterable, based on
# https://stackoverflow.com/questions/1952464/in-python-how-do-i-determine-if-an-object-is-iterable
# notice that a string is also an iterable, that's why it's checked first
try:
iterator = iter(value)
endpoint = endpoint.replace("{" + k + "}", ';'.join(requests.compat.quote_plus(str(x)) for x in iterator))
except TypeError:
# it's not an iterable, represent as string
endpoint = endpoint.replace("{" + k + "}", requests.compat.quote_plus(str(value)))
kwargs.pop(k, None)
date_time_keys = ['fromdate', 'todate', 'since', 'min', 'max']
for k in date_time_keys:
if k in kwargs:
if isinstance(kwargs[k], datetime.datetime):
kwargs[k] = int(calendar.timegm(kwargs[k].utctimetuple()))
        # This block will check whether there are ids remaining
# This would occur if the developer passed `badges` instead of `badges/{ids}` to `fetch`
# If this is the case, then convert to a string and assume this goes at the end of the endpoint
if 'ids' in kwargs:
ids = ';'.join(str(x) for x in kwargs['ids'])
kwargs.pop('ids', None)
endpoint += "/{}".format(ids)
params.update(kwargs)
if self._api_key:
params['site'] = self._api_key
data = []
run_cnt = 1
backoff = 0
total = 0
while run_cnt <= self.max_pages:
run_cnt += 1
base_url = "{}{}/".format(self._base_url, endpoint)
try:
response = requests.get(base_url, params=params, proxies=self.proxy)
except requests.exceptions.ConnectionError as e:
raise StackAPIError(self._previous_call, str(e), str(e), str(e))
self._previous_call = response.url
try:
response.encoding = 'utf-8-sig'
response = response.json()
except ValueError as e:
raise StackAPIError(self._previous_call, str(e), str(e), str(e))
try:
error = response["error_id"]
code = response["error_name"]
message = response["error_message"]
raise StackAPIError(self._previous_call, error, code, message)
except KeyError:
pass # This means there is no error
if key:
data.append(response[key])
else:
data.append(response)
if len(data) < 1:
break
backoff = 0
total = 0
page = 1
if 'backoff' in response:
backoff = int(response['backoff'])
sleep(backoff+1) # Sleep an extra second to ensure no timing issues
if 'total' in response:
total = response['total']
if 'has_more' in response and response['has_more'] and run_cnt <= self.max_pages:
params["page"] += 1
else:
break
r = []
for d in data:
if 'items' in d:
r.extend(d['items'])
result = {'backoff': backoff,
'has_more': False if 'has_more' not in data[-1] else data[-1]['has_more'],
'page': params['page'],
'quota_max': -1 if 'quota_max' not in data[-1] else data[-1]['quota_max'],
'quota_remaining': -1 if 'quota_remaining' not in data[-1] else data[-1]['quota_remaining'],
'total': total,
'items': list(chain(r))}
return result
def send_data(self, endpoint=None, page=1, key=None, filter='default', **kwargs):
"""Sends data to the API.
This call is similar to ``fetch``, but **sends** data to the API instead
of retrieving it.
Returned data will appear in the ``items`` key of the resulting
dictionary.
Sending data **requires** that the ``access_token`` is set. This is enforced
on the API side, not within this library.
:param endpoint: (string) The API end point being called. Available endpoints are listed on
the official API documentation: http://api.stackexchange.com/docs
This can be as simple as ``fetch('answers')``, to call the answers
end point
If calling an end point that takes additional parameter, such as `id`s
pass the ids as a list to the `ids` key:
.. code-block:: python
                fetch('answers/{ids}', ids=[1,2,3])
This will attempt to retrieve the answers for the three listed ids.
If no end point is passed, a ``ValueError`` will be raised
:param page: (int) The page in the results to start at. By default, it will start on
the first page and automatically paginate until the result set
            reaches ``max_pages``.
:param key: (string) The site you are issuing queries to.
:param filter: (string) The filter to utilize when calling an endpoint. Different filters
will return different keys. The default is ``default`` and this will
still vary depending on what the API returns as default for a
particular endpoint
:param kwargs: Parameters accepted by individual endpoints. These parameters
**must** be named the same as described in the endpoint documentation
:rtype: (dictionary) A dictionary containing wrapper data regarding the API call
and the results of the call in the `items` key. If multiple
pages were received, all of the results will appear in the
``items`` tag.
"""
if not endpoint:
raise ValueError('No end point provided.')
self._endpoint = endpoint
params = {
"pagesize": self.page_size,
"page": page,
"filter": filter
}
if self.key:
params['key'] = self.key
if self.access_token:
params['access_token'] = self.access_token
if 'ids' in kwargs:
ids = ';'.join(str(x) for x in kwargs['ids'])
kwargs.pop('ids', None)
else:
ids = None
params.update(kwargs)
if self._api_key:
params['site'] = self._api_key
data = []
base_url = "{}{}/".format(self._base_url, endpoint)
response = requests.post(base_url, data=params, proxies=self.proxy)
self._previous_call = response.url
response = response.json()
try:
error = response["error_id"]
code = response["error_name"]
message = response["error_message"]
raise StackAPIError(self._previous_call, error, code, message)
except KeyError:
pass # This means there is no error
data.append(response)
r = []
for d in data:
r.extend(d['items'])
result = {'has_more': data[-1]['has_more'],
'page': params['page'],
'quota_max': data[-1]['quota_max'],
'quota_remaining': data[-1]['quota_remaining'],
'items': list(chain(r))}
return result
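# Hedged usage sketch (illustrative only, not part of the library); the site name,
# endpoint, and sort parameter are examples from the public Stack Exchange API:
#
#   SITE = StackAPI('stackoverflow')
#   SITE.max_pages = 1
#   questions = SITE.fetch('questions', sort='votes')
#   for q in questions['items']:
#       print(q['title'])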
|
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: icx_command
version_added: "2.9"
author: "Ruckus Wireless (@Commscope)"
short_description: Run arbitrary commands on remote Ruckus ICX 7000 series switches
description:
- Sends arbitrary commands to an ICX node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
notes:
- Tested against ICX 10.1
options:
commands:
description:
- List of commands to send to the remote ICX device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
        the number of retries has expired. If a command sent to the
        device requires answering a prompt (use I(check_all) and I(newline)
        when there are multiple prompts), it is possible to pass
        a dict containing I(command), I(answer), I(prompt), I(check_all)
        and I(newline). Common answers are 'y' or "\\r" (carriage return,
        must be double quotes). See examples.
type: list
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails.
See examples.
type: list
aliases: ['waitfor']
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
type: str
default: all
choices: ['any', 'all']
retries:
description:
      - Specifies the number of times a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
type: int
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
type: int
default: 1
"""
EXAMPLES = """
tasks:
- name: run show version on remote devices
icx_command:
commands: show version
- name: run show version and check to see if output contains ICX
icx_command:
commands: show version
wait_for: result[0] contains ICX
- name: run multiple commands on remote nodes
icx_command:
commands:
- show version
- show interfaces
- name: run multiple commands and evaluate the output
icx_command:
commands:
- show version
- show interfaces
wait_for:
- result[0] contains ICX
- result[1] contains GigabitEthernet1/1/1
- name: run commands that require answering a prompt
icx_command:
commands:
- command: 'service password-encryption sha1'
prompt: 'Warning: Moving to higher password-encryption type,.*'
answer: 'y'
- name: run commands that require answering multiple prompt
icx_command:
commands:
- command: 'username qqq password qqq'
prompt:
- 'User already exists. Do you want to modify:.*'
- 'To modify or remove user, enter current password:'
answer:
- 'y'
- 'qqq\\\r'
check_all: True
newline: False
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
import re
import time
from ansible.module_utils.network.icx.icx import run_commands
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import ComplexList, to_lines
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.six import string_types
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict(),
check_all=dict(type='bool', default='False'),
newline=dict(type='bool', default='True')
), module)
commands = command(module.params['commands'])
for item in list(commands):
if module.check_mode:
if not item['command'].startswith('show'):
warnings.append(
'Only show commands are supported when using check mode, not executing configure terminal')
commands.remove(item)
return commands
def main():
"""main entry point for module execution
"""
argument_spec = dict(
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
run_commands(module, ['skip'])
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result.update({
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
})
module.exit_json(**result)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python3
# Copyright (c) 2017 Massachusetts Institute of Technology
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import re
import pyparsing as pp
# pp.ParserElement.setDefaultWhitespaceChars(' \t')
# Testing infrastructure
tests = []
def add_tests(term, matches, no_matches):
global tests
tests += [(term, matches, no_matches)]
def run_tests():
global tests
errors = 0
for (term, pass_examples, fail_examples) in tests:
for string in pass_examples:
try:
result = term.parseString(string, parseAll = True)
print(string + ' -> ' + str(result))
except:
print('ERROR: ' + string + ' -> no match')
errors += 1
for string in fail_examples:
try:
result = term.parseString(string, parseAll = True)
print('ERROR: ' + string + ' -> ' + str(result))
errors += 1
except:
print(string + ' -> no match')
return errors
# BSV objects
def parse_type(toks):
# print('type = ' + str(toks.asDict()))
# return [''.join(toks)]
return toks
# BSV comments
doc_comment=''
def collect_doc(toks):
global doc_comment
block = ''.join(toks)
# block is entire comment block. Process it as necessary
block = re.sub(r'\ */// ?', '', block)
block = re.sub(r'\ */\** ?', '', block)
block = re.sub(r'\ *\* ?', '', block)
### print(block)
doc_comment += '\n' + block
def clear_doc():
global doc_comment
doc_comment = ''
doc_block_comment_bsv = (~pp.Literal('/**/') + pp.Regex(r"/\*\*([^*]*\*+)+?/")).setName('doc block comment').setParseAction(collect_doc)
doc_oneline_comment_bsv = ((pp.Literal('///') + pp.LineEnd()) | pp.Regex(r"///(\\\n|[^/])(\\\n|.)*")).setName('doc one line comment').setParseAction(collect_doc)
block_comment_bsv = (~doc_block_comment_bsv + pp.Regex(r"/\*(?:[^*]*\*+)+?/")).setName('block comment').setParseAction(clear_doc)
oneline_comment_bsv = (~doc_oneline_comment_bsv + pp.Regex(r"//(?:\\\n|.)*")).setName('one line comment').setParseAction(clear_doc)
comment_bsv = doc_block_comment_bsv | doc_oneline_comment_bsv | block_comment_bsv | oneline_comment_bsv
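# Note added for clarity: doc comments ('///' and '/** ... */') have their markers
# stripped and are accumulated into the global doc_comment by collect_doc, while
# ordinary '//' and '/* ... */' comments reset it via clear_doc, so only the
# documentation that immediately precedes a declaration is attached to it later
# by save_doc_comment.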
add_tests(oneline_comment_bsv, ['//', '//Hello, World!', '// Hello, World!', '////Hello, World!', '//// Hello, World!'], ['///', '///Hello, World!', '/// Hello, World!'])
add_tests(doc_oneline_comment_bsv, ['///', '///Hello, World!', '/// Hello, World!'], ['//', '//Hello, World!', '// Hello, World!', '////Hello, World!', '//// Hello, World!'])
# basic identifiers and literals
Identifier_bsv = pp.Word(pp.srange('[A-Z]'), pp.srange('[a-zA-Z0-9$_]'))
identifier_bsv = pp.Word(pp.srange('[a-z_]'), pp.srange('[a-zA-Z0-9$_]'))
anyIdentifier_bsv = pp.Word(pp.srange('[a-zA-Z_]'), pp.srange('[a-zA-Z0-9$_]'))
dec_literal_bsv = pp.Optional(pp.Optional(pp.Word(pp.nums)) + '\'' + pp.CaselessLiteral('d')) + pp.Word(pp.srange('[0-9_]'))
hex_literal_bsv = pp.Optional(pp.Word(pp.nums)) + '\'' + pp.CaselessLiteral('h') + pp.Word(pp.srange('[0-9A-Fa-f_]'))
oct_literal_bsv = pp.Optional(pp.Word(pp.nums)) + '\'' + pp.CaselessLiteral('o') + pp.Word(pp.srange('[0-7_]'))
bin_literal_bsv = pp.Optional(pp.Word(pp.nums)) + '\'' + pp.CaselessLiteral('b') + pp.Word(pp.srange('[01_]'))
int_literal_bsv = hex_literal_bsv | oct_literal_bsv | bin_literal_bsv | dec_literal_bsv | '\'0' | '\'1'
# some tests
add_tests(Identifier_bsv, ['Hello', 'World', 'A', 'ALL_CAPS', 'About_$3_50'], ['$display', 'a', '_TEST', '`Riscv', ''])
add_tests(identifier_bsv, ['hello', 'world', 'a', 'aLMOST_ALL_CAPS', 'about_$3_50', '_TEST_', '_abc'], ['$display', 'A', '`Riscv', ''])
add_tests(anyIdentifier_bsv, ['hello', 'Hello', 'HELLO', 'world'], [''])
add_tests(dec_literal_bsv, ['0', '1', '2', '42'], ['1a', 'hello', 'world'])
add_tests(int_literal_bsv, ['0', '1', '\'1', '\'0', '20152', "'hf0A", "201'hfff", "'b10110", "8'b11111111"], ["'b00101001012"])
# tokens
def token(x):
return pp.Suppress(pp.Literal(x))
# keywords
kw_package = pp.Keyword('package')
kw_endpackage = pp.Keyword('endpackage')
kw_import = pp.Keyword('import')
kw_export = pp.Keyword('export')
kw_module = pp.Keyword('module')
kw_endmodule = pp.Keyword('endmodule')
kw_typeclass = pp.Keyword('typeclass')
kw_endtypeclass = pp.Keyword('endtypeclass')
kw_instance = pp.Keyword('instance')
kw_endinstance = pp.Keyword('endinstance')
kw_function = pp.Keyword('function')
kw_endfunction = pp.Keyword('endfunction')
kw_type = pp.Keyword('type')
kw_typedef = pp.Keyword('typedef')
kw_enum = pp.Keyword('enum')
kw_union = pp.Keyword('union')
kw_tagged = pp.Keyword('tagged')
kw_struct = pp.Keyword('struct')
kw_numeric = pp.Keyword('numeric')
kw_deriving = pp.Keyword('deriving')
kw_provisos = pp.Keyword('provisos')
kw_dependencies = pp.Keyword('dependencies')
kw_determines = pp.Keyword('determines')
add_tests(kw_module + ';', ['module;'], [])
add_tests(kw_module + 's', [], ['modules'])
add_tests(kw_module + '_', [], ['module_'])
add_tests(kw_module + '2', [], ['module2'])
# packages, imports, and exports
#package_bsv = kw_package + Identifier_bsv + ';' + \
# pp.ZeroOrMore(~kw_endpackage + pp.Word(pp.printables)) + \
# kw_endpackage + pp.Optional(':' + Identifier_bsv)
package_bsv = kw_package + Identifier_bsv('name') + ';'
import_bsv = kw_import + Identifier_bsv + pp.Literal('::') + pp.Literal('*') + pp.Literal(';')
export_bsv = kw_export + anyIdentifier_bsv + pp.Optional(pp.Literal('(') + pp.Literal('..') + pp.Literal(')')) + ';'
# type
type_bsv = pp.Forward()
function_type_bsv = kw_function + type_bsv + identifier_bsv + token('(') + pp.delimitedList(type_bsv + identifier_bsv) + token(')')
type_bsv << (function_type_bsv \
| (pp.Optional(anyIdentifier_bsv('package') + '::') + anyIdentifier_bsv('name') + \
pp.Optional( pp.Suppress('#') + pp.Suppress('(') + pp.Group(pp.delimitedList(type_bsv, ','))('formal_args') + pp.Suppress(')') )) \
| (int_literal_bsv('numeric'))).setParseAction(parse_type)
add_tests(type_bsv, ['a', 'WriteReq', 'Bool', 'function Bit#(1) f(Bit#(1) x)', 'void', 'Bit#(2)', 'List::List#(t)', 'Vector#(2, Reg#(Bit#(32)))'], ['Vector#(2 Reg#(Bit#(5)))', 'A#(B#(C#(a))'])
# terms used in typedef definitions
union_member_bsv = pp.Forward()
type_formal_bsv = pp.Optional(kw_numeric) + kw_type + identifier_bsv
type_formals_bsv = token('#') + token('(') + pp.Group(pp.delimitedList(type_formal_bsv, ',')) + token(')')
typedef_type_bsv = Identifier_bsv + pp.Optional(type_formals_bsv)
deriving_bsv = pp.Optional(kw_deriving + token('(') + pp.Group(pp.delimitedList(Identifier_bsv, ',')) + token(')'), default = [])
subunion_bsv = kw_union + kw_tagged + token('{') + pp.OneOrMore(union_member_bsv) + token('}')
struct_member_bsv = (type_bsv + identifier_bsv + token(';')) | (subunion_bsv + Identifier_bsv + token(';'))
substruct_bsv = kw_struct + token('{') + pp.OneOrMore(struct_member_bsv) + token('}')
union_member_bsv << ((type_bsv + Identifier_bsv + token(';')) | (subunion_bsv + Identifier_bsv + token(';')) | (substruct_bsv + Identifier_bsv + token(';')))
enum_element_bsv = Identifier_bsv + pp.Optional(token('=') + int_literal_bsv) # TODO also support [intLiteral] and [intLiteral:intLiteral]
# typedefs
basic_typedef_bsv = kw_typedef + type_bsv + typedef_type_bsv('name') + token(';');
enum_typedef_bsv = kw_typedef + kw_enum + token('{') + pp.delimitedList(enum_element_bsv, ',') + token('}') + typedef_type_bsv('name') + deriving_bsv + token(';')
struct_typedef_bsv = kw_typedef + kw_struct + token('{') + pp.OneOrMore(struct_member_bsv) + token('}') + typedef_type_bsv('name') + deriving_bsv + token(';')
union_typedef_bsv = kw_typedef + kw_union + kw_tagged + token('{') + pp.OneOrMore(union_member_bsv) + token('}') + typedef_type_bsv('name') + deriving_bsv + token(';')
typedef_bsv = basic_typedef_bsv | enum_typedef_bsv | struct_typedef_bsv | union_typedef_bsv
# typedef tests
add_tests(basic_typedef_bsv, ['typedef a MyA;'], ['typedef x y;'])
# provisos
provisos_bsv = pp.Optional(kw_provisos + token('(') + pp.Group(pp.delimitedList(type_bsv, ',')) + token(')'), default = [])
# module
module_bsv = kw_module + pp.Optional(token('[') + type_bsv + token(']')) + identifier_bsv('name') + \
pp.ZeroOrMore(~kw_endmodule + pp.Word(pp.printables)) + \
kw_endmodule + pp.Optional(token(':') + identifier_bsv)
# module tests
add_tests(module_bsv, ["module[m] mkTest(); endmodule : mkTest"], [])
# function
# functions can take function as arguments, and the syntax does not match "type_bsv + identifier_bsv" so I made the identifier optional
function_bsv = pp.Forward()
function_bsv << (kw_function + pp.Optional(type_bsv) + identifier_bsv('name') + pp.Optional( token('(') + pp.Group(pp.delimitedList(type_bsv + pp.Optional(identifier_bsv), ',')) + token(')'), default=[] )('args') + provisos_bsv + token(';') + \
pp.ZeroOrMore(pp.Group(function_bsv) | (~kw_endfunction + pp.Word(pp.printables))) + \
kw_endfunction + pp.Optional(token(':') + identifier_bsv))
# typeclass
type_list_bsv = identifier_bsv | (token('(') + pp.delimitedList(identifier_bsv, ',') + token(')'))
type_depend_bsv = type_list_bsv + kw_determines + type_list_bsv
type_depends_bsv = pp.Optional(kw_dependencies + token('(') + pp.delimitedList(type_depend_bsv, ',') + token(')'))
typeclass_bsv = kw_typeclass + Identifier_bsv('name') + type_formals_bsv('formal_args') + provisos_bsv + type_depends_bsv + token(';') + \
pp.ZeroOrMore(~kw_endtypeclass + pp.Word(pp.printables)) + \
kw_endtypeclass + pp.Optional(token(':') + Identifier_bsv)
# typeclass tests
# instances
instance_bsv = kw_instance + Identifier_bsv('name') + token('#') + token('(') + pp.Group(pp.delimitedList(type_bsv, ','))('formal_args') + token(')') + provisos_bsv + token(';') + \
pp.ZeroOrMore(~kw_endinstance + pp.Word(pp.printables)) + \
kw_endinstance + pp.Optional(token(':') + Identifier_bsv)
# interface
kw_interface = pp.Keyword('interface')
kw_endinterface = pp.Keyword('endinterface')
interface_bsv = kw_interface + Identifier_bsv('name') + pp.Optional(type_formals_bsv) + token(';') + \
pp.ZeroOrMore(~kw_endinterface + pp.Word(pp.printables)) + \
kw_endinterface + pp.Optional(token(':') + Identifier_bsv)
if __name__ == '__main__':
if len(sys.argv) < 2:
print('ERROR: expected a bsv filename or --test')
exit(1)
if sys.argv[1] == '--test' or sys.argv[1] == '-test':
errors = run_tests()
if errors > 0:
print('ERROR: Found ' + str(errors) + ' errors')
exit(1)
else:
exit(0)
with open(sys.argv[1]) as f:
file_data = f.read()
# scan_result = (typedef_bsv | typeclass_bsv | instance_bsv | module_bsv).ignore(comment_bsv).scanString(file_data)
def save_doc_comment(toks):
global doc_comment
try:
toks.insert(0, doc_comment)
doc_comment = ''
return toks
except TypeError as e:
print('ERROR: type error in save_doc_comment: ' + str(e))
print('type(toks) = ' + str(type(toks)))
exit(1)
scan_result = (comment_bsv | (package_bsv | typedef_bsv | interface_bsv | typeclass_bsv | instance_bsv | module_bsv | function_bsv).addParseAction(save_doc_comment)).scanString(file_data)
for (x, y, z) in scan_result:
if 'name' in x:
line = pp.lineno(y, file_data)
if x[1] == 'package':
print('# ' + str(x['name']))
elif 'formal_args' in x:
# print('## ' + str(x['name']) + '#(' + str(x['formal_args']) + ')')
# print('### ' + str(x['name']))
print('### [' + str(x['name']) + '](../../' + sys.argv[1] + '#L' + str(line) + ')')
else:
# print('### ' + str(x['name']))
print('### [' + str(x['name']) + '](../../' + sys.argv[1] + '#L' + str(line) + ')')
if x[0] != '':
print(x[0])
if x[1] != 'package':
print("```bluespec")
print(file_data[y:z])
print("```")
print('')
|
|
import os
import sys
def is_active():
return True
def get_name():
return "Windows"
def can_build():
if (os.name=="nt"):
#building natively on windows!
if (os.getenv("VSINSTALLDIR")):
return True
else:
print("MSVC Not detected, attempting mingw.")
return True
if (os.name=="posix"):
mingw = "i586-mingw32msvc-"
mingw64 = "i686-w64-mingw32-"
if (os.getenv("MINGW32_PREFIX")):
mingw=os.getenv("MINGW32_PREFIX")
if (os.getenv("MINGW64_PREFIX")):
mingw64=os.getenv("MINGW64_PREFIX")
if os.system(mingw+"gcc --version >/dev/null") == 0 or os.system(mingw64+"gcc --version >/dev/null") ==0:
return True
return False
def get_opts():
mingw=""
mingw64=""
if (os.name!="nt"):
mingw = "i586-mingw32msvc-"
mingw64 = "i686-w64-mingw32-"
if (os.getenv("MINGW32_PREFIX")):
mingw=os.getenv("MINGW32_PREFIX")
if (os.getenv("MINGW64_PREFIX")):
mingw64=os.getenv("MINGW64_PREFIX")
return [
('mingw_prefix','Mingw Prefix',mingw),
('mingw_prefix_64','Mingw Prefix 64 bits',mingw64),
('mingw64_for_32','Use Mingw 64 for 32 Bits Build',"no"),
]
def get_flags():
return [
('freetype','builtin'), #use builtin freetype
('openssl','builtin'), #use builtin openssl
('theora','no'),
]
def configure(env):
env.Append(CPPPATH=['#platform/windows'])
if (os.name=="nt" and os.getenv("VSINSTALLDIR")!=None):
#build using visual studio
env['ENV']['TMP'] = os.environ['TMP']
env.Append(CPPPATH=['#platform/windows/include'])
env.Append(LIBPATH=['#platform/windows/lib'])
if (env["freetype"]!="no"):
env.Append(CCFLAGS=['/DFREETYPE_ENABLED'])
env.Append(CPPPATH=['#tools/freetype'])
env.Append(CPPPATH=['#tools/freetype/freetype/include'])
if (env["target"]=="release"):
env.Append(CCFLAGS=['/O2'])
env.Append(LINKFLAGS=['/SUBSYSTEM:WINDOWS'])
env.Append(LINKFLAGS=['/ENTRY:mainCRTStartup'])
elif (env["target"]=="release_debug"):
env.Append(CCFLAGS=['/O2','/DDEBUG_ENABLED'])
env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
elif (env["target"]=="debug"):
env.Append(CCFLAGS=['/Zi','/DDEBUG_ENABLED','/DDEBUG_MEMORY_ENABLED','/DD3D_DEBUG_INFO','/O1'])
env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
env.Append(LINKFLAGS=['/DEBUG'])
env.Append(CCFLAGS=['/MT','/Gd','/GR','/nologo'])
env.Append(CXXFLAGS=['/TP'])
env.Append(CPPFLAGS=['/DMSVC', '/GR', ])
env.Append(CCFLAGS=['/I'+os.getenv("WindowsSdkDir")+"/Include"])
env.Append(CCFLAGS=['/DWINDOWS_ENABLED'])
env.Append(CCFLAGS=['/DRTAUDIO_ENABLED'])
env.Append(CCFLAGS=['/DWIN32'])
env.Append(CCFLAGS=['/DTYPED_METHOD_BIND'])
env.Append(CCFLAGS=['/DGLES2_ENABLED'])
env.Append(CCFLAGS=['/DGLEW_ENABLED'])
LIBS=['winmm','opengl32','dsound','kernel32','ole32','user32','gdi32', 'IPHLPAPI', 'wsock32', 'shell32','advapi32']
env.Append(LINKFLAGS=[p+env["LIBSUFFIX"] for p in LIBS])
env.Append(LIBPATH=[os.getenv("WindowsSdkDir")+"/Lib"])
if (os.getenv("DXSDK_DIR")):
DIRECTX_PATH=os.getenv("DXSDK_DIR")
else:
DIRECTX_PATH="C:/Program Files/Microsoft DirectX SDK (March 2009)"
if (os.getenv("VCINSTALLDIR")):
VC_PATH=os.getenv("VCINSTALLDIR")
else:
VC_PATH=""
env.Append(CCFLAGS=["/I" + p for p in os.getenv("INCLUDE").split(";")])
env.Append(LIBPATH=[p for p in os.getenv("LIB").split(";")])
env.Append(CCFLAGS=["/I"+DIRECTX_PATH+"/Include"])
env.Append(LIBPATH=[DIRECTX_PATH+"/Lib/x86"])
env['ENV'] = os.environ
else:
# Workaround for MinGW. See:
# http://www.scons.org/wiki/LongCmdLinesOnWin32
if (os.name=="nt"):
import subprocess
def mySpawn(sh, escape, cmd, args, env):
newargs = ' '.join(args[1:])
cmdline = cmd + " " + newargs
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, startupinfo=startupinfo, shell = False, env = env)
data, err = proc.communicate()
rv = proc.wait()
if rv:
print("=====")
print(err)
print("=====")
return rv
env['SPAWN'] = mySpawn
#build using mingw
if (os.name=="nt"):
env['ENV']['TMP'] = os.environ['TMP'] #way to go scons, you can be so stupid sometimes
else:
env["PROGSUFFIX"]=env["PROGSUFFIX"]+".exe"
mingw_prefix=""
if (env["bits"]=="default"):
env["bits"]="32"
use64=False
if (env["bits"]=="32"):
if (env["mingw64_for_32"]=="yes"):
env.Append(CCFLAGS=['-m32'])
env.Append(LINKFLAGS=['-m32'])
env.Append(LINKFLAGS=['-static-libgcc'])
env.Append(LINKFLAGS=['-static-libstdc++'])
mingw_prefix=env["mingw_prefix_64"];
else:
mingw_prefix=env["mingw_prefix"];
else:
mingw_prefix=env["mingw_prefix_64"];
env.Append(LINKFLAGS=['-static'])
nulstr=""
if (os.name=="posix"):
nulstr=">/dev/null"
else:
nulstr=">nul"
if os.system(mingw_prefix+"gcc --version"+nulstr)!=0:
#not really super consistent but..
print("Can't find Windows compiler: "+mingw_prefix)
sys.exit(255)
if (env["target"]=="release"):
env.Append(CCFLAGS=['-O3','-ffast-math','-fomit-frame-pointer','-msse2'])
env.Append(LINKFLAGS=['-Wl,--subsystem,windows'])
elif (env["target"]=="release_debug"):
env.Append(CCFLAGS=['-O2','-DDEBUG_ENABLED'])
elif (env["target"]=="debug"):
env.Append(CCFLAGS=['-g', '-Wall','-DDEBUG_ENABLED','-DDEBUG_MEMORY_ENABLED'])
if (env["freetype"]!="no"):
env.Append(CCFLAGS=['-DFREETYPE_ENABLED'])
env.Append(CPPPATH=['#tools/freetype'])
env.Append(CPPPATH=['#tools/freetype/freetype/include'])
env["CC"]=mingw_prefix+"gcc"
env['AS']=mingw_prefix+"as"
env['CXX'] = mingw_prefix+"g++"
env['AR'] = mingw_prefix+"ar"
env['RANLIB'] = mingw_prefix+"ranlib"
env['LD'] = mingw_prefix+"g++"
#env['CC'] = "winegcc"
#env['CXX'] = "wineg++"
env.Append(CCFLAGS=['-DWINDOWS_ENABLED','-mwindows'])
env.Append(CPPFLAGS=['-DRTAUDIO_ENABLED'])
env.Append(CCFLAGS=['-DGLES2_ENABLED','-DGLEW_ENABLED'])
env.Append(LIBS=['mingw32','opengl32', 'dsound', 'ole32', 'd3d9','winmm','gdi32','iphlpapi','wsock32','kernel32'])
if (env["bits"]=="32" and env["mingw64_for_32"]!="yes"):
# env.Append(LIBS=['gcc_s'])
#--with-arch=i686
env.Append(CPPFLAGS=['-march=i686'])
env.Append(LINKFLAGS=['-march=i686'])
#'d3dx9d'
env.Append(CPPFLAGS=['-DMINGW_ENABLED'])
env.Append(LINKFLAGS=['-g'])
import methods
env.Append( BUILDERS = { 'GLSL120' : env.Builder(action = methods.build_legacygl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
env.Append( BUILDERS = { 'GLSL' : env.Builder(action = methods.build_glsl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
env.Append( BUILDERS = { 'HLSL9' : env.Builder(action = methods.build_hlsl_dx9_headers, suffix = 'hlsl.h',src_suffix = '.hlsl') } )
env.Append( BUILDERS = { 'GLSL120GLES' : env.Builder(action = methods.build_gles2_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
|
|
import copy
from typing import Optional
import virtool.db.utils
import virtool.history.db
import virtool.otus.db
import virtool.otus.utils
import virtool.utils
from virtool.types import App
async def add(
app, otu_id: str, data: dict, user_id: str, isolate_id: Optional[str] = None
) -> dict:
"""
Add an isolate to an existing OTU.
:param app: the application object
:param otu_id: the id of the OTU
:param data: the isolate data
:param user_id: the user making the change
:param isolate_id: an optional ID to force for the isolate
:return: the isolate sub-document
"""
db = app["db"]
document = await db.otus.find_one(otu_id)
isolates = copy.deepcopy(document["isolates"])
# True if the new isolate should be default and any existing isolates should be non-default.
will_be_default = not isolates or data["default"]
# Set ``default`` to ``False`` for all existing isolates if the new one should be default.
if will_be_default:
for isolate in isolates:
isolate["default"] = False
# Get the complete, joined entry before the update.
old = await virtool.otus.db.join(db, otu_id, document)
existing_ids = [isolate["id"] for isolate in isolates]
if isolate_id is None:
isolate_id = virtool.utils.random_alphanumeric(length=3, excluded=existing_ids)
if isolate_id in existing_ids:
raise ValueError(f"Isolate ID already exists: {isolate_id}")
isolate = {
"id": isolate_id,
"default": will_be_default,
"source_type": data["source_type"],
"source_name": data["source_name"],
}
# Push the new isolate to the database.
await db.otus.update_one(
{"_id": otu_id},
{
"$set": {"isolates": [*isolates, isolate], "verified": False},
"$inc": {"version": 1},
},
)
# Get the joined entry now that it has been updated.
new = await virtool.otus.db.join(db, otu_id)
await virtool.otus.db.update_verification(db, new)
isolate_name = virtool.otus.utils.format_isolate_name(data)
description = f"Added {isolate_name}"
if will_be_default:
description += " as default"
await virtool.history.db.add(app, "add_isolate", old, new, description, user_id)
return {**isolate, "sequences": []}
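# A hypothetical usage sketch (the OTU id, isolate data, and user id below are
# placeholders, not values from this project):
#     isolate = await add(app, "otu_1", {"default": True, "source_type": "isolate", "source_name": "A"}, "user_1")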
async def edit(
app: App, otu_id: str, isolate_id: str, data: dict, user_id: str
) -> dict:
"""
Edit an isolate's fields and record the change in the OTU's history.
:param app: the application object
:param otu_id: the id of the parent OTU
:param isolate_id: the id of the isolate to edit
:param data: the updated isolate fields
:param user_id: the user making the change
:return: the updated isolate sub-document
"""
db = app["db"]
isolates = await virtool.db.utils.get_one_field(db.otus, "isolates", otu_id)
isolate = virtool.otus.utils.find_isolate(isolates, isolate_id)
old_isolate_name = virtool.otus.utils.format_isolate_name(isolate)
isolate.update(data)
new_isolate_name = virtool.otus.utils.format_isolate_name(isolate)
old = await virtool.otus.db.join(db, otu_id)
# Replace the isolates list with the updated one.
document = await db.otus.find_one_and_update(
{"_id": otu_id},
{"$set": {"isolates": isolates, "verified": False}, "$inc": {"version": 1}},
)
# Get the joined entry now that it has been updated.
new = await virtool.otus.db.join(db, otu_id, document)
await virtool.otus.db.update_verification(db, new)
# Use the old and new entry to add a new history document for the change.
await virtool.history.db.add(
app,
"edit_isolate",
old,
new,
f"Renamed {old_isolate_name} to {new_isolate_name}",
user_id,
)
complete = await virtool.otus.db.join_and_format(db, otu_id, joined=new)
return virtool.otus.utils.find_isolate(complete["isolates"], isolate_id)
async def remove(app: App, otu_id: str, isolate_id: str, user_id: str):
"""
Remove an isolate from an OTU, delete its sequences, and record the change in history.
:param app: the application object
:param otu_id: the id of the parent OTU
:param isolate_id: the id of the isolate to remove
:param user_id: the user making the change
"""
db = app["db"]
document = await db.otus.find_one(otu_id)
isolates = copy.deepcopy(document["isolates"])
# Get any isolates that have the isolate id to be removed (only one should match!).
isolate_to_remove = virtool.otus.utils.find_isolate(isolates, isolate_id)
# Remove the isolate from the OTU's isolate list.
isolates.remove(isolate_to_remove)
new_default = None
# Set the first isolate as default if the removed isolate was the default.
if isolate_to_remove["default"] and len(isolates):
new_default = isolates[0]
new_default["default"] = True
old = await virtool.otus.db.join(db, otu_id, document)
document = await db.otus.find_one_and_update(
{"_id": otu_id},
{"$set": {"isolates": isolates, "verified": False}, "$inc": {"version": 1}},
)
new = await virtool.otus.db.join(db, otu_id, document)
await virtool.otus.db.update_verification(db, new)
# Remove any sequences associated with the removed isolate.
await db.sequences.delete_many({"otu_id": otu_id, "isolate_id": isolate_id})
old_isolate_name = virtool.otus.utils.format_isolate_name(isolate_to_remove)
description = f"Removed {old_isolate_name}"
if isolate_to_remove["default"] and new_default:
new_isolate_name = virtool.otus.utils.format_isolate_name(new_default)
description += f" and set {new_isolate_name} as default"
await virtool.history.db.add(app, "remove_isolate", old, new, description, user_id)
async def set_default(app, otu_id: str, isolate_id: str, user_id: str) -> dict:
"""
Set a new default isolate.
:param app: the application object
:param otu_id: the ID of the parent OTU
:param isolate_id: the ID of the isolate set as default
:param user_id: the ID of the requesting user
:return: the updated isolate
"""
db = app["db"]
document = await db.otus.find_one(otu_id)
isolate = virtool.otus.utils.find_isolate(document["isolates"], isolate_id)
old = await virtool.otus.db.join(db, otu_id, document)
# If the default isolate will be unchanged, immediately return the existing isolate.
if isolate["default"]:
return virtool.otus.utils.find_isolate(old["isolates"], isolate_id)
# Set ``default`` to ``False`` for all existing isolates if the new one should be default.
isolates = [
{**isolate, "default": isolate_id == isolate["id"]}
for isolate in document["isolates"]
]
# Replace the isolates list with the updated one.
document = await db.otus.find_one_and_update(
{"_id": otu_id},
{"$set": {"isolates": isolates, "verified": False}, "$inc": {"version": 1}},
)
# Get the joined entry now that it has been updated.
new = await virtool.otus.db.join(db, otu_id, document)
await virtool.otus.db.update_verification(db, new)
isolate_name = virtool.otus.utils.format_isolate_name(isolate)
# Use the old and new entry to add a new history document for the change.
await virtool.history.db.add(
app, "set_as_default", old, new, f"Set {isolate_name} as default", user_id
)
return virtool.otus.utils.find_isolate(new["isolates"], isolate_id)
|
|
"""Test the Google Maps Travel Time config flow."""
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.google_travel_time.const import (
ARRIVAL_TIME,
CONF_ARRIVAL_TIME,
CONF_AVOID,
CONF_DEPARTURE_TIME,
CONF_DESTINATION,
CONF_LANGUAGE,
CONF_ORIGIN,
CONF_TIME,
CONF_TIME_TYPE,
CONF_TRAFFIC_MODEL,
CONF_TRANSIT_MODE,
CONF_TRANSIT_ROUTING_PREFERENCE,
CONF_UNITS,
DEFAULT_NAME,
DEPARTURE_TIME,
DOMAIN,
)
from homeassistant.const import (
CONF_API_KEY,
CONF_MODE,
CONF_NAME,
CONF_UNIT_SYSTEM_IMPERIAL,
)
from tests.common import MockConfigEntry
async def test_minimum_fields(hass, validate_config_entry, bypass_setup):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_API_KEY: "api_key",
CONF_ORIGIN: "location1",
CONF_DESTINATION: "location2",
},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result2["title"] == DEFAULT_NAME
assert result2["data"] == {
CONF_NAME: DEFAULT_NAME,
CONF_API_KEY: "api_key",
CONF_ORIGIN: "location1",
CONF_DESTINATION: "location2",
}
async def test_invalid_config_entry(hass, invalidate_config_entry):
"""Test we get an error when the config entry cannot be validated."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_API_KEY: "api_key",
CONF_ORIGIN: "location1",
CONF_DESTINATION: "location2",
},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_options_flow(hass, validate_config_entry, bypass_update):
"""Test options flow."""
entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_API_KEY: "api_key",
CONF_ORIGIN: "location1",
CONF_DESTINATION: "location2",
},
options={
CONF_MODE: "driving",
CONF_ARRIVAL_TIME: "test",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
},
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id, data=None)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_MODE: "driving",
CONF_LANGUAGE: "en",
CONF_AVOID: "tolls",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
CONF_TIME_TYPE: ARRIVAL_TIME,
CONF_TIME: "test",
CONF_TRAFFIC_MODEL: "best_guess",
CONF_TRANSIT_MODE: "train",
CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == ""
assert result["data"] == {
CONF_MODE: "driving",
CONF_LANGUAGE: "en",
CONF_AVOID: "tolls",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
CONF_ARRIVAL_TIME: "test",
CONF_TRAFFIC_MODEL: "best_guess",
CONF_TRANSIT_MODE: "train",
CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
}
assert entry.options == {
CONF_MODE: "driving",
CONF_LANGUAGE: "en",
CONF_AVOID: "tolls",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
CONF_ARRIVAL_TIME: "test",
CONF_TRAFFIC_MODEL: "best_guess",
CONF_TRANSIT_MODE: "train",
CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
}
async def test_options_flow_departure_time(hass, validate_config_entry, bypass_update):
"""Test options flow with departure time."""
entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_API_KEY: "api_key",
CONF_ORIGIN: "location1",
CONF_DESTINATION: "location2",
},
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id, data=None)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_MODE: "driving",
CONF_LANGUAGE: "en",
CONF_AVOID: "tolls",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
CONF_TIME_TYPE: DEPARTURE_TIME,
CONF_TIME: "test",
CONF_TRAFFIC_MODEL: "best_guess",
CONF_TRANSIT_MODE: "train",
CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == ""
assert result["data"] == {
CONF_MODE: "driving",
CONF_LANGUAGE: "en",
CONF_AVOID: "tolls",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
CONF_DEPARTURE_TIME: "test",
CONF_TRAFFIC_MODEL: "best_guess",
CONF_TRANSIT_MODE: "train",
CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
}
assert entry.options == {
CONF_MODE: "driving",
CONF_LANGUAGE: "en",
CONF_AVOID: "tolls",
CONF_UNITS: CONF_UNIT_SYSTEM_IMPERIAL,
CONF_DEPARTURE_TIME: "test",
CONF_TRAFFIC_MODEL: "best_guess",
CONF_TRANSIT_MODE: "train",
CONF_TRANSIT_ROUTING_PREFERENCE: "less_walking",
}
async def test_dupe(hass, validate_config_entry, bypass_setup):
"""Test setting up the same entry data twice is OK."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_API_KEY: "test",
CONF_ORIGIN: "location1",
CONF_DESTINATION: "location2",
},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_API_KEY: "test",
CONF_ORIGIN: "location1",
CONF_DESTINATION: "location2",
},
)
await hass.async_block_till_done()
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
|
|
#!/usr/bin/env python
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2020 Chris Liechti <[email protected]>
#
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
raw_input = input # in python3 it's "raw"
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
# switch terminal temporary to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
if os.name == 'nt': # noqa
import msvcrt
import ctypes
import platform
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
fncodes = {
';': '\x1bOP', # F1
'<': '\x1bOQ', # F2
'=': '\x1bOR', # F3
'>': '\x1bOS', # F4
'?': '\x1b[15~', # F5
'@': '\x1b[17~', # F6
'A': '\x1b[18~', # F7
'B': '\x1b[19~', # F8
'C': '\x1b[20~', # F9
'D': '\x1b[21~', # F10
}
navcodes = {
'H': '\x1b[A', # UP
'P': '\x1b[B', # DOWN
'K': '\x1b[D', # LEFT
'M': '\x1b[C', # RIGHT
'G': '\x1b[H', # HOME
'O': '\x1b[F', # END
'R': '\x1b[2~', # INSERT
'S': '\x1b[3~', # DELETE
'I': '\x1b[5~', # PGUP
'Q': '\x1b[6~', # PGDN
}
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
# ANSI handling available through SetConsoleMode since Windows 10 v1511
# https://en.wikipedia.org/wiki/ANSI_escape_code#cite_note-win10th2-1
if platform.release() == '10' and int(platform.version().split('.')[2]) > 10586:
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
import ctypes.wintypes as wintypes
if not hasattr(wintypes, 'LPDWORD'): # PY2
wintypes.LPDWORD = ctypes.POINTER(wintypes.DWORD)
SetConsoleMode = ctypes.windll.kernel32.SetConsoleMode
GetConsoleMode = ctypes.windll.kernel32.GetConsoleMode
GetStdHandle = ctypes.windll.kernel32.GetStdHandle
mode = wintypes.DWORD()
GetConsoleMode(GetStdHandle(-11), ctypes.byref(mode))
if (mode.value & ENABLE_VIRTUAL_TERMINAL_PROCESSING) == 0:
SetConsoleMode(GetStdHandle(-11), mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
self._saved_cm = mode
self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
try:
ctypes.windll.kernel32.SetConsoleMode(ctypes.windll.kernel32.GetStdHandle(-11), self._saved_cm)
except AttributeError: # in case no _saved_cm
pass
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
elif z == unichr(0) or z == unichr(0xe0):
try:
code = msvcrt.getwch()
if z == unichr(0):
return self.fncodes[code]
else:
return self.navcodes[code]
except KeyError:
pass
else:
return z
def cancel(self):
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import fcntl
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update(
{
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update(
{
0x20: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for c in text:
if ' ' <= c < '\x7f' or c in '\r\n\b\t':
r.append(c)
elif c < ' ':
r.append(unichr(0x2400 + ord(c)))
else:
r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(c)))
r.append(' ')
return ''.join(r)
echo = rx
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{!r}] '.format(text))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{!r}] '.format(text))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
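# A minimal sketch of the first idea above ("add date/time for each newline").
# It is illustrative only, assumes time.strftime() formatting is acceptable, and
# is intentionally not registered in TRANSFORMATIONS below.
class Timestamp(Transform):
    """prefix received lines with the current wall-clock time (sketch)"""
    def rx(self, text):
        import time
        stamp = time.strftime('[%H:%M:%S] ')
        return text.replace('\n', '\n' + stamp)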
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('--- {:2}: {:20} {!r}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = unichr(0x1d) # GS/CTRL+]
self.menu_character = unichr(0x14) # Menu: CTRL+T
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.rx_decoder = None
self.tx_decoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.transmitter_thread.join()
if not transmit_only:
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def close(self):
self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
"""set encoding for received data"""
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
"""set encoding for transmitted data"""
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
# on RFC 2217 ports, it can happen if no modem state notification was
# yet received. ignore this error.
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
self.console.cancel()
raise # XXX handle instead of re-raise?
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if not self.alive:
break
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
#~ if self.raw:
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
self.serial.write(self.tx_encoder.encode(text))
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
self.upload_file()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
self.change_filter()
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
self.change_encoding()
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
self.change_port()
elif c in 'zZ': # Z -> suspend / open port temporarily
self.suspend_port()
elif c in 'bB': # B -> change baudrate
self.change_baudrate()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
elif c == '7': # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
elif c in 'qQ':
self.stop() # Q -> exit app
else:
sys.stderr.write('--- unknown menu character {} --\n'.format(key_description(c)))
def upload_file(self):
"""Ask user for filename and send its contents"""
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
def change_filter(self):
"""change the i/o transformations"""
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {!r}\n'.format(f))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def change_encoding(self):
"""change encoding on the serial port"""
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
def change_baudrate(self):
"""change the baudrate"""
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
def change_port(self):
"""Have a conversation with the user to change the serial port"""
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
def suspend_port(self):
"""\
open port temporarily, allow reconnect, exit and port change to get
out of the loop
"""
# reader thread needs to be shut down
self._stop_reader()
self.serial.close()
sys.stderr.write('\n--- Port closed: {} ---\n'.format(self.serial.port))
do_change_port = False
while not self.serial.is_open:
sys.stderr.write('--- Quit: {exit} | p: port change | any other key to reconnect ---\n'.format(
exit=key_description(self.exit_character)))
k = self.console.getkey()
if k == self.exit_character:
self.stop() # exit app
break
elif k in 'pP':
do_change_port = True
break
try:
self.serial.open()
except Exception as e:
sys.stderr.write('--- ERROR opening port: {} ---\n'.format(e))
if do_change_port:
self.change_port()
else:
# and restart the reader thread
self._start_reader()
sys.stderr.write('--- Port opened: {} ---\n'.format(self.serial.port))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program (alias {menu} Q)
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from another script,
# e.g. to create a miniterm-my-device.py
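# For instance, a hypothetical wrapper script could call (the port and baud rate
# below are placeholder values):
#     from serial.tools.miniterm import main
#     main(default_port='/dev/ttyUSB0', default_baudrate=115200)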
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description='Miniterm - A simple terminal program for the serial port.')
parser.add_argument(
'port',
nargs='?',
help='serial port name ("-" to show port list)',
default=default_port)
parser.add_argument(
'baudrate',
nargs='?',
type=int,
help='set baud rate, default: %(default)s',
default=default_baudrate)
group = parser.add_argument_group('port settings')
group.add_argument(
'--parity',
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help='set parity, one of {N E O S M}, default: N',
default='N')
group.add_argument(
'--rtscts',
action='store_true',
help='enable RTS/CTS flow control (default off)',
default=False)
group.add_argument(
'--xonxoff',
action='store_true',
help='enable software flow control (default off)',
default=False)
group.add_argument(
'--rts',
type=int,
help='set initial RTS line state (possible values: 0, 1)',
default=default_rts)
group.add_argument(
'--dtr',
type=int,
help='set initial DTR line state (possible values: 0, 1)',
default=default_dtr)
group.add_argument(
'--non-exclusive',
dest='exclusive',
action='store_false',
help='disable locking for native ports',
default=True)
group.add_argument(
'--ask',
action='store_true',
help='ask again for port when open fails',
default=False)
group = parser.add_argument_group('data handling')
group.add_argument(
'-e', '--echo',
action='store_true',
help='enable local echo (default off)',
default=False)
group.add_argument(
'--encoding',
dest='serial_port_encoding',
metavar='CODEC',
help='set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s',
default='UTF-8')
group.add_argument(
'-f', '--filter',
action='append',
metavar='NAME',
help='add text transformation',
default=[])
group.add_argument(
'--eol',
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help='end of line mode',
default='CRLF')
group.add_argument(
'--raw',
action='store_true',
help='Do not apply any encodings/transformations',
default=False)
group = parser.add_argument_group('hotkeys')
group.add_argument(
'--exit-char',
type=int,
metavar='NUM',
help='Unicode of special character that is used to exit the application, default: %(default)s',
default=0x1d) # GS/CTRL+]
group.add_argument(
'--menu-char',
type=int,
metavar='NUM',
help='Unicode code of special character that is used to control miniterm (menu), default: %(default)s',
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group('diagnostics')
group.add_argument(
'-q', '--quiet',
action='store_true',
help='suppress non-error messages',
default=False)
group.add_argument(
'--develop',
action='store_true',
help='show Python traceback on error',
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
if isinstance(serial_instance, serial.Serial):
serial_instance.exclusive = args.exclusive
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {!r}: {}\n'.format(args.port, e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write('\n--- exit ---\n')
miniterm.join()
miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
|
|
from django.http import Http404
from django.core.exceptions import ObjectDoesNotExist
from django.template.defaultfilters import slugify
from django.db import models
from django.contrib.auth.models import Permission, Group
from django.contrib.contenttypes.models import ContentType
from Forum.settings import User
from Forum.lib import user_has_permission
# Create your models here.
class PerInstancePerm(models.Model):
"""Classes that inherit from this one can have per-instance permissions."""
def perms(self):
return self.permission_set.all()
def has_perm(self, codename):
try:
self.permission_set.get(codename=codename,content_type=ContentType.objects.get_for_model(PerInstancePerm))
return True
except ObjectDoesNotExist:
return False
def add_perm(self, codename, name):
try:
self.permission_set.get(codename=codename,name=name,content_type=ContentType.objects.get_for_model(PerInstancePerm))
return None
except ObjectDoesNotExist:
p = ForumPermission(instance=self,codename=codename,name=name,content_type=ContentType.objects.get_for_model(PerInstancePerm))
p.save()
return p
def remove_perm(self, codename):
try:
self.permission_set.get(codename=codename,content_type=ContentType.objects.get_for_model(PerInstancePerm)).delete()
return True
except ObjectDoesNotExist:
return False
def groups(self):
return self.group_set.all()
def add_group(self, name):
try:
self.group_set.get(name=name)
return None
except ObjectDoesNotExist:
g = ForumGroup(instance=self,name=name)
g.save()
return g
def remove_group(self, name):
try:
self.group_set.get(name=name).delete()
return True
except ObjectDoesNotExist:
return False
def get_group(self, name):
try:
return self.group_set.get(name=name)
except ObjectDoesNotExist:
return None
class Meta:
abstract = False
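# Hypothetical usage of the PerInstancePerm API (the Forum model defined below
# inherits from it); the object, codename, and group name are placeholders:
#     forum = Forum.objects.get(local_id=1)
#     forum.add_perm('can_pin_threads', 'Can pin threads')
#     if forum.has_perm('can_pin_threads'):
#         mods = forum.add_group('moderators')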
class ForumGroup(Group):
instance = models.ForeignKey(PerInstancePerm,related_name="group_set")
class ForumPermission(Permission):
instance = models.ForeignKey(PerInstancePerm,related_name="permission_set")
class Forum(PerInstancePerm):
def _get_main_forum(self):
return self.subforum_set.get(local_id=0)
local_id = models.IntegerField(unique=True)
name = models.CharField(max_length=100) #name of the Forum instance (for listing and icons and stuff)
main_forum = property(_get_main_forum)
admin_permission = models.CharField(max_length=40, default="none")
# Votes config
allow_up_votes = models.BooleanField(default=True)
allow_down_votes = models.BooleanField(default=True)
positive_score_event = models.IntegerField(default=100)
negative_score_event = models.IntegerField(default=-100)
# Display config
posts_per_page = models.IntegerField(default=10)
threads_per_page = models.IntegerField(default=20)
def __unicode__(self):
return str(self.local_id) + "-" + self.name
def slug(self):
return '-'+slugify(self.name)
class Subforum(PerInstancePerm):
local_id = models.IntegerField()
name = models.CharField(max_length=100)
parent = models.ForeignKey('self', related_name="child_set", blank=True, null=True)
forum = models.ForeignKey(Forum)
view_permission = models.CharField(max_length=40, default="none")
mod_permission = models.CharField(max_length=40, default="none") # or user has permission "Forum.global_moderator"
create_thread_permission = models.CharField(max_length=40, default="none")
reply_thread_permission = models.CharField(max_length=40, default="none")
description = models.TextField(blank=True)
creator = models.ForeignKey(User, null=True)
creation_datetime = models.DateTimeField(auto_now_add=True, null=True)
# Perms stuff
def perms(self):
return Permission.objects.filter(codename__startswith=str(self.forum.id)+".")
def has_perm(self, codename):
try:
Permission.objects.get(codename=str(self.forum.id)+"."+codename,content_type=ContentType.objects.get_for_model(PerInstancePerm))
return True
except ObjectDoesNotExist:
return False
def add_perm(self, codename, name):
try:
Permission.objects.get(codename=str(self.forum.id)+"."+codename,name=name,content_type=ContentType.objects.get_for_model(PerInstancePerm))
return False
except ObjectDoesNotExist:
Permission(codename=str(self.forum.id)+"."+codename,name=name,content_type=ContentType.objects.get_for_model(PerInstancePerm)).save()
return True
def remove_perm(self, codename):
try:
Permission.objects.get(codename=str(self.forum.id)+"."+codename,content_type=ContentType.objects.get_for_model(PerInstancePerm)).delete()
return True
except ObjectDoesNotExist:
return False
class Meta:
unique_together = ('local_id','forum')
def __unicode__(self):
return str(self.local_id) + "-" + self.name
def slug(self):
return '-'+slugify(self.name)
def getPathAsList(self):
l = []
current_element = self
while current_element != self.forum.main_forum:
current_element = current_element.parent
l.append(current_element)
l.reverse()
return l
def getLastModifiedThread(self):
return self.thread_set.all().order_by('-last_publication_datetime').first()
def isVisited(self, user):
if self.thread_set.count() == 0 or not user.is_authenticated():
return True
for thread in self.thread_set.all():
if thread.isVisited(user):
return True
return False
class Thread(models.Model):
def _get_poll(self):
try:
return Poll.objects.get(thread=self)
except ObjectDoesNotExist:
return None
local_id = models.IntegerField()
name = models.CharField(max_length=100)
parent = models.ForeignKey(Subforum)
forum = models.ForeignKey(Forum)
creator = models.ForeignKey(User)
creation_datetime = models.DateTimeField(auto_now_add=True)
last_publication_datetime = models.DateTimeField()
hidden = models.BooleanField(default=False)
closed = models.BooleanField(default=False)
pinned = models.BooleanField(default=False)
visit_counter = models.IntegerField(default=0)
poll = property(_get_poll)
class Meta:
unique_together = ('local_id','forum')
def __unicode__(self):
return str(self.local_id) + "-" + self.name
def slug(self):
return '-'+slugify(self.name)
def getLastPublishedPost(self):
return self.post_set.all().order_by('-publication_datetime').first()
def isVisited(self, user):
if not user.is_authenticated():
return True
try:
dt = self.lastuservisit_set.get(user=user, thread=self).datetime
if self.last_publication_datetime <= dt:
return True
else:
return False
except ObjectDoesNotExist:
return False
def setPoll(self, question, option_list):
if self.poll:
self.poll.question = question
for opt in self.poll.option_set.all():
opt.delete()
for opt in option_list:
PollOption(content=opt, poll=self.poll).save()
for vote in self.poll.pollvote_set.all():
vote.delete()
self.poll.save()
else:
Poll(question=question, thread=self).save()
for opt in option_list:
PollOption(content=opt, poll=self.poll).save()
self.save()
class Poll(models.Model):
question = models.CharField(max_length=200)
thread = models.ForeignKey(Thread, unique=True)
def getTotalVotes(self):
opt_list = self.option_set.all()
ret = 0
for opt in opt_list:
ret += opt.vote_count
return ret
def userCanVote(self, user):
try:
PollVote.objects.get(poll=self, user=user)
return False
except ObjectDoesNotExist:
return True
def vote(self, user, answer):
try:
poll_option = self.option_set.get(content=answer)
PollVote(user=user, poll=self).save()
poll_option.vote_count += 1
print(poll_option.vote_count)
poll_option.save()
except ObjectDoesNotExist:
raise Http404
return
def __unicode__(self):
return self.thread.__unicode__()+"-"+self.question
class PollOption(models.Model):
content = models.CharField(max_length=200)
poll = models.ForeignKey(Poll, related_name="option_set")
vote_count = models.IntegerField(default=0)
def percentage(self):
if self.poll.getTotalVotes() == 0:
return 0
return float(self.vote_count)/float(self.poll.getTotalVotes()) * 100
def __unicode__(self):
return self.content
class PollVote(models.Model):
user = models.ForeignKey(User)
poll = models.ForeignKey(Poll)
class Post(models.Model):
def _user_is_mod (self):
return user_has_permission(self.thread.parent.mod_permission, self.publisher)
def _user_is_admin (self):
return user_has_permission(self.forum.admin_permission, self.publisher)
def _get_upvotes(self):
return Vote.objects.filter(post=self, type="Up").count()
def _get_downvotes(self):
return Vote.objects.filter(post=self, type="Down").count()
local_id = models.IntegerField()
title = models.CharField(max_length=200, blank=True)
forum = models.ForeignKey(Forum)
thread = models.ForeignKey(Thread)
content = models.TextField(max_length=7000)
publisher = models.ForeignKey(User)
publication_datetime = models.DateTimeField(auto_now_add=True)
upvotes = property(_get_upvotes)
downvotes = property(_get_downvotes)
hidden = models.BooleanField(default=False)
score_event_sent = models.BooleanField(default=False)
user_is_mod = property(_user_is_mod)
user_is_admin = property(_user_is_admin)
def __unicode__(self):
return str(self.local_id) + "-" + self.title
def slug(self):
return '-'+slugify(self.title)
def score(self):
return self.upvotes-self.downvotes
class Meta:
unique_together = ('local_id','forum')
class PostReported(models.Model):
post = models.ForeignKey(Post)
user = models.ForeignKey(User, related_name="+")
reason = models.CharField(max_length=500)
datetime = models.DateTimeField(auto_now_add=True)
class Meta:
unique_together = ('post','user', 'datetime')
verbose_name_plural = "Posts Reported"
def __unicode__(self):
return self.post.__unicode__()
class PostEdited(models.Model):
post = models.ForeignKey(Post)
user = models.ForeignKey(User, related_name="+")
datetime = models.DateTimeField(auto_now_add=True)
reason = models.CharField(max_length=500)
old_title = models.CharField(max_length=200, blank=True)
new_title = models.CharField(max_length=200, blank=True)
old_content = models.TextField()
new_content = models.TextField()
user_is_moderator = models.BooleanField(default=False)
user_is_administrator = models.BooleanField(default=False)
class Meta:
verbose_name_plural = "Posts Edited"
class LastUserVisit(models.Model):
thread = models.ForeignKey(Thread)
datetime = models.DateTimeField()
user = models.ForeignKey(User)
class Quote(models.Model):
user = models.ForeignKey(User)
post = models.ForeignKey(Post)
thread = models.ForeignKey(Thread)
class Meta:
unique_together = ('user','post')
Vote_types = (("Up","Up"),("Down","Down"))
class Vote(models.Model):
user = models.ForeignKey(User)
post = models.ForeignKey(Post)
type = models.CharField(max_length=4, choices=Vote_types)
class Meta:
unique_together = ('user','post')
|
|
## A script for finding the Cox coefficient and p-value for every mRNA in LIHC Tier 3 data downloaded Feb. 2015
##load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
## This call will only work if you are running Python from the command line (so that __file__ is defined).
## If you are not running from the command line, manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
f=open(os.path.join(BASE_DIR,'tcga_data','LIHC','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_lihc.txt'))
f.readline()
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file; the most recent listing (furthest down in the file) contains the most recent
## follow-up data. This code checks whether the patient has already been loaded into the list and, if so, keeps the more recent data.
## This requires seeding the list with an empty placeholder entry.
## Data is: [[Patient ID, time (days), Vital status], [Patient ID, time (days), Vital status], ...]
clinical=[['','','']]
for i in data:
try:
if clinical[-1][0]==i[0]:
if i[9]=='Alive':
clinical[-1]=[i[0],int(i[10]),'Alive']
elif i[9]=='Dead':
clinical[-1]=[i[0],int(i[11]),'Dead']
else:
pass
else:
if i[9]=='Alive':
clinical.append([i[0],int(i[10]),'Alive'])
elif i[9]=='Dead':
clinical.append([i[0],int(i[11]),'Dead'])
else:
pass
except:
pass
## Removing the empty value.
clinical=clinical[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G1']=1
grade_dict['G2']=2
grade_dict['G3']=3
grade_dict['G4']=4
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LIHC','clinical','nationwidechildrens.org_clinical_patient_lihc.txt'))
f.readline()
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
try:
more_clinical[i[0]]=[grade_dict[i[25]],sex_dict[i[5]],int(i[56])]
if i[13]=='Alive':
clinical4.append([i[0],int(i[71]),'Alive'])
elif i[13]=='Dead':
clinical4.append([i[0],int(i[64]),'Dead'])
else:
pass
except:
pass
new_clinical=[]
## It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files.
## All the clinical data is merged, keeping whichever entry has the longer follow-up time for each patient.
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LIHC','FILE_SAMPLE_MAP.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
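## The sample type is the fourth barcode field with its vial letter stripped (e.g. "01A" -> "01"),
## and the join below rebuilds the patient barcode ("TCGA-XX-XXXX") from the first three fields.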
TCGA_to_mrna={}
for i in data:
## The normalized data files are used
if 'genes.normalized_results' in i[0]:
if i[1].split('-')[3][:-1]=='01':
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## We only care about patients with complete clinical information
for i in final_clinical:
if TCGA_to_mrna.has_key(i[0]):
## The mRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
else:
pass
## A list of lists of genes is constructed; the order of the gene lists is the same as the clinical_and_files data
## Data structure: [[genes for patient 1], [genes for patient 2], ....]
genes=[]
for i in clinical_and_files:
temp=[]
for j in i[-1]:
f=open(os.path.join(BASE_DIR,'tcga_data','LIHC','mrna',j))
f.readline()
temp.append([[i.split('|')[1].split()[0],float(i.strip().split()[-1])] for i in f])
## In the case that the patient only contained 1 primary tumor mRNA file.
if len(temp)==1:
genes.append(temp[0])
    ## If the patient has more than one primary tumor mRNA file,
    ## the block below averages the expression values across all of the files.
else:
values=[]
for k in temp:
values.append([kk[1] for kk in k])
genes.append(zip([z[0] for z in temp[0]],list(sum([np.array(kkk) for kkk in values])/float(len(temp)))))
## Only want genes that meet an expression cutoff
## A cutoff was chosen of a median expression greater than 1 RSEM, with no more than a fourth of the patients showing zero expression
final_genes=[[]]*len(genes)
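## Note: [[]]*len(genes) creates len(genes) references to the same empty list; this is safe here
## only because the loop below rebinds each slot with '+' (creating a new list) rather than
## mutating the shared list in place.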
for i in range(len(genes[0])):
temp=[]
for j in genes:
temp.append(j[i])
count=0
for k in temp:
if k[1]==0:
count+=1
median=np.median([ii[1] for ii in temp])
if count<len(genes)/4 and median>1:
for index, kk in enumerate(temp):
final_genes[index]=final_genes[index]+[kk]
## This will write the final genes to a large (100-300 MB) file, which could be useful for further analyses; this step can be skipped.
##f=open(os.path.join(BASE_DIR,'cox_regression','LIHC','final_genes.txt'),'w')
##for i in final_genes:
## f.write(str(i))
## f.write('\n')
##f.close()
##Performing Cox regression on all of the genes in final_genes
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
coeffs=[]
pvalues=[]
genes=[] ##This list tracks the gene names
for i in range(len(final_genes[0])): ## These lists contain the clinical information and mRNA data in the same order.
kaplan=[]
genes.append(final_genes[0][i][0])
for k,j in zip(clinical_and_files,final_genes):
kaplan.append([k[1],k[2],k[3],k[4],k[5],j[i][1]])
data=[ii[-1] for ii in kaplan] ## Grabbing all the gene values for the current gene being analyzed
ro.globalenv['expression']=ro.FloatVector(data)
res=ro.r('round(qnorm((rank(expression, na.last="keep")-0.5)/sum(!is.na(expression))), digit=5)') ## Perform inverse normal transformation
inverse_norm=list(res) ## Convert robject to python list
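    ## A rough scipy equivalent of the R call above (a sketch for reference only, assuming no
    ## missing values; it is not executed by this script):
    ##     from scipy.stats import norm, rankdata
    ##     inverse_norm = list(np.round(norm.ppf((rankdata(data) - 0.5) / len(data)), 5))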
## Prepare the variables for rpy2
ro.globalenv['gene']=ro.FloatVector(inverse_norm)
ro.globalenv['times']=ro.IntVector([ii[0] for ii in kaplan])
ro.globalenv['died']=ro.IntVector([death_dic[ii[1]] for ii in kaplan])
##grade1
grade1=[]
for ii in kaplan:
if ii[2]==1:
grade1.append(1)
else:
grade1.append(0)
##grade2
grade2=[]
for ii in kaplan:
if ii[2]==2:
grade2.append(1)
else:
grade2.append(0)
##grade3
grade3=[]
for ii in kaplan:
if ii[2]==3:
grade3.append(1)
else:
grade3.append(0)
##grade4
grade4=[]
for ii in kaplan:
if ii[2]==4:
grade4.append(1)
else:
grade4.append(0)
ro.globalenv['grade1']=ro.IntVector(grade1)
ro.globalenv['grade2']=ro.IntVector(grade2)
ro.globalenv['grade3']=ro.IntVector(grade3)
ro.globalenv['grade4']=ro.IntVector(grade4)
ro.globalenv['sex']=ro.IntVector([ii[3] for ii in kaplan])
ro.globalenv['age']=ro.IntVector([ii[4] for ii in kaplan])
res=ro.r('coxph(Surv(times,died) ~ gene + grade1 + grade2 + grade3 + grade4 + sex + age)') ## Perform Cox regression
# Parse the string of the result with python for the gene coefficient and pvalue
for entry in str(res).split('\n'):
try:
if entry.split()[0]=='gene':
coeff=entry.split()[1]
pvalue=entry.split()[-1]
break
except:
pass
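    ## A hedged alternative to parsing the printed model (not used here): read the coefficient
    ## matrix from R's summary() directly, e.g.
    ##     ro.globalenv['model'] = res
    ##     gene_row = ro.r('summary(model)$coefficients["gene", c("coef", "Pr(>|z|)")]')
    ##     coeff, pvalue = list(gene_row)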
coeffs.append(coeff)
pvalues.append(pvalue)
## This will write the results to a tab-delimited file with gene name, Cox coefficient, and p-value.
f=open(os.path.join(BASE_DIR,'cox_regression','LIHC','coeffs_pvalues.txt'),'w')
for i,j,k in zip(genes,coeffs,pvalues):
f.write(i)
f.write('\t')
f.write(j)
f.write('\t')
f.write(k)
f.write('\n')
f.close()
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.kfac.fisher_blocks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.kfac.python.ops import fisher_blocks as fb
from tensorflow.contrib.kfac.python.ops import layer_collection as lc
from tensorflow.contrib.kfac.python.ops import utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import test
def _make_psd(dim):
"""Constructs a PSD matrix of the given dimension."""
mat = np.ones((dim, dim), dtype=np.float32)
mat[np.arange(dim), np.arange(dim)] = 2. + np.arange(dim)
return array_ops.constant(mat)
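# Why the helper above is PSD: the all-ones matrix is positive semi-definite (eigenvalues dim
# and 0), and raising the diagonal to 2 + arange(dim) adds a strictly positive diagonal term,
# so the result is symmetric positive definite.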
class UtilsTest(test.TestCase):
def testComputePiTracenorm(self):
with ops.Graph().as_default(), self.test_session() as sess:
random_seed.set_random_seed(200)
left_factor = array_ops.diag([1., 2., 0., 1.])
right_factor = array_ops.ones([2., 2.])
# pi is the sqrt of the left trace norm divided by the right trace norm
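      # Presumably pi = sqrt((trace(left)/dim_left) / (trace(right)/dim_right)) (an assumption
      # consistent with the asserted value): trace(left) = 1+2+0+1 = 4 over dim 4 and
      # trace(right) = 2 over dim 2, so pi = sqrt(1/1) = 1.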
pi = fb.compute_pi_tracenorm(left_factor, right_factor)
pi_val = sess.run(pi)
self.assertEqual(1., pi_val)
class FullFBTest(test.TestCase):
def testFullFBInitSingleTensor(self):
with ops.Graph().as_default():
random_seed.set_random_seed(200)
params = (array_ops.constant([1., 2.]), array_ops.constant(3.))
block = fb.FullFB(lc.LayerCollection(), params)
block.register_additional_minibatch(32)
self.assertAllEqual(params, block.tensors_to_compute_grads())
def testFullFBInitTensorTuple(self):
with ops.Graph().as_default():
random_seed.set_random_seed(200)
params = (array_ops.constant([1., 2.]), array_ops.constant(3.))
block = fb.FullFB(lc.LayerCollection(), params)
block.register_additional_minibatch(32)
self.assertAllEqual(params, block.tensors_to_compute_grads())
def testInstantiateFactors(self):
with ops.Graph().as_default():
random_seed.set_random_seed(200)
params = (array_ops.constant([1., 2.]), array_ops.constant(3.))
block = fb.FullFB(lc.LayerCollection(), params)
block.register_additional_minibatch(32)
grads = (params[0]**2, math_ops.sqrt(params[1]))
block.instantiate_factors(grads, 0.5)
def testMultiplyInverseTuple(self):
with ops.Graph().as_default(), self.test_session() as sess:
random_seed.set_random_seed(200)
params = (array_ops.constant([1., 2.]), array_ops.constant(3.))
block = fb.FullFB(lc.LayerCollection(), params)
block.register_additional_minibatch(32)
grads = (params[0]**2, math_ops.sqrt(params[1]))
block.instantiate_factors((grads,), 0.5)
block._factor.instantiate_cov_variables()
block.register_inverse()
block._factor.instantiate_inv_variables()
# Make sure our inverse is something other than the identity.
sess.run(tf_variables.global_variables_initializer())
sess.run(block._factor.make_inverse_update_ops())
vector = array_ops.ones(3,) * 2
output = block.multiply_inverse(vector)
self.assertAllClose(sess.run(vector * 2 / 3.), sess.run(output))
def testMultiplyInverseNotTuple(self):
with ops.Graph().as_default(), self.test_session() as sess:
random_seed.set_random_seed(200)
params = array_ops.constant([[1.], [2.]])
block = fb.FullFB(lc.LayerCollection(), params)
block.register_additional_minibatch(32)
grads = params**2
block.instantiate_factors((grads,), 0.5)
block._factor.instantiate_cov_variables()
block.register_inverse()
block._factor.instantiate_inv_variables()
# Make sure our inverse is something other than the identity.
sess.run(tf_variables.global_variables_initializer())
sess.run(block._factor.make_inverse_update_ops())
vector = array_ops.ones(2,) * 2
output = block.multiply_inverse(vector)
self.assertAllClose(sess.run(vector * 2 / 3.), sess.run(output))
def testMultiplyInverseAgainstExplicit(self):
with ops.Graph().as_default(), self.test_session() as sess:
random_seed.set_random_seed(200)
params = (array_ops.constant([1., 2.]), array_ops.constant(3.))
block = fb.FullFB(lc.LayerCollection(), params)
block.register_additional_minibatch(32)
grads = (array_ops.constant([2., 3.]), array_ops.constant(4.))
damping = 0.5
block.instantiate_factors((grads,), damping)
block._factor.instantiate_cov_variables()
block.register_inverse()
block._factor.instantiate_inv_variables()
# Make sure our inverse is something other than the identity.
sess.run(state_ops.assign(block._factor._cov, _make_psd(3)))
sess.run(block._factor.make_inverse_update_ops())
v_flat = np.array([4., 5., 6.], dtype=np.float32)
vector = utils.column_to_tensors(params, array_ops.constant(v_flat))
output = block.multiply_inverse(vector)
output_flat = sess.run(utils.tensors_to_column(output)).ravel()
full = sess.run(block.full_fisher_block())
explicit = np.dot(np.linalg.inv(full + damping * np.eye(3)), v_flat)
self.assertAllClose(output_flat, explicit)
class NaiveDiagonalFBTest(test.TestCase):
def testNaiveDiagonalFBInitSingleTensor(self):
with ops.Graph().as_default():
random_seed.set_random_seed(200)
params = (array_ops.constant([1., 2.]), array_ops.constant(3.))
block = fb.NaiveDiagonalFB(lc.LayerCollection(), params)
block.register_additional_minibatch(32)
self.assertAllEqual(params, block.tensors_to_compute_grads())
def testNaiveDiagonalFBInitTensorTuple(self):
with ops.Graph().as_default():
random_seed.set_random_seed(200)
params = (array_ops.constant([1., 2.]), array_ops.constant(3.))
block = fb.NaiveDiagonalFB(lc.LayerCollection(), params)
block.register_additional_minibatch(32)
self.assertAllEqual(params, block.tensors_to_compute_grads())
def testInstantiateFactors(self):
with ops.Graph().as_default():
random_seed.set_random_seed(200)
params = (array_ops.constant([1., 2.]), array_ops.constant(3.))
block = fb.NaiveDiagonalFB(lc.LayerCollection(), params)
block.register_additional_minibatch(32)
grads = (params[0]**2, math_ops.sqrt(params[1]))
block.instantiate_factors(grads, 0.5)
def testMultiplyInverseTuple(self):
with ops.Graph().as_default(), self.test_session() as sess:
random_seed.set_random_seed(200)
params = (array_ops.constant([1., 2.]), array_ops.constant(3.))
block = fb.NaiveDiagonalFB(lc.LayerCollection(), params)
block.register_additional_minibatch(32)
grads = (params[0]**2, math_ops.sqrt(params[1]))
block.instantiate_factors((grads,), 0.5)
block._factor.instantiate_cov_variables()
# Make sure our inverse is something other than the identity.
sess.run(tf_variables.global_variables_initializer())
sess.run(block._factor.make_inverse_update_ops())
vector = array_ops.ones(3,) * 2
output = block.multiply_inverse(vector)
self.assertAllClose(sess.run(vector * 2 / 3.), sess.run(output))
def testMultiplyInverseNotTuple(self):
with ops.Graph().as_default(), self.test_session() as sess:
random_seed.set_random_seed(200)
params = array_ops.constant([[1.], [2.]])
block = fb.NaiveDiagonalFB(lc.LayerCollection(), params)
block.register_additional_minibatch(32)
grads = params**2
block.instantiate_factors((grads,), 0.5)
block._factor.instantiate_cov_variables()
# Make sure our inverse is something other than the identity.
sess.run(tf_variables.global_variables_initializer())
sess.run(block._factor.make_inverse_update_ops())
vector = array_ops.ones(2,) * 2
output = block.multiply_inverse(vector)
self.assertAllClose(sess.run(vector * 2 / 3.), sess.run(output))
def testMultiplyInverseAgainstExplicit(self):
with ops.Graph().as_default(), self.test_session() as sess:
random_seed.set_random_seed(200)
params = (array_ops.constant([1., 2.]), array_ops.constant(3.))
block = fb.NaiveDiagonalFB(lc.LayerCollection(), params)
block.register_additional_minibatch(32)
grads = (params[0]**2, math_ops.sqrt(params[1]))
damping = 0.5
block.instantiate_factors((grads,), damping)
block._factor.instantiate_cov_variables()
cov = array_ops.reshape(array_ops.constant([2., 3., 4.]), [-1, 1])
sess.run(state_ops.assign(block._factor._cov, cov))
sess.run(block._factor.make_inverse_update_ops())
v_flat = np.array([4., 5., 6.], dtype=np.float32)
vector = utils.column_to_tensors(params, array_ops.constant(v_flat))
output = block.multiply_inverse(vector)
output_flat = sess.run(utils.tensors_to_column(output)).ravel()
full = sess.run(block.full_fisher_block())
explicit = np.dot(np.linalg.inv(full + damping * np.eye(3)), v_flat)
self.assertAllClose(output_flat, explicit)
class FullyConnectedDiagonalFBTest(test.TestCase):
def setUp(self):
super(FullyConnectedDiagonalFBTest, self).setUp()
self.batch_size = 4
self.input_size = 6
self.output_size = 3
self.inputs = np.random.randn(self.batch_size, self.input_size).astype(
np.float32)
self.outputs = np.zeros([self.batch_size, self.output_size]).astype(
np.float32)
self.output_grads = np.random.randn(self.batch_size,
self.output_size).astype(np.float32)
self.w = np.random.randn(self.input_size, self.output_size).astype(
np.float32)
self.b = np.random.randn(self.output_size).astype(np.float32)
def fisherApprox(self, has_bias=False):
"""Fisher approximation using default inputs."""
if has_bias:
inputs = np.concatenate(
[self.inputs, np.ones([self.batch_size, 1])], axis=1)
else:
inputs = self.inputs
return self.buildDiagonalFisherApproximation(inputs, self.output_grads)
def buildDiagonalFisherApproximation(self, inputs, output_grads):
"""Builds explicit diagonal Fisher approximation.
Fisher's diagonal is (d loss / d w)'s elements squared for
d/dw = E[outer(input, output_grad)]
where the expectation is taken over examples.
Args:
inputs: np.array of shape [batch_size, input_size].
output_grads: np.array of shape [batch_size, output_size].
Returns:
Diagonal np.array of shape [num_params, num_params] for num_params =
input_size * output_size.
"""
batch_size = inputs.shape[0]
assert output_grads.shape[0] == batch_size
input_size = inputs.shape[1]
output_size = output_grads.shape[1]
fisher_diag = np.zeros((input_size, output_size))
for i in range(batch_size):
fisher_diag += np.square(np.outer(inputs[i], output_grads[i]))
return np.diag(fisher_diag.flatten()) / batch_size
def testMultiply(self):
result, _ = self.runFisherBlockOps(self.w, [self.inputs], [self.outputs],
[self.output_grads])
# Construct Fisher-vector product.
expected_result = self.fisherApprox().dot(self.w.flatten())
expected_result = expected_result.reshape(
[self.input_size, self.output_size])
self.assertAllClose(expected_result, result)
def testMultiplyInverse(self):
_, result = self.runFisherBlockOps(self.w, [self.inputs], [self.outputs],
[self.output_grads])
# Construct inverse Fisher-vector product.
expected_result = np.linalg.inv(self.fisherApprox()).dot(self.w.flatten())
expected_result = expected_result.reshape(
[self.input_size, self.output_size])
self.assertAllClose(expected_result, result)
def testRegisterAdditionalMinibatch(self):
"""Ensure 1 big minibatch and 2 small minibatches are equivalent."""
multiply_result_big, multiply_inverse_result_big = self.runFisherBlockOps(
self.w, [self.inputs], [self.outputs], [self.output_grads])
multiply_result_small, multiply_inverse_result_small = (
self.runFisherBlockOps(self.w, np.split(self.inputs, 2),
np.split(self.outputs, 2),
np.split(self.output_grads, 2)))
self.assertAllClose(multiply_result_big, multiply_result_small)
self.assertAllClose(multiply_inverse_result_big,
multiply_inverse_result_small)
def testMultiplyHasBias(self):
result, _ = self.runFisherBlockOps((self.w, self.b), [self.inputs],
[self.outputs], [self.output_grads])
expected_result = self.fisherApprox(True).dot(
np.concatenate([self.w.flatten(), self.b.flatten()]))
expected_result = expected_result.reshape(
[self.input_size + 1, self.output_size])
expected_result = (expected_result[:-1], expected_result[-1])
self.assertEqual(len(result), 2)
self.assertAllClose(expected_result[0], result[0])
self.assertAllClose(expected_result[1], result[1])
def runFisherBlockOps(self, params, inputs, outputs, output_grads):
"""Run Ops guaranteed by FisherBlock interface.
Args:
params: Tensor or 2-tuple of Tensors. Represents weights or weights and
bias of this layer.
inputs: list of Tensors of shape [batch_size, input_size]. Inputs to
layer.
outputs: list of Tensors of shape [batch_size, output_size].
Preactivations produced by layer.
output_grads: list of Tensors of shape [batch_size, output_size].
Gradient of loss with respect to 'outputs'.
Returns:
multiply_result: Result of FisherBlock.multiply(params)
multiply_inverse_result: Result of FisherBlock.multiply_inverse(params)
"""
with ops.Graph().as_default(), self.test_session() as sess:
inputs = as_tensors(inputs)
outputs = as_tensors(outputs)
output_grads = as_tensors(output_grads)
params = as_tensors(params)
block = fb.FullyConnectedDiagonalFB(
lc.LayerCollection(), has_bias=isinstance(params, (tuple, list)))
for (i, o) in zip(inputs, outputs):
block.register_additional_minibatch(i, o)
block.instantiate_factors((output_grads,), damping=0.0)
block._factor.instantiate_cov_variables()
sess.run(tf_variables.global_variables_initializer())
sess.run(block._factor.make_covariance_update_op(0.0))
multiply_result = sess.run(block.multiply(params))
multiply_inverse_result = sess.run(block.multiply_inverse(params))
return multiply_result, multiply_inverse_result
class EmbeddingKFACFBTest(test.TestCase):
def testInstantiateFactors(self):
with ops.Graph().as_default():
random_seed.set_random_seed(200)
# Create a Fisher Block.
vocab_size = 5
block = fb.EmbeddingKFACFB(lc.LayerCollection(), vocab_size)
# Add some examples.
inputs = array_ops.constant([[0, 1], [1, 2], [2, 3]])
outputs = array_ops.constant([[0.], [1.], [2.]])
block.register_additional_minibatch(inputs, outputs)
# Instantiate factor's variables. Ensure it doesn't fail.
grads = outputs**2.
damping = array_ops.constant(0.)
block.instantiate_factors(((grads,),), damping)
def testMultiplyInverse(self):
with ops.Graph().as_default(), self.test_session() as sess:
random_seed.set_random_seed(200)
# Create a Fisher Block.
vocab_size = 5
block = fb.EmbeddingKFACFB(lc.LayerCollection(), vocab_size)
# Add some examples.
inputs = array_ops.constant([[0, 1], [1, 2], [2, 3]])
outputs = array_ops.constant([[0.], [1.], [2.]])
block.register_additional_minibatch(inputs, outputs)
# Instantiate factor's variables. Ensure it doesn't fail.
grads = outputs**2.
damping = array_ops.constant(0.)
block.instantiate_factors(((grads,),), damping)
block._input_factor.instantiate_cov_variables()
block._output_factor.instantiate_cov_variables()
block.register_inverse()
block._input_factor.instantiate_inv_variables()
block._output_factor.instantiate_inv_variables()
# Create a sparse update.
indices = array_ops.constant([1, 3, 4])
values = array_ops.constant([[1.], [1.], [1.]])
sparse_vector = ops.IndexedSlices(
values, indices, dense_shape=[vocab_size, 1])
dense_vector = array_ops.reshape([0., 1., 0., 1., 1.], [vocab_size, 1])
# Compare Fisher-vector product against explicit result.
result = block.multiply_inverse(sparse_vector)
expected_result = linalg_ops.matrix_solve(block.full_fisher_block(),
dense_vector)
sess.run(tf_variables.global_variables_initializer())
self.assertAlmostEqual(
sess.run(expected_result[1]), sess.run(result.values[0]))
self.assertAlmostEqual(
sess.run(expected_result[3]), sess.run(result.values[1]))
self.assertAlmostEqual(
sess.run(expected_result[4]), sess.run(result.values[2]))
class FullyConnectedKFACBasicFBTest(test.TestCase):
def testFullyConnectedKFACBasicFBInit(self):
with ops.Graph().as_default():
random_seed.set_random_seed(200)
inputs = array_ops.constant([1., 2.])
outputs = array_ops.constant([3., 4.])
block = fb.FullyConnectedKFACBasicFB(lc.LayerCollection())
block.register_additional_minibatch(inputs, outputs)
self.assertAllEqual([outputs], block.tensors_to_compute_grads())
def testInstantiateFactorsHasBias(self):
with ops.Graph().as_default():
random_seed.set_random_seed(200)
inputs = array_ops.constant([[1., 2.], [3., 4.]])
outputs = array_ops.constant([[3., 4.], [5., 6.]])
block = fb.FullyConnectedKFACBasicFB(lc.LayerCollection(), has_bias=True)
block.register_additional_minibatch(inputs, outputs)
grads = outputs**2
block.instantiate_factors(((grads,),), 0.5)
def testInstantiateFactorsNoBias(self):
with ops.Graph().as_default():
random_seed.set_random_seed(200)
inputs = array_ops.constant([[1., 2.], [3., 4.]])
outputs = array_ops.constant([[3., 4.], [5., 6.]])
block = fb.FullyConnectedKFACBasicFB(lc.LayerCollection(), has_bias=False)
block.register_additional_minibatch(inputs, outputs)
grads = outputs**2
block.instantiate_factors(((grads,),), 0.5)
def testMultiplyInverseTuple(self):
with ops.Graph().as_default(), self.test_session() as sess:
random_seed.set_random_seed(200)
inputs = array_ops.constant([[1., 2., 3.], [3., 4., 5.], [5., 6., 7.]])
outputs = array_ops.constant([[3., 4.], [5., 6.]])
block = fb.FullyConnectedKFACBasicFB(lc.LayerCollection(), has_bias=False)
block.register_additional_minibatch(inputs, outputs)
grads = outputs**2
block.instantiate_factors(((grads,),), 0.5)
block._input_factor.instantiate_cov_variables()
block._output_factor.instantiate_cov_variables()
block.register_inverse()
block._input_factor.instantiate_inv_variables()
block._output_factor.instantiate_inv_variables()
# Make sure our inverse is something other than the identity.
sess.run(tf_variables.global_variables_initializer())
sess.run(block._input_factor.make_inverse_update_ops())
sess.run(block._output_factor.make_inverse_update_ops())
vector = (
np.arange(2, 6).reshape(2, 2).astype(np.float32), #
np.arange(1, 3).reshape(2, 1).astype(np.float32))
output = block.multiply_inverse((array_ops.constant(vector[0]),
array_ops.constant(vector[1])))
output = sess.run(output)
self.assertAllClose([[0.686291, 1.029437], [1.372583, 1.715729]],
output[0])
self.assertAllClose([0.343146, 0.686291], output[1])
def testMultiplyInverseNotTuple(self):
with ops.Graph().as_default(), self.test_session() as sess:
random_seed.set_random_seed(200)
inputs = array_ops.constant([[1., 2.], [3., 4.]])
outputs = array_ops.constant([[3., 4.], [5., 6.]])
block = fb.FullyConnectedKFACBasicFB(lc.LayerCollection(), has_bias=False)
block.register_additional_minibatch(inputs, outputs)
grads = outputs**2
block.instantiate_factors(((grads,),), 0.5)
block._input_factor.instantiate_cov_variables()
block._output_factor.instantiate_cov_variables()
block.register_inverse()
block._input_factor.instantiate_inv_variables()
block._output_factor.instantiate_inv_variables()
# Make sure our inverse is something other than the identity.
sess.run(tf_variables.global_variables_initializer())
sess.run(block._input_factor.make_inverse_update_ops())
sess.run(block._output_factor.make_inverse_update_ops())
vector = np.arange(2, 6).reshape(2, 2).astype(np.float32)
output = block.multiply_inverse(array_ops.constant(vector))
self.assertAllClose([[0.686291, 1.029437], [1.372583, 1.715729]],
sess.run(output))
def testMultiplyInverseAgainstExplicit(self):
with ops.Graph().as_default(), self.test_session() as sess:
random_seed.set_random_seed(200)
input_dim, output_dim = 3, 2
inputs = array_ops.zeros([32, input_dim])
outputs = array_ops.zeros([32, output_dim])
params = array_ops.zeros([input_dim, output_dim])
block = fb.FullyConnectedKFACBasicFB(lc.LayerCollection(), has_bias=False)
block.register_additional_minibatch(inputs, outputs)
grads = outputs**2
damping = 0. # This test is only valid without damping.
block.instantiate_factors(((grads,),), damping)
block._input_factor.instantiate_cov_variables()
block._output_factor.instantiate_cov_variables()
sess.run(state_ops.assign(block._input_factor._cov, _make_psd(3)))
sess.run(state_ops.assign(block._output_factor._cov, _make_psd(2)))
block.register_inverse()
block._input_factor.instantiate_inv_variables()
block._output_factor.instantiate_inv_variables()
sess.run(block._input_factor.make_inverse_update_ops())
sess.run(block._output_factor.make_inverse_update_ops())
v_flat = np.arange(6, dtype=np.float32)
vector = utils.column_to_tensors(params, array_ops.constant(v_flat))
output = block.multiply_inverse(vector)
output_flat = sess.run(utils.tensors_to_column(output)).ravel()
full = sess.run(block.full_fisher_block())
explicit = np.dot(np.linalg.inv(full + damping * np.eye(6)), v_flat)
self.assertAllClose(output_flat, explicit)
class ConvDiagonalFBTest(test.TestCase):
def setUp(self):
super(ConvDiagonalFBTest, self).setUp()
self.batch_size = 2
self.height = 8
self.width = 4
self.input_channels = 6
self.output_channels = 3
self.kernel_size = 1
self.inputs = np.random.randn(self.batch_size, self.height, self.width,
self.input_channels).astype(np.float32)
self.outputs = np.zeros(
[self.batch_size, self.height, self.width,
self.output_channels]).astype(np.float32)
self.output_grads = np.random.randn(
self.batch_size, self.height, self.width, self.output_channels).astype(
np.float32)
self.w = np.random.randn(self.kernel_size, self.kernel_size,
self.input_channels, self.output_channels).astype(
np.float32)
self.b = np.random.randn(self.output_channels).astype(np.float32)
def fisherApprox(self, has_bias=False):
"""Fisher approximation using default inputs."""
if has_bias:
inputs = np.concatenate(
[self.inputs,
np.ones([self.batch_size, self.height, self.width, 1])],
axis=-1)
else:
inputs = self.inputs
return self.buildDiagonalFisherApproximation(inputs, self.output_grads,
self.kernel_size)
def buildDiagonalFisherApproximation(self, inputs, output_grads, kernel_size):
r"""Builds explicit diagonal Fisher approximation.
Fisher's diagonal is (d loss / d w)'s elements squared for
d/dw = E[\sum_{loc} outer(input_{loc}, output_grad_{loc})]
where the expectation is taken over examples and the sum over (x, y)
locations upon which the convolution is applied.
Args:
inputs: np.array of shape [batch_size, height, width, input_channels].
output_grads: np.array of shape [batch_size, height, width,
output_channels].
kernel_size: int. height and width of kernel.
Returns:
Diagonal np.array of shape [num_params, num_params] for num_params =
kernel_size^2 * input_channels * output_channels.
"""
batch_size, height, width, input_channels = inputs.shape
assert output_grads.shape[0] == batch_size
assert output_grads.shape[1] == height
assert output_grads.shape[2] == width
output_channels = output_grads.shape[3]
# If kernel_size == 1, then we don't need to worry about capturing context
# around the pixel upon which a convolution is applied. This makes testing
# easier.
assert kernel_size == 1, "kernel_size != 1 isn't supported."
num_locations = height * width
inputs = np.reshape(inputs, [batch_size, num_locations, input_channels])
output_grads = np.reshape(output_grads,
[batch_size, num_locations, output_channels])
fisher_diag = np.zeros((input_channels, output_channels))
for i in range(batch_size):
# Each example's approximation is a square(sum-of-outer-products).
example_fisher_diag = np.zeros((input_channels, output_channels))
for j in range(num_locations):
example_fisher_diag += np.outer(inputs[i, j], output_grads[i, j])
fisher_diag += np.square(example_fisher_diag)
# Normalize by batch_size (not num_locations).
return np.diag(fisher_diag.flatten()) / batch_size
def testMultiply(self):
result, _ = self.runFisherBlockOps(self.w, [self.inputs], [self.outputs],
[self.output_grads])
# Construct Fisher-vector product.
expected_result = self.fisherApprox().dot(self.w.flatten())
expected_result = expected_result.reshape([
self.kernel_size, self.kernel_size, self.input_channels,
self.output_channels
])
self.assertAllClose(expected_result, result)
def testMultiplyInverse(self):
_, result = self.runFisherBlockOps(self.w, [self.inputs], [self.outputs],
[self.output_grads])
# Construct inverse Fisher-vector product.
expected_result = np.linalg.inv(self.fisherApprox()).dot(self.w.flatten())
expected_result = expected_result.reshape([
self.kernel_size, self.kernel_size, self.input_channels,
self.output_channels
])
self.assertAllClose(expected_result, result, atol=1e-3)
def testRegisterAdditionalMinibatch(self):
"""Ensure 1 big minibatch and 2 small minibatches are equivalent."""
multiply_result_big, multiply_inverse_result_big = self.runFisherBlockOps(
self.w, [self.inputs], [self.outputs], [self.output_grads])
multiply_result_small, multiply_inverse_result_small = (
self.runFisherBlockOps(self.w, np.split(self.inputs, 2),
np.split(self.outputs, 2),
np.split(self.output_grads, 2)))
self.assertAllClose(multiply_result_big, multiply_result_small)
self.assertAllClose(multiply_inverse_result_big,
multiply_inverse_result_small)
def testMultiplyHasBias(self):
result, _ = self.runFisherBlockOps((self.w, self.b), [self.inputs],
[self.outputs], [self.output_grads])
# Clone 'b' along 'input_channels' dimension.
b_filter = np.tile(
np.reshape(self.b, [1, 1, 1, self.output_channels]),
[self.kernel_size, self.kernel_size, 1, 1])
params = np.concatenate([self.w, b_filter], axis=2)
expected_result = self.fisherApprox(True).dot(params.flatten())
# Extract 'b' from concatenated parameters.
expected_result = expected_result.reshape([
self.kernel_size, self.kernel_size, self.input_channels + 1,
self.output_channels
])
expected_result = (expected_result[:, :, 0:-1, :],
np.reshape(expected_result[:, :, -1, :],
[self.output_channels]))
self.assertEqual(len(result), 2)
self.assertAllClose(expected_result[0], result[0])
self.assertAllClose(expected_result[1], result[1])
def runFisherBlockOps(self, params, inputs, outputs, output_grads):
"""Run Ops guaranteed by FisherBlock interface.
Args:
params: Tensor or 2-tuple of Tensors. Represents weights or weights and
bias of this layer.
inputs: list of Tensors of shape [batch_size, input_size]. Inputs to
layer.
outputs: list of Tensors of shape [batch_size, output_size].
Preactivations produced by layer.
output_grads: list of Tensors of shape [batch_size, output_size].
Gradient of loss with respect to 'outputs'.
Returns:
multiply_result: Result of FisherBlock.multiply(params)
multiply_inverse_result: Result of FisherBlock.multiply_inverse(params)
"""
with ops.Graph().as_default(), self.test_session() as sess:
inputs = as_tensors(inputs)
outputs = as_tensors(outputs)
output_grads = as_tensors(output_grads)
params = as_tensors(params)
block = fb.ConvDiagonalFB(
lc.LayerCollection(), params, strides=[1, 1, 1, 1], padding='SAME')
for (i, o) in zip(inputs, outputs):
block.register_additional_minibatch(i, o)
block.instantiate_factors((output_grads,), damping=0.0)
block._factor.instantiate_cov_variables()
sess.run(tf_variables.global_variables_initializer())
sess.run(block._factor.make_covariance_update_op(0.0))
multiply_result = sess.run(block.multiply(params))
multiply_inverse_result = sess.run(block.multiply_inverse(params))
return multiply_result, multiply_inverse_result
class DepthwiseConvKFCBasicFBTest(test.TestCase):
def testInstantiateFactors(self):
with ops.Graph().as_default():
random_seed.set_random_seed(200)
params = random_ops.random_normal((3, 3, 8, 2))
inputs = random_ops.random_normal((32, 5, 5, 8))
outputs = random_ops.random_normal((32, 5, 5, 16))
layer_collection = lc.LayerCollection()
block = fb.DepthwiseConvKFCBasicFB(
layer_collection, params=params, strides=[1, 1, 1, 1], padding='SAME')
block.register_additional_minibatch(inputs, outputs)
grads = outputs**2
block.instantiate_factors(([grads],), 0.5)
def testMultiplyInverse(self):
with ops.Graph().as_default(), self.test_session() as sess:
random_seed.set_random_seed(200)
params = random_ops.random_normal((3, 3, 8, 2))
inputs = random_ops.random_normal((32, 5, 5, 8))
outputs = random_ops.random_normal((32, 5, 5, 16))
layer_collection = lc.LayerCollection()
block = fb.DepthwiseConvKFCBasicFB(
layer_collection, params=params, strides=[1, 1, 1, 1], padding='SAME')
block.register_additional_minibatch(inputs, outputs)
grads = outputs**2
block.instantiate_factors(([grads],), 0.5)
block._input_factor.instantiate_cov_variables()
block._output_factor.instantiate_cov_variables()
block.register_inverse()
block._input_factor.instantiate_inv_variables()
block._output_factor.instantiate_inv_variables()
# Ensure inverse update op doesn't crash.
sess.run(tf_variables.global_variables_initializer())
sess.run([
factor.make_inverse_update_ops()
for factor in layer_collection.get_factors()
])
# Ensure inverse-vector multiply doesn't crash.
output = block.multiply_inverse(params)
sess.run(output)
# Ensure same shape.
self.assertAllEqual(output.shape, params.shape)
class ConvKFCBasicFBTest(test.TestCase):
def _testConvKFCBasicFBInitParams(self, params):
with ops.Graph().as_default():
random_seed.set_random_seed(200)
if isinstance(params, (list, tuple)):
params = [array_ops.constant(param) for param in params]
else:
params = array_ops.constant(params)
inputs = random_ops.random_normal((2, 2, 2))
outputs = random_ops.random_normal((2, 2, 2))
block = fb.ConvKFCBasicFB(
lc.LayerCollection(), params=params, padding='SAME')
block.register_additional_minibatch(inputs, outputs)
self.assertAllEqual([outputs], block.tensors_to_compute_grads())
def testConvKFCBasicFBInitParamsParamsTuple(self):
self._testConvKFCBasicFBInitParams([np.ones([1, 2, 2]), np.ones([2])])
def testConvKFCBasicFBInitParamsParamsSingle(self):
self._testConvKFCBasicFBInitParams([np.ones([1, 2, 2])])
def testMultiplyInverseTuple(self):
with ops.Graph().as_default(), self.test_session() as sess:
random_seed.set_random_seed(200)
params = random_ops.random_normal((2, 2, 2, 2))
inputs = random_ops.random_normal((2, 2, 2, 2))
outputs = random_ops.random_normal((2, 2, 2, 2))
block = fb.ConvKFCBasicFB(
lc.LayerCollection(), params=params, padding='SAME')
block.register_additional_minibatch(inputs, outputs)
grads = outputs**2
block.instantiate_factors(((grads,),), 0.5)
block._input_factor.instantiate_cov_variables()
block._output_factor.instantiate_cov_variables()
block.register_inverse()
block._input_factor.instantiate_inv_variables()
block._output_factor.instantiate_inv_variables()
# Make sure our inverse is something other than the identity.
sess.run(tf_variables.global_variables_initializer())
sess.run(block._input_factor.make_inverse_update_ops())
sess.run(block._output_factor.make_inverse_update_ops())
vector = (np.arange(1, 15).reshape(7, 2).astype(np.float32),
np.arange(2, 4).reshape(2, 1).astype(np.float32))
output = block.multiply_inverse((array_ops.constant(vector[0]),
array_ops.constant(vector[1])))
output = sess.run(output)
self.assertAllClose([0.136455, 0.27291], output[0][0])
self.assertAllClose([0.27291, 0.409365], output[1])
def testMultiplyInverseNotTuple(self):
with ops.Graph().as_default(), self.test_session() as sess:
random_seed.set_random_seed(200)
params = random_ops.random_normal((2, 2, 2, 2))
inputs = random_ops.random_normal((2, 2, 2, 2))
outputs = random_ops.random_normal((2, 2, 2, 2))
block = fb.ConvKFCBasicFB(
lc.LayerCollection(), params=params, padding='SAME')
block.register_additional_minibatch(inputs, outputs)
self.assertFalse(block._has_bias)
grads = outputs**2
block.instantiate_factors(((grads,),), 0.5)
block._input_factor.instantiate_cov_variables()
block._output_factor.instantiate_cov_variables()
block.register_inverse()
block._input_factor.instantiate_inv_variables()
block._output_factor.instantiate_inv_variables()
# Make sure our inverse is something other than the identity.
sess.run(tf_variables.global_variables_initializer())
sess.run(block._input_factor.make_inverse_update_ops())
sess.run(block._output_factor.make_inverse_update_ops())
vector = np.arange(1, 17).reshape(8, 2).astype(np.float32)
output = block.multiply_inverse(array_ops.constant(vector))
self.assertAllClose([0.136455, 0.27291], sess.run(output)[0])
def testMultiplyInverseNotTupleWithBias(self):
with ops.Graph().as_default(), self.test_session() as sess:
random_seed.set_random_seed(200)
params = [random_ops.random_normal((2, 2, 2, 2))]
inputs = random_ops.random_normal((2, 2, 2, 2))
outputs = random_ops.random_normal((2, 2, 2, 2))
block = fb.ConvKFCBasicFB(
lc.LayerCollection(), params=params, padding='SAME')
block.register_additional_minibatch(inputs, outputs)
self.assertTrue(block._has_bias)
grads = outputs**2
block.instantiate_factors(((grads,),), 0.5)
block._input_factor.instantiate_cov_variables()
block._output_factor.instantiate_cov_variables()
block.register_inverse()
block._input_factor.instantiate_inv_variables()
block._output_factor.instantiate_inv_variables()
# Make sure our inverse is something other than the identity.
sess.run(tf_variables.global_variables_initializer())
sess.run(block._input_factor.make_inverse_update_ops())
sess.run(block._output_factor.make_inverse_update_ops())
vector = np.arange(1, 19).reshape(9, 2).astype(np.float32)
output = block.multiply_inverse(array_ops.constant(vector))
self.assertAllClose([0.136455, 0.27291], sess.run(output)[0])
def testMultiplyInverseAgainstExplicit(self):
with ops.Graph().as_default(), self.test_session() as sess:
random_seed.set_random_seed(200)
params = array_ops.zeros((2, 2, 2, 2))
inputs = array_ops.zeros((2, 2, 2, 2))
outputs = array_ops.zeros((2, 2, 2, 2))
block = fb.ConvKFCBasicFB(
lc.LayerCollection(), params=params, padding='SAME')
block.register_additional_minibatch(inputs, outputs)
grads = outputs**2
damping = 0. # This test is only valid without damping.
block.instantiate_factors(((grads,),), damping)
block._input_factor.instantiate_cov_variables()
block._output_factor.instantiate_cov_variables()
block.register_inverse()
block._input_factor.instantiate_inv_variables()
block._output_factor.instantiate_inv_variables()
sess.run(state_ops.assign(block._input_factor._cov, _make_psd(8)))
sess.run(state_ops.assign(block._output_factor._cov, _make_psd(2)))
sess.run(block._input_factor.make_inverse_update_ops())
sess.run(block._output_factor.make_inverse_update_ops())
v_flat = np.arange(16, dtype=np.float32)
vector = utils.column_to_tensors(params, array_ops.constant(v_flat))
output = block.multiply_inverse(vector)
output_flat = sess.run(utils.tensors_to_column(output)).ravel()
full = sess.run(block.full_fisher_block())
explicit = np.dot(np.linalg.inv(full + damping * np.eye(16)), v_flat)
self.assertAllClose(output_flat, explicit)
class FullyConnectedSeriesFBTest(test.TestCase):
def testFullyConnectedSeriesFBInit(self):
with ops.Graph().as_default():
random_seed.set_random_seed(200)
inputs = array_ops.constant([1., 2.])
outputs = array_ops.constant([3., 4.])
block = fb.FullyConnectedSeriesFB(lc.LayerCollection())
block.register_additional_minibatch([inputs], [outputs])
self.assertAllEqual([[outputs]], block.tensors_to_compute_grads())
def testInstantiateFactorsHasBias(self):
with ops.Graph().as_default():
random_seed.set_random_seed(200)
inputs = array_ops.constant([[1., 2.], [3., 4.]])
outputs = array_ops.constant([[3., 4.], [5., 6.]])
block = fb.FullyConnectedSeriesFB(
lc.LayerCollection(),
has_bias=True)
block.register_additional_minibatch([inputs], [outputs])
grads = outputs**2
block.instantiate_factors((((grads,),),), 0.5)
def testInstantiateFactorsNoBias(self):
with ops.Graph().as_default():
random_seed.set_random_seed(200)
inputs = array_ops.constant([[1., 2.], [3., 4.]])
outputs = array_ops.constant([[3., 4.], [5., 6.]])
block = fb.FullyConnectedSeriesFB(
lc.LayerCollection(),
has_bias=False)
block.register_additional_minibatch([inputs], [outputs])
grads = outputs**2
block.instantiate_factors((((grads,),),), 0.5)
def as_tensors(tensor_or_tuple):
"""Converts a potentially nested tuple of np.array to Tensors."""
if isinstance(tensor_or_tuple, (tuple, list)):
return tuple(as_tensors(t) for t in tensor_or_tuple)
return ops.convert_to_tensor(tensor_or_tuple)
if __name__ == '__main__':
test.main()
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class NotificationList(ListResource):
def __init__(self, version, chat_service_sid):
"""
Initialize the NotificationList
:param Version version: Version that contains the resource
:param chat_service_sid: The unique string that identifies the resource
:returns: twilio.rest.conversations.v1.service.configuration.notification.NotificationList
:rtype: twilio.rest.conversations.v1.service.configuration.notification.NotificationList
"""
super(NotificationList, self).__init__(version)
# Path Solution
self._solution = {'chat_service_sid': chat_service_sid, }
def get(self):
"""
Constructs a NotificationContext
:returns: twilio.rest.conversations.v1.service.configuration.notification.NotificationContext
:rtype: twilio.rest.conversations.v1.service.configuration.notification.NotificationContext
"""
return NotificationContext(self._version, chat_service_sid=self._solution['chat_service_sid'], )
def __call__(self):
"""
Constructs a NotificationContext
:returns: twilio.rest.conversations.v1.service.configuration.notification.NotificationContext
:rtype: twilio.rest.conversations.v1.service.configuration.notification.NotificationContext
"""
return NotificationContext(self._version, chat_service_sid=self._solution['chat_service_sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Conversations.V1.NotificationList>'
class NotificationPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the NotificationPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param chat_service_sid: The unique string that identifies the resource
:returns: twilio.rest.conversations.v1.service.configuration.notification.NotificationPage
:rtype: twilio.rest.conversations.v1.service.configuration.notification.NotificationPage
"""
super(NotificationPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of NotificationInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.conversations.v1.service.configuration.notification.NotificationInstance
:rtype: twilio.rest.conversations.v1.service.configuration.notification.NotificationInstance
"""
return NotificationInstance(
self._version,
payload,
chat_service_sid=self._solution['chat_service_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Conversations.V1.NotificationPage>'
class NotificationContext(InstanceContext):
def __init__(self, version, chat_service_sid):
"""
Initialize the NotificationContext
:param Version version: Version that contains the resource
:param chat_service_sid: The SID of the Conversation Service that the Configuration applies to.
:returns: twilio.rest.conversations.v1.service.configuration.notification.NotificationContext
:rtype: twilio.rest.conversations.v1.service.configuration.notification.NotificationContext
"""
super(NotificationContext, self).__init__(version)
# Path Solution
self._solution = {'chat_service_sid': chat_service_sid, }
self._uri = '/Services/{chat_service_sid}/Configuration/Notifications'.format(**self._solution)
def update(self, log_enabled=values.unset, new_message_enabled=values.unset,
new_message_template=values.unset, new_message_sound=values.unset,
new_message_badge_count_enabled=values.unset,
added_to_conversation_enabled=values.unset,
added_to_conversation_template=values.unset,
added_to_conversation_sound=values.unset,
removed_from_conversation_enabled=values.unset,
removed_from_conversation_template=values.unset,
removed_from_conversation_sound=values.unset,
new_message_with_media_enabled=values.unset,
new_message_with_media_template=values.unset):
"""
Update the NotificationInstance
        :param bool log_enabled: Whether notification logging is enabled.
:param bool new_message_enabled: Whether to send a notification when a new message is added to a conversation.
:param unicode new_message_template: The template to use to create the notification text displayed when a new message is added to a conversation.
:param unicode new_message_sound: The name of the sound to play when a new message is added to a conversation.
:param bool new_message_badge_count_enabled: Whether the new message badge is enabled.
:param bool added_to_conversation_enabled: Whether to send a notification when a participant is added to a conversation.
:param unicode added_to_conversation_template: The template to use to create the notification text displayed when a participant is added to a conversation.
:param unicode added_to_conversation_sound: The name of the sound to play when a participant is added to a conversation.
:param bool removed_from_conversation_enabled: Whether to send a notification to a user when they are removed from a conversation.
:param unicode removed_from_conversation_template: The template to use to create the notification text displayed to a user when they are removed.
:param unicode removed_from_conversation_sound: The name of the sound to play to a user when they are removed from a conversation.
:param bool new_message_with_media_enabled: Whether to send a notification when a new message with media/file attachments is added to a conversation.
:param unicode new_message_with_media_template: The template to use to create the notification text displayed when a new message with media/file attachments is added to a conversation.
:returns: The updated NotificationInstance
:rtype: twilio.rest.conversations.v1.service.configuration.notification.NotificationInstance
"""
data = values.of({
'LogEnabled': log_enabled,
'NewMessage.Enabled': new_message_enabled,
'NewMessage.Template': new_message_template,
'NewMessage.Sound': new_message_sound,
'NewMessage.BadgeCountEnabled': new_message_badge_count_enabled,
'AddedToConversation.Enabled': added_to_conversation_enabled,
'AddedToConversation.Template': added_to_conversation_template,
'AddedToConversation.Sound': added_to_conversation_sound,
'RemovedFromConversation.Enabled': removed_from_conversation_enabled,
'RemovedFromConversation.Template': removed_from_conversation_template,
'RemovedFromConversation.Sound': removed_from_conversation_sound,
'NewMessage.WithMedia.Enabled': new_message_with_media_enabled,
'NewMessage.WithMedia.Template': new_message_with_media_template,
})
payload = self._version.update(method='POST', uri=self._uri, data=data, )
return NotificationInstance(
self._version,
payload,
chat_service_sid=self._solution['chat_service_sid'],
)
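    # A hedged usage sketch (the client accessor chain and the SID below are assumptions, not
    # defined in this module):
    #
    #     from twilio.rest import Client
    #     client = Client(account_sid, auth_token)
    #     client.conversations \
    #           .services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
    #           .configuration \
    #           .notifications() \
    #           .update(new_message_enabled=True, new_message_sound='default')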
def fetch(self):
"""
Fetch the NotificationInstance
:returns: The fetched NotificationInstance
:rtype: twilio.rest.conversations.v1.service.configuration.notification.NotificationInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return NotificationInstance(
self._version,
payload,
chat_service_sid=self._solution['chat_service_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Conversations.V1.NotificationContext {}>'.format(context)
class NotificationInstance(InstanceResource):
def __init__(self, version, payload, chat_service_sid):
"""
Initialize the NotificationInstance
:returns: twilio.rest.conversations.v1.service.configuration.notification.NotificationInstance
:rtype: twilio.rest.conversations.v1.service.configuration.notification.NotificationInstance
"""
super(NotificationInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'chat_service_sid': payload.get('chat_service_sid'),
'new_message': payload.get('new_message'),
'added_to_conversation': payload.get('added_to_conversation'),
'removed_from_conversation': payload.get('removed_from_conversation'),
'log_enabled': payload.get('log_enabled'),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {'chat_service_sid': chat_service_sid, }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: NotificationContext for this NotificationInstance
:rtype: twilio.rest.conversations.v1.service.configuration.notification.NotificationContext
"""
if self._context is None:
self._context = NotificationContext(
self._version,
chat_service_sid=self._solution['chat_service_sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The unique ID of the Account responsible for this configuration.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def chat_service_sid(self):
"""
:returns: The SID of the Conversation Service that the Configuration applies to.
:rtype: unicode
"""
return self._properties['chat_service_sid']
@property
def new_message(self):
"""
:returns: The Push Notification configuration for New Messages.
:rtype: dict
"""
return self._properties['new_message']
@property
def added_to_conversation(self):
"""
:returns: The Push Notification configuration for being added to a Conversation.
:rtype: dict
"""
return self._properties['added_to_conversation']
@property
def removed_from_conversation(self):
"""
:returns: The Push Notification configuration for being removed from a Conversation.
:rtype: dict
"""
return self._properties['removed_from_conversation']
@property
def log_enabled(self):
"""
        :returns: Whether the notification logging is enabled.
:rtype: bool
"""
return self._properties['log_enabled']
@property
def url(self):
"""
:returns: An absolute URL for this configuration.
:rtype: unicode
"""
return self._properties['url']
def update(self, log_enabled=values.unset, new_message_enabled=values.unset,
new_message_template=values.unset, new_message_sound=values.unset,
new_message_badge_count_enabled=values.unset,
added_to_conversation_enabled=values.unset,
added_to_conversation_template=values.unset,
added_to_conversation_sound=values.unset,
removed_from_conversation_enabled=values.unset,
removed_from_conversation_template=values.unset,
removed_from_conversation_sound=values.unset,
new_message_with_media_enabled=values.unset,
new_message_with_media_template=values.unset):
"""
Update the NotificationInstance
        :param bool log_enabled: Whether the notification logging is enabled.
:param bool new_message_enabled: Whether to send a notification when a new message is added to a conversation.
:param unicode new_message_template: The template to use to create the notification text displayed when a new message is added to a conversation.
:param unicode new_message_sound: The name of the sound to play when a new message is added to a conversation.
:param bool new_message_badge_count_enabled: Whether the new message badge is enabled.
:param bool added_to_conversation_enabled: Whether to send a notification when a participant is added to a conversation.
:param unicode added_to_conversation_template: The template to use to create the notification text displayed when a participant is added to a conversation.
:param unicode added_to_conversation_sound: The name of the sound to play when a participant is added to a conversation.
:param bool removed_from_conversation_enabled: Whether to send a notification to a user when they are removed from a conversation.
:param unicode removed_from_conversation_template: The template to use to create the notification text displayed to a user when they are removed.
:param unicode removed_from_conversation_sound: The name of the sound to play to a user when they are removed from a conversation.
:param bool new_message_with_media_enabled: Whether to send a notification when a new message with media/file attachments is added to a conversation.
:param unicode new_message_with_media_template: The template to use to create the notification text displayed when a new message with media/file attachments is added to a conversation.
:returns: The updated NotificationInstance
:rtype: twilio.rest.conversations.v1.service.configuration.notification.NotificationInstance
"""
return self._proxy.update(
log_enabled=log_enabled,
new_message_enabled=new_message_enabled,
new_message_template=new_message_template,
new_message_sound=new_message_sound,
new_message_badge_count_enabled=new_message_badge_count_enabled,
added_to_conversation_enabled=added_to_conversation_enabled,
added_to_conversation_template=added_to_conversation_template,
added_to_conversation_sound=added_to_conversation_sound,
removed_from_conversation_enabled=removed_from_conversation_enabled,
removed_from_conversation_template=removed_from_conversation_template,
removed_from_conversation_sound=removed_from_conversation_sound,
new_message_with_media_enabled=new_message_with_media_enabled,
new_message_with_media_template=new_message_with_media_template,
)
def fetch(self):
"""
Fetch the NotificationInstance
:returns: The fetched NotificationInstance
:rtype: twilio.rest.conversations.v1.service.configuration.notification.NotificationInstance
"""
return self._proxy.fetch()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Conversations.V1.NotificationInstance {}>'.format(context)
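# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated bindings). It assumes an
# authenticated twilio Client and a Conversation Service SID are available; the
# resource path and SIDs below are placeholders/assumptions, not verified here.
#
# from twilio.rest import Client
#
# client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
# notification = client.conversations.v1 \
#     .services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#     .configuration \
#     .notifications() \
#     .update(log_enabled=True,
#             new_message_enabled=True,
#             new_message_template='New message in ${CONVERSATION}')
# print(notification.new_message)
# ---------------------------------------------------------------------------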
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(c) 2015 Nippon Telegraph and Telephone Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
recovery status management tool
"""
import MySQLdb
import datetime
import sys
import argparse
import subprocess
################################################################################
#
# (CLASS):recovery_status_manage
#
################################################################################
class recovery_status_manage(object):
"""
recovery status management class
"""
################################################################################
#
# (Constructor):__init__
#
################################################################################
def __init__(self):
parser = argparse.ArgumentParser(prog='recovery_status_manage.py', add_help=False)
parser.add_argument('--mode', help='list/update')
parser.add_argument('--uuid', help='uuid')
parser.add_argument('--db-user', help='mysql user name')
parser.add_argument('--db-password', help='mysql user password')
parser.add_argument('--db-host', help='mysql host name')
args = parser.parse_args()
#command input information check
if self._command_input_information_check(parser,args) == "NG":
return
msg = "recovery status manage execution start"
print msg
try:
#DB connection
db = self._db_connect(args.db_user,
args.db_password,
args.db_host)
#mode="list"
if args.mode == "list":
#ALL
if args.uuid == None:
sysout_sql = self._recovery_status_manage_list_all(args.db_user,
args.db_password,
args.db_host,
db)
#UUID
else:
sysout_sql = self._recovery_status_manage_list_uuid(args.uuid,
args.db_user,
args.db_password,
args.db_host,
db)
#mode="update"
else:
sysout_sql = self._recovery_status_manage_update(args.uuid,
args.db_user,
args.db_password,
args.db_host,
db)
#sysout
if sysout_sql != None:
subprocess.call(sysout_sql, shell=True)
except:
msg = "recovery status manage execution failure"
print msg
finally:
msg = "recovery status manage execution end"
print msg
################################################################################
#
# (METHOD):_command_input_information_check
#
################################################################################
def _command_input_information_check(self,parser,args):
result = "OK"
#command format and input parameter check
if (args.mode == None
or args.db_user == None
or args.db_password == None
or args.db_host == None):
result = "NG"
if args.mode == "list":
pass
elif args.mode == "update":
if args.uuid == None:
result = "NG"
else:
result = "NG"
#usage display
if result == "NG":
parser.print_help()
return result
################################################################################
#
# (METHOD):_db_connect
#
################################################################################
def _db_connect(self,
mysql_user_name,
mysql_user_password,
mysql_node_name):
try:
db = MySQLdb.connect(host=mysql_node_name,
db='vm_ha',
user=mysql_user_name,
passwd=mysql_user_password,
charset='utf8'
)
return db
except:
msg = "db connection failed"
print msg
raise
################################################################################
#
# (METHOD):_recovery_status_manage_list_all
#
################################################################################
def _recovery_status_manage_list_all(self,
mysql_user_name,
mysql_user_password,
mysql_node_name,
db):
# Execute SQL
cursor = db.cursor(MySQLdb.cursors.DictCursor)
sql = ("SELECT * FROM vm_list "
"WHERE deleted = 0 "
"AND (progress = 0 OR progress = 1 OR progress = 3)")
try:
row_cnt = cursor.execute(sql)
if row_cnt == 0:
msg = "none vm_list"
print msg
return None
# sysout
else:
sql = ("mysql --host=%s --database=vm_ha "
"--user=%s --password=%s "
"-e\"SELECT "
"create_at,"
"update_at,"
"uuid,"
"progress,"
"notification_id,"
"recover_by "
"FROM vm_list "
"WHERE deleted = 0 "
"AND (progress = 0 OR progress = 1 OR progress = 3)\";"
) % (mysql_node_name,
mysql_user_name,
mysql_user_password)
return sql
except:
msg = "vm_list select(all) failed"
print msg
raise
finally:
db.commit()
db.close()
################################################################################
#
# (METHOD):_recovery_status_manage_list_uuid
#
################################################################################
def _recovery_status_manage_list_uuid(self,
uuid,
mysql_user_name,
mysql_user_password,
mysql_node_name,
db):
# Execute SQL
cursor = db.cursor(MySQLdb.cursors.DictCursor)
sql = ("SELECT * FROM vm_list "
"WHERE uuid='%s' "
"AND deleted = 0 "
"AND (progress = 0 OR progress = 1 OR progress = 3)"
) % (uuid)
try:
row_cnt = cursor.execute(sql)
if row_cnt == 0:
msg = "none vm_list"
print msg
return None
# sysout
else:
sql = ("mysql --host=%s --database=vm_ha "
"--user=%s --password=%s "
"-e\"SELECT "
"create_at,"
"update_at,"
"uuid,"
"progress,"
"notification_id,"
"recover_by "
"FROM vm_list "
"WHERE uuid = '%s' "
"AND deleted = 0 "
"AND (progress = 0 OR progress = 1 OR progress = 3)\";"
) % (mysql_node_name,
mysql_user_name,
mysql_user_password,
uuid)
return sql
except:
msg = "vm_list select(uuid) failed"
print msg
raise
finally:
db.commit()
db.close()
################################################################################
#
# (METHOD):_recovery_status_manage_update
#
################################################################################
def _recovery_status_manage_update(self,
uuid,
mysql_user_name,
mysql_user_password,
mysql_node_name,
db):
# Execute SQL
cursor = db.cursor(MySQLdb.cursors.DictCursor)
sql = ("SELECT "
"* FROM vm_list "
"WHERE uuid='%s' AND deleted = 0 "
"AND (progress = 0 OR progress = 1)"
) % (uuid)
try:
row_cnt = cursor.execute(sql)
if row_cnt == 0:
msg = "none vm_list"
print msg
return None
else:
# update
update_at = datetime.datetime.now()
progress = "2"
sql = ("UPDATE vm_list "
"SET progress = %s ,update_at = '%s' "
"WHERE uuid = '%s' "
"AND deleted = 0 "
"AND (progress = 0 OR progress = 1)"
) % (progress, update_at,uuid)
cursor.execute(sql)
# sysout
sql = ("mysql --host=%s --database=vm_ha "
"--user=%s --password=%s "
"-e\"SELECT "
"create_at,"
"update_at,"
"uuid,"
"progress,"
"notification_id,"
"recover_by "
"FROM vm_list "
"WHERE uuid = '%s' AND update_at = '%s' "
"AND deleted = 0 "
"AND progress = 2\";"
) % (mysql_node_name,
mysql_user_name,
mysql_user_password,
uuid,
update_at)
return sql
except:
msg = "vm_list update failed"
print msg
raise
finally:
db.commit()
db.close()
################################################################################
if __name__ == '__main__':
recovery_status_manage()
##########################################################################################
#
#(command)
#
#[python recovery_status_manage.py --mode list --db-user root --db-password openstack --db-host localhost]
#[python recovery_status_manage.py --mode list --uuid DB1-UUID-0001 --db-user root --db-password openstack --db-host localhost]
#[python recovery_status_manage.py --mode update --uuid DB1-UUID-0001 --db-user root --db-password openstack --db-host localhost]
#
##########################################################################################
|
|
import shutil
import tempfile
import time
from ethereum import spv
import ethereum
import ethereum.db as db
import ethereum.opcodes as opcodes
import ethereum.abi as abi
from ethereum import slogging
from ethereum.slogging import LogRecorder, configure_logging, set_level
from ethereum.utils import to_string
from ethereum.config import Env
from ethereum._solidity import get_solidity
import rlp
from rlp.utils import decode_hex, encode_hex, ascii_chr
serpent = None
u = ethereum.utils
t = ethereum.transactions
b = ethereum.blocks
pb = ethereum.processblock
vm = ethereum.vm
accounts = []
keys = []
for i in range(10):
keys.append(u.sha3(to_string(i)))
accounts.append(u.privtoaddr(keys[-1]))
k0, k1, k2, k3, k4, k5, k6, k7, k8, k9 = keys[:10]
a0, a1, a2, a3, a4, a5, a6, a7, a8, a9 = accounts[:10]
languages = {}
_solidity = get_solidity()
if _solidity:
languages['solidity'] = _solidity
seed = 3 ** 160
def dict_without(d, *args):
o = {}
for k, v in list(d.items()):
if k not in args:
o[k] = v
return o
def dict_with(d, **kwargs):
o = {}
for k, v in list(d.items()):
o[k] = v
for k, v in list(kwargs.items()):
o[k] = v
return o
# Pseudo-RNG (deterministic for now for testing purposes)
def rand():
global seed
seed = pow(seed, 2, 2 ** 512)
return seed % 2 ** 256
class TransactionFailed(Exception):
pass
class ContractCreationFailed(Exception):
pass
class ABIContract():
def __init__(self, _state, _abi, address, listen=True, log_listener=None):
self.address = address
self._translator = abi.ContractTranslator(_abi)
self.abi = _abi
if listen:
if not log_listener:
listener = lambda log: self._translator.listen(log, noprint=False)
else:
def listener(log):
r = self._translator.listen(log, noprint=True)
if r:
log_listener(r)
_state.block.log_listeners.append(listener)
def kall_factory(f):
def kall(*args, **kwargs):
o = _state._send(kwargs.get('sender', k0),
self.address,
kwargs.get('value', 0),
self._translator.encode(f, args),
**dict_without(kwargs, 'sender', 'value', 'output'))
# Compute output data
if kwargs.get('output', '') == 'raw':
outdata = o['output']
elif not o['output']:
outdata = None
else:
outdata = self._translator.decode(f, o['output'])
outdata = outdata[0] if len(outdata) == 1 else outdata
# Format output
if kwargs.get('profiling', ''):
return dict_with(o, output=outdata)
else:
return outdata
return kall
for f in self._translator.function_data:
vars(self)[f] = kall_factory(f)
class state():
def __init__(self, num_accounts=len(keys)):
global serpent
if not serpent:
serpent = __import__('serpent')
self.temp_data_dir = tempfile.mkdtemp()
self.db = db.EphemDB()
self.env = Env(self.db)
o = {}
for i in range(num_accounts):
o[accounts[i]] = {"wei": 10 ** 24}
for i in range(1, 5):
o[u.int_to_addr(i)] = {"wei": 1}
self.block = b.genesis(self.env, start_alloc=o)
self.blocks = [self.block]
self.block.timestamp = 1410973349
self.block.coinbase = a0
self.block.gas_limit = 10 ** 9
def __del__(self):
shutil.rmtree(self.temp_data_dir)
def contract(self, code, sender=k0, endowment=0, language='serpent', gas=None):
if language not in languages:
languages[language] = __import__(language)
language = languages[language]
evm = language.compile(code)
o = self.evm(evm, sender, endowment)
assert len(self.block.get_code(o)), "Contract code empty"
return o
def abi_contract(self, code, sender=k0, endowment=0, language='serpent', contract_name='',
gas=None, log_listener=None, listen=True, **kwargs):
if contract_name:
assert language == 'solidity'
cn_args = dict(contract_name=contract_name)
else:
cn_args = kwargs
if language not in languages:
languages[language] = __import__(language)
language = languages[language]
evm = language.compile(code, **cn_args)
address = self.evm(evm, sender, endowment, gas)
assert len(self.block.get_code(address)), "Contract code empty"
_abi = language.mk_full_signature(code, **cn_args)
return ABIContract(self, _abi, address, listen=listen, log_listener=log_listener)
def evm(self, evm, sender=k0, endowment=0, gas=None):
sendnonce = self.block.get_nonce(u.privtoaddr(sender))
tx = t.contract(sendnonce, gas_price, gas_limit, endowment, evm)
tx.sign(sender)
if gas is not None:
tx.startgas = gas
# print('starting', tx.startgas, gas_limit)
(s, a) = pb.apply_transaction(self.block, tx)
if not s:
raise ContractCreationFailed()
return a
def call(*args, **kwargs):
raise Exception("Call deprecated. Please use the abi_contract "
"mechanism or send(sender, to, value, "
"data) directly, using the abi module to generate "
"data if needed")
def _send(self, sender, to, value, evmdata='', output=None,
funid=None, abi=None, profiling=0):
if funid is not None or abi is not None:
raise Exception("Send with funid+abi is deprecated. Please use"
" the abi_contract mechanism")
tm, g = time.time(), self.block.gas_used
sendnonce = self.block.get_nonce(u.privtoaddr(sender))
tx = t.Transaction(sendnonce, gas_price, gas_limit, to, value, evmdata)
self.last_tx = tx
tx.sign(sender)
recorder = LogRecorder() if profiling > 1 else None
(s, o) = pb.apply_transaction(self.block, tx)
if not s:
raise TransactionFailed()
out = {"output": o}
if profiling > 0:
zero_bytes = tx.data.count(ascii_chr(0))
non_zero_bytes = len(tx.data) - zero_bytes
intrinsic_gas_used = opcodes.GTXDATAZERO * zero_bytes + \
opcodes.GTXDATANONZERO * non_zero_bytes
ntm, ng = time.time(), self.block.gas_used
out["time"] = ntm - tm
out["gas"] = ng - g - intrinsic_gas_used
if profiling > 1:
trace = recorder.pop_records()
ops = [x['op'] for x in trace if x['event'] == 'vm']
opdict = {}
for op in ops:
opdict[op] = opdict.get(op, 0) + 1
out["ops"] = opdict
return out
def profile(self, *args, **kwargs):
kwargs['profiling'] = True
return self._send(*args, **kwargs)
def send(self, *args, **kwargs):
return self._send(*args, **kwargs)["output"]
def mkspv(self, sender, to, value, data=[], funid=None, abi=None):
sendnonce = self.block.get_nonce(u.privtoaddr(sender))
if funid is not None:
evmdata = serpent.encode_abi(funid, *abi)
else:
evmdata = serpent.encode_datalist(*data)
tx = t.Transaction(sendnonce, gas_price, gas_limit, to, value, evmdata)
self.last_tx = tx
tx.sign(sender)
return spv.mk_transaction_spv_proof(self.block, tx)
def verifyspv(self, sender, to, value, data=[],
funid=None, abi=None, proof=[]):
sendnonce = self.block.get_nonce(u.privtoaddr(sender))
if funid is not None:
evmdata = serpent.encode_abi(funid, *abi)
else:
evmdata = serpent.encode_datalist(*data)
tx = t.Transaction(sendnonce, gas_price, gas_limit, to, value, evmdata)
self.last_tx = tx
tx.sign(sender)
return spv.verify_transaction_spv_proof(self.block, tx, proof)
def trace(self, sender, to, value, data=[]):
# collect log events (independent of loglevel filters)
recorder = LogRecorder()
self.send(sender, to, value, data)
return recorder.pop_records()
def mine(self, n=1, coinbase=a0):
for i in range(n):
self.block.finalize()
self.block.commit_state()
self.db.put(self.block.hash, rlp.encode(self.block))
t = self.block.timestamp + 6 + rand() % 12
x = b.Block.init_from_parent(self.block, coinbase, timestamp=t)
self.block = x
self.blocks.append(self.block)
def snapshot(self):
return rlp.encode(self.block)
def revert(self, data):
self.block = rlp.decode(data, b.Block, env=self.env)
self.block._mutable = True
self.block.header._mutable = True
self.block._cached_rlp = None
self.block.header._cached_rlp = None
# logging
def set_logging_level(lvl=0):
trace_lvl_map = [
':info',
'eth.vm.log:trace',
':info,eth.vm.log:trace,eth.vm.exit:trace',
':info,eth.vm.log:trace,eth.vm.op:trace,eth.vm.stack:trace',
':info,eth.vm.log:trace,eth.vm.op:trace,eth.vm.stack:trace,' +
'eth.vm.storage:trace,eth.vm.memory:trace'
]
configure_logging(config_string=trace_lvl_map[lvl])
if lvl == 0:
set_level(None, 'info')
print('Set logging level: %d' % lvl)
def set_log_trace(logger_names=[]):
"""
sets all named loggers to level 'trace'
attention: vm.op.* are only active if vm.op is active
"""
for name in logger_names:
assert name in slogging.get_logger_names()
slogging.set_level(name, 'trace')
def enable_logging():
set_logging_level(1)
def disable_logging():
set_logging_level(0)
gas_limit = 3141592
gas_price = 1
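# Illustrative, self-contained demo of the tester API defined above. It assumes
# the optional `serpent` compiler is installed; the demo is kept under a
# __main__ guard so importing this module stays side-effect free.
if __name__ == '__main__':
    demo_code = """
def double(x):
    return(x * 2)
"""
    s = state()
    c = s.abi_contract(demo_code)
    # The contract call goes through ABIContract -> state._send -> apply_transaction
    assert c.double(21) == 42
    print('double(21) ->', c.double(21))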
|
|
# Copyright (c) 2010 matt
# Copyright (c) 2010-2011 Paul Colomiets
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2012 Craig Barnes
# Copyright (c) 2012, 2014-2015 Tycho Andersen
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2013 Julien Iguchi-Cartigny
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 dequis
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .base import Layout
from .. import window
DEFAULT_FLOAT_WM_TYPES = set([
'utility',
'notification',
'toolbar',
'splash',
'dialog',
])
DEFAULT_FLOAT_RULES = [
{"role": "About"},
]
class Floating(Layout):
"""
Floating layout, which does nothing with windows but handles focus order
"""
defaults = [
("border_focus", "#0000ff", "Border colour for the focused window."),
("border_normal", "#000000", "Border colour for un-focused winows."),
("border_width", 1, "Border width."),
("max_border_width", 0, "Border width for maximize."),
("fullscreen_border_width", 0, "Border width for fullscreen."),
("name", "floating", "Name of this layout."),
(
"auto_float_types",
DEFAULT_FLOAT_WM_TYPES,
"default wm types to automatically float"
),
]
def __init__(self, float_rules=None, **config):
"""
If you have certain apps that you always want to float you can
provide ``float_rules`` to do so.
``float_rules`` is a list of dictionaries containing:
{wname: WM_NAME, wmclass: WM_CLASS
role: WM_WINDOW_ROLE}
The keys must be specified as above. You only need one, but
you need to provide the value for it. When a new window is
        opened its ``match`` method is called with each of these
rules. If one matches, the window will float. The following
will float gimp and skype:
float_rules=[dict(wmclass="skype"), dict(wmclass="gimp")]
Specify these in the ``floating_layout`` in your config.
"""
Layout.__init__(self, **config)
self.clients = []
self.focused = None
self.float_rules = float_rules or DEFAULT_FLOAT_RULES
self.add_defaults(Floating.defaults)
def match(self, win):
"""
        Return True if the window should float by default.
"""
if win.window.get_wm_type() in self.auto_float_types:
return True
for rule_dict in self.float_rules:
if win.match(**rule_dict):
return True
return False
def to_screen(self, new_screen):
"""
Adjust offsets of clients within current screen
"""
for i, win in enumerate(self.clients):
if win.maximized:
win.enablemaximize()
continue
elif win.fullscreen:
win.enablemaximize(state=window.FULLSCREEN)
continue
offset_x = win._float_info['x']
offset_y = win._float_info['y']
new_x = new_screen.x + offset_x
new_y = new_screen.y + offset_y
right_edge = new_screen.x + new_screen.width
bottom_edge = new_screen.y + new_screen.height
while new_x > right_edge:
new_x = (new_x - new_screen.x) // 2
while new_y > bottom_edge:
new_y = (new_y - new_screen.y) // 2
win.x = new_x
win.y = new_y
win.group = new_screen.group
def focus_first(self):
if self.clients:
return self.clients[0]
def focus_next(self, win):
if win not in self.clients:
return
idx = self.clients.index(win)
if len(self.clients) > idx + 1:
return self.clients[idx + 1]
def focus_last(self):
if self.clients:
return self.clients[-1]
def focus_previous(self, win):
if win not in self.clients:
return
idx = self.clients.index(win)
if idx > 0:
return self.clients[idx - 1]
def focus(self, client):
self.focused = client
def blur(self):
self.focused = None
def configure(self, client, screen):
if client is self.focused:
bc = self.group.qtile.colorPixel(self.border_focus)
else:
bc = self.group.qtile.colorPixel(self.border_normal)
if client.maximized:
bw = self.max_border_width
elif client.fullscreen:
bw = self.fullscreen_border_width
else:
bw = self.border_width
client.place(
client.x,
client.y,
client.width,
client.height,
bw,
bc
)
client.unhide()
def clone(self, group):
c = Layout.clone(self, group)
c.clients = []
return c
def add(self, client):
self.clients.append(client)
self.focused = client
def remove(self, client):
if client not in self.clients:
return
self.focused = self.focus_next(client)
self.clients.remove(client)
return self.focused
def info(self):
d = Layout.info(self)
d["clients"] = [x.name for x in self.clients]
return d
def cmd_next(self):
client = self.focus_next(self.focused) or \
self.focus_first()
self.group.focus(client)
def cmd_previous(self):
client = self.focus_previous(self.focused) or \
self.focus_last()
self.group.focus(client)
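# Illustrative config-file sketch (commented out to keep this module import
# safe). As the class docstring notes, these rules go into ``floating_layout``
# in a qtile config; the wmclass values below are examples only.
#
# from libqtile import layout
#
# floating_layout = layout.Floating(
#     float_rules=[dict(wmclass="gimp"), dict(wmclass="skype")],
#     border_focus="#0000ff",
#     border_width=1,
# )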
|
|
from datetime import time
from datetime import timedelta
import pendulum
from .constants import SECS_PER_HOUR
from .constants import SECS_PER_MIN
from .constants import USECS_PER_SEC
from .duration import AbsoluteDuration
from .duration import Duration
from .mixins.default import FormattableMixin
class Time(FormattableMixin, time):
"""
Represents a time instance as hour, minute, second, microsecond.
"""
# String formatting
def __repr__(self):
us = ""
if self.microsecond:
us = f", {self.microsecond}"
tzinfo = ""
if self.tzinfo:
tzinfo = ", tzinfo={}".format(repr(self.tzinfo))
return "{}({}, {}, {}{}{})".format(
self.__class__.__name__, self.hour, self.minute, self.second, us, tzinfo
)
# Comparisons
def closest(self, dt1, dt2):
"""
Get the closest time from the instance.
:type dt1: Time or time
:type dt2: Time or time
:rtype: Time
"""
dt1 = self.__class__(dt1.hour, dt1.minute, dt1.second, dt1.microsecond)
dt2 = self.__class__(dt2.hour, dt2.minute, dt2.second, dt2.microsecond)
if self.diff(dt1).in_seconds() < self.diff(dt2).in_seconds():
return dt1
return dt2
def farthest(self, dt1, dt2):
"""
Get the farthest time from the instance.
:type dt1: Time or time
:type dt2: Time or time
:rtype: Time
"""
dt1 = self.__class__(dt1.hour, dt1.minute, dt1.second, dt1.microsecond)
dt2 = self.__class__(dt2.hour, dt2.minute, dt2.second, dt2.microsecond)
if self.diff(dt1).in_seconds() > self.diff(dt2).in_seconds():
return dt1
return dt2
    # ADDITIONS AND SUBTRACTIONS
def add(self, hours=0, minutes=0, seconds=0, microseconds=0):
"""
Add duration to the instance.
:param hours: The number of hours
:type hours: int
:param minutes: The number of minutes
:type minutes: int
:param seconds: The number of seconds
:type seconds: int
:param microseconds: The number of microseconds
:type microseconds: int
:rtype: Time
"""
from .datetime import DateTime
return (
DateTime.EPOCH.at(self.hour, self.minute, self.second, self.microsecond)
.add(
hours=hours, minutes=minutes, seconds=seconds, microseconds=microseconds
)
.time()
)
def subtract(self, hours=0, minutes=0, seconds=0, microseconds=0):
"""
        Subtract duration from the instance.
:param hours: The number of hours
:type hours: int
:param minutes: The number of minutes
:type minutes: int
:param seconds: The number of seconds
:type seconds: int
:param microseconds: The number of microseconds
:type microseconds: int
:rtype: Time
"""
from .datetime import DateTime
return (
DateTime.EPOCH.at(self.hour, self.minute, self.second, self.microsecond)
.subtract(
hours=hours, minutes=minutes, seconds=seconds, microseconds=microseconds
)
.time()
)
def add_timedelta(self, delta):
"""
Add timedelta duration to the instance.
:param delta: The timedelta instance
:type delta: datetime.timedelta
:rtype: Time
"""
if delta.days:
raise TypeError("Cannot add timedelta with days to Time.")
return self.add(seconds=delta.seconds, microseconds=delta.microseconds)
def subtract_timedelta(self, delta):
"""
Remove timedelta duration from the instance.
:param delta: The timedelta instance
:type delta: datetime.timedelta
:rtype: Time
"""
if delta.days:
raise TypeError("Cannot subtract timedelta with days to Time.")
return self.subtract(seconds=delta.seconds, microseconds=delta.microseconds)
def __add__(self, other):
if not isinstance(other, timedelta):
return NotImplemented
return self.add_timedelta(other)
def __sub__(self, other):
if not isinstance(other, (Time, time, timedelta)):
return NotImplemented
if isinstance(other, timedelta):
return self.subtract_timedelta(other)
if isinstance(other, time):
if other.tzinfo is not None:
raise TypeError("Cannot subtract aware times to or from Time.")
other = self.__class__(
other.hour, other.minute, other.second, other.microsecond
)
return other.diff(self, False)
def __rsub__(self, other):
if not isinstance(other, (Time, time)):
return NotImplemented
if isinstance(other, time):
if other.tzinfo is not None:
raise TypeError("Cannot subtract aware times to or from Time.")
other = self.__class__(
other.hour, other.minute, other.second, other.microsecond
)
return other.__sub__(self)
# DIFFERENCES
def diff(self, dt=None, abs=True):
"""
        Returns the difference between two Time objects as a Duration.
:type dt: Time or None
:param abs: Whether to return an absolute interval or not
:type abs: bool
:rtype: Duration
"""
if dt is None:
dt = pendulum.now().time()
else:
dt = self.__class__(dt.hour, dt.minute, dt.second, dt.microsecond)
us1 = (
self.hour * SECS_PER_HOUR + self.minute * SECS_PER_MIN + self.second
) * USECS_PER_SEC
us2 = (
dt.hour * SECS_PER_HOUR + dt.minute * SECS_PER_MIN + dt.second
) * USECS_PER_SEC
klass = Duration
if abs:
klass = AbsoluteDuration
return klass(microseconds=us2 - us1)
def diff_for_humans(self, other=None, absolute=False, locale=None):
"""
Get the difference in a human readable format in the current locale.
:type other: Time or time
:param absolute: removes time difference modifiers ago, after, etc
:type absolute: bool
:param locale: The locale to use for localization
:type locale: str
:rtype: str
"""
is_now = other is None
if is_now:
other = pendulum.now().time()
diff = self.diff(other)
return pendulum.format_diff(diff, is_now, absolute, locale)
# Compatibility methods
def replace(
self, hour=None, minute=None, second=None, microsecond=None, tzinfo=True
):
if tzinfo is True:
tzinfo = self.tzinfo
hour = hour if hour is not None else self.hour
minute = minute if minute is not None else self.minute
second = second if second is not None else self.second
microsecond = microsecond if microsecond is not None else self.microsecond
t = super().replace(hour, minute, second, microsecond, tzinfo=tzinfo)
return self.__class__(
t.hour, t.minute, t.second, t.microsecond, tzinfo=t.tzinfo
)
def __getnewargs__(self):
return (self,)
def _get_state(self, protocol=3):
tz = self.tzinfo
return (self.hour, self.minute, self.second, self.microsecond, tz)
def __reduce__(self):
return self.__reduce_ex__(2)
def __reduce_ex__(self, protocol):
return self.__class__, self._get_state(protocol)
Time.min = Time(0, 0, 0)
Time.max = Time(23, 59, 59, 999999)
Time.resolution = Duration(microseconds=1)
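# Illustrative sketch of the arithmetic helpers defined above, guarded so that
# importing the module stays side-effect free.
if __name__ == '__main__':
    start = Time(3, 20, 0)
    later = start.add(minutes=50)
    assert later == Time(4, 10, 0)
    assert start.diff(later).in_minutes() == 50
    assert start.closest(Time(3, 0), Time(5, 0)) == Time(3, 0)
    print(repr(later))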
|
|
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import dask.dataframe as dd
from dask.dataframe.utils import (shard_df_on_index, meta_nonempty, make_meta,
raise_on_meta_error)
import pytest
def test_shard_df_on_index():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
result = list(shard_df_on_index(df, [20, 50]))
assert list(result[0].index) == [10]
assert list(result[1].index) == [20, 30, 40]
assert list(result[2].index) == [50, 60]
def test_make_meta():
df = pd.DataFrame({'a': [1, 2, 3], 'b': list('abc'), 'c': [1., 2., 3.]},
index=[10, 20, 30])
# Pandas dataframe
meta = make_meta(df)
assert len(meta) == 0
assert (meta.dtypes == df.dtypes).all()
assert isinstance(meta.index, type(df.index))
# Pandas series
meta = make_meta(df.a)
assert len(meta) == 0
assert meta.dtype == df.a.dtype
assert isinstance(meta.index, type(df.index))
# Pandas index
meta = make_meta(df.index)
assert isinstance(meta, type(df.index))
assert len(meta) == 0
# Dask object
ddf = dd.from_pandas(df, npartitions=2)
assert make_meta(ddf) is ddf._meta
# Dict
meta = make_meta({'a': 'i8', 'b': 'O', 'c': 'f8'})
assert isinstance(meta, pd.DataFrame)
assert len(meta) == 0
assert (meta.dtypes == df.dtypes).all()
assert isinstance(meta.index, pd.RangeIndex)
# Iterable
meta = make_meta([('a', 'i8'), ('c', 'f8'), ('b', 'O')])
assert (meta.columns == ['a', 'c', 'b']).all()
assert len(meta) == 0
assert (meta.dtypes == df.dtypes[meta.dtypes.index]).all()
assert isinstance(meta.index, pd.RangeIndex)
# Tuple
meta = make_meta(('a', 'i8'))
assert isinstance(meta, pd.Series)
assert len(meta) == 0
assert meta.dtype == 'i8'
assert meta.name == 'a'
# With index
meta = make_meta({'a': 'i8', 'b': 'i4'}, pd.Int64Index([1, 2], name='foo'))
assert isinstance(meta.index, pd.Int64Index)
assert len(meta.index) == 0
meta = make_meta(('a', 'i8'), pd.Int64Index([1, 2], name='foo'))
assert isinstance(meta.index, pd.Int64Index)
assert len(meta.index) == 0
# Numpy scalar
meta = make_meta(np.float64(1.0))
assert isinstance(meta, np.float64)
# Python scalar
meta = make_meta(1.0)
assert isinstance(meta, np.float64)
# Timestamp
x = pd.Timestamp(2000, 1, 1)
meta = make_meta(x)
assert meta is x
# Dtype expressions
meta = make_meta('i8')
assert isinstance(meta, np.int64)
meta = make_meta(float)
assert isinstance(meta, np.dtype(float).type)
meta = make_meta(np.dtype('bool'))
assert isinstance(meta, np.bool_)
assert pytest.raises(TypeError, lambda: make_meta(None))
def test_meta_nonempty():
df1 = pd.DataFrame({'A': pd.Categorical(['Alice', 'Bob', 'Carol']),
'B': list('abc'),
'C': 'bar',
'D': np.float32(1),
'E': np.int32(1),
'F': pd.Timestamp('2016-01-01'),
'G': pd.date_range('2016-01-01', periods=3,
tz='America/New_York'),
'H': pd.Timedelta('1 hours', 'ms'),
'I': np.void(b' ')},
columns=list('DCBAHGFEI'))
df2 = df1.iloc[0:0]
df3 = meta_nonempty(df2)
assert (df3.dtypes == df2.dtypes).all()
assert df3['A'][0] == 'Alice'
assert df3['B'][0] == 'foo'
assert df3['C'][0] == 'foo'
assert df3['D'][0] == np.float32(1)
assert df3['D'][0].dtype == 'f4'
assert df3['E'][0] == np.int32(1)
assert df3['E'][0].dtype == 'i4'
assert df3['F'][0] == pd.Timestamp('1970-01-01 00:00:00')
assert df3['G'][0] == pd.Timestamp('1970-01-01 00:00:00',
tz='America/New_York')
assert df3['H'][0] == pd.Timedelta('1', 'ms')
assert df3['I'][0] == 'foo'
s = meta_nonempty(df2['A'])
assert s.dtype == df2['A'].dtype
assert (df3['A'] == s).all()
def test_meta_duplicated():
df = pd.DataFrame(columns=['A', 'A', 'B'])
res = meta_nonempty(df)
exp = pd.DataFrame([['foo', 'foo', 'foo'],
['foo', 'foo', 'foo']],
index=['a', 'b'],
columns=['A', 'A', 'B'])
tm.assert_frame_equal(res, exp)
def test_meta_nonempty_index():
idx = pd.RangeIndex(1, name='foo')
res = meta_nonempty(idx)
assert type(res) is pd.RangeIndex
assert res.name == idx.name
idx = pd.Int64Index([1], name='foo')
res = meta_nonempty(idx)
assert type(res) is pd.Int64Index
assert res.name == idx.name
idx = pd.Index(['a'], name='foo')
res = meta_nonempty(idx)
assert type(res) is pd.Index
assert res.name == idx.name
idx = pd.DatetimeIndex(['1970-01-01'], freq='d',
tz='America/New_York', name='foo')
res = meta_nonempty(idx)
assert type(res) is pd.DatetimeIndex
assert res.tz == idx.tz
assert res.freq == idx.freq
assert res.name == idx.name
idx = pd.PeriodIndex(['1970-01-01'], freq='d', name='foo')
res = meta_nonempty(idx)
assert type(res) is pd.PeriodIndex
assert res.freq == idx.freq
assert res.name == idx.name
idx = pd.TimedeltaIndex([np.timedelta64(1, 'D')], freq='d', name='foo')
res = meta_nonempty(idx)
assert type(res) is pd.TimedeltaIndex
assert res.freq == idx.freq
assert res.name == idx.name
idx = pd.CategoricalIndex(['a'], ['a', 'b'], ordered=True, name='foo')
res = meta_nonempty(idx)
assert type(res) is pd.CategoricalIndex
assert (res.categories == idx.categories).all()
assert res.ordered == idx.ordered
assert res.name == idx.name
levels = [pd.Int64Index([1], name='a'),
pd.Float64Index([1.0], name='b')]
idx = pd.MultiIndex(levels=levels, labels=[[0], [0]], names=['a', 'b'])
res = meta_nonempty(idx)
assert type(res) is pd.MultiIndex
for idx1, idx2 in zip(idx.levels, res.levels):
assert type(idx1) is type(idx2)
assert idx1.name == idx2.name
assert res.names == idx.names
def test_meta_nonempty_scalar():
meta = meta_nonempty(np.float64(1.0))
assert isinstance(meta, np.float64)
x = pd.Timestamp(2000, 1, 1)
meta = meta_nonempty(x)
assert meta is x
def test_raise_on_meta_error():
try:
with raise_on_meta_error():
raise RuntimeError("Bad stuff")
except Exception as e:
assert e.args[0].startswith("Metadata inference failed.\n")
assert 'RuntimeError' in e.args[0]
try:
with raise_on_meta_error("myfunc"):
raise RuntimeError("Bad stuff")
except Exception as e:
assert e.args[0].startswith("Metadata inference failed in `myfunc`.\n")
assert 'RuntimeError' in e.args[0]
|
|
"""Support for Zabbix."""
from contextlib import suppress
import json
import logging
import math
import queue
import threading
import time
from urllib.error import HTTPError
from urllib.parse import urljoin
from pyzabbix import ZabbixAPI, ZabbixAPIException, ZabbixMetric, ZabbixSender
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PATH,
CONF_SSL,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
EVENT_STATE_CHANGED,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import event as event_helper, state as state_helper
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entityfilter import (
INCLUDE_EXCLUDE_BASE_FILTER_SCHEMA,
convert_include_exclude_filter,
)
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)
CONF_PUBLISH_STATES_HOST = "publish_states_host"
DEFAULT_SSL = False
DEFAULT_PATH = "zabbix"
DOMAIN = "zabbix"
TIMEOUT = 5
RETRY_DELAY = 20
QUEUE_BACKLOG_SECONDS = 30
RETRY_INTERVAL = 60 # seconds
RETRY_MESSAGE = f"%s Retrying in {RETRY_INTERVAL} seconds."
BATCH_TIMEOUT = 1
BATCH_BUFFER_SIZE = 100
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: INCLUDE_EXCLUDE_BASE_FILTER_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PUBLISH_STATES_HOST): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the Zabbix component."""
conf = config[DOMAIN]
protocol = "https" if conf[CONF_SSL] else "http"
url = urljoin(f"{protocol}://{conf[CONF_HOST]}", conf[CONF_PATH])
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
publish_states_host = conf.get(CONF_PUBLISH_STATES_HOST)
entities_filter = convert_include_exclude_filter(conf)
try:
zapi = ZabbixAPI(url=url, user=username, password=password)
_LOGGER.info("Connected to Zabbix API Version %s", zapi.api_version())
except ZabbixAPIException as login_exception:
_LOGGER.error("Unable to login to the Zabbix API: %s", login_exception)
return False
except HTTPError as http_error:
_LOGGER.error("HTTPError when connecting to Zabbix API: %s", http_error)
zapi = None
_LOGGER.error(RETRY_MESSAGE, http_error)
event_helper.call_later(hass, RETRY_INTERVAL, lambda _: setup(hass, config))
return True
hass.data[DOMAIN] = zapi
def event_to_metrics(event, float_keys, string_keys):
"""Add an event to the outgoing Zabbix list."""
state = event.data.get("new_state")
if state is None or state.state in (STATE_UNKNOWN, "", STATE_UNAVAILABLE):
return
entity_id = state.entity_id
if not entities_filter(entity_id):
return
floats = {}
strings = {}
try:
_state_as_value = float(state.state)
floats[entity_id] = _state_as_value
except ValueError:
try:
_state_as_value = float(state_helper.state_as_number(state))
floats[entity_id] = _state_as_value
except ValueError:
strings[entity_id] = state.state
for key, value in state.attributes.items():
            # For each attribute value, try to cast it to a float; if that
            # fails, fall back to storing the value as a string.
attribute_id = f"{entity_id}/{key}"
try:
float_value = float(value)
except (ValueError, TypeError):
float_value = None
if float_value is None or not math.isfinite(float_value):
strings[attribute_id] = str(value)
else:
floats[attribute_id] = float_value
metrics = []
float_keys_count = len(float_keys)
float_keys.update(floats)
if len(float_keys) != float_keys_count:
floats_discovery = []
for float_key in float_keys:
floats_discovery.append({"{#KEY}": float_key})
metric = ZabbixMetric(
publish_states_host,
"homeassistant.floats_discovery",
json.dumps(floats_discovery),
)
metrics.append(metric)
for key, value in floats.items():
metric = ZabbixMetric(
publish_states_host, f"homeassistant.float[{key}]", value
)
metrics.append(metric)
string_keys.update(strings)
return metrics
if publish_states_host:
zabbix_sender = ZabbixSender(zabbix_server=conf[CONF_HOST])
instance = ZabbixThread(hass, zabbix_sender, event_to_metrics)
instance.setup(hass)
return True
class ZabbixThread(threading.Thread):
"""A threaded event handler class."""
MAX_TRIES = 3
def __init__(self, hass, zabbix_sender, event_to_metrics):
"""Initialize the listener."""
threading.Thread.__init__(self, name="Zabbix")
self.queue = queue.Queue()
self.zabbix_sender = zabbix_sender
self.event_to_metrics = event_to_metrics
self.write_errors = 0
self.shutdown = False
self.float_keys = set()
self.string_keys = set()
def setup(self, hass):
"""Set up the thread and start it."""
hass.bus.listen(EVENT_STATE_CHANGED, self._event_listener)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, self._shutdown)
self.start()
_LOGGER.debug("Started publishing state changes to Zabbix")
def _shutdown(self, event):
"""Shut down the thread."""
self.queue.put(None)
self.join()
@callback
def _event_listener(self, event):
"""Listen for new messages on the bus and queue them for Zabbix."""
item = (time.monotonic(), event)
self.queue.put(item)
def get_metrics(self):
"""Return a batch of events formatted for writing."""
queue_seconds = QUEUE_BACKLOG_SECONDS + self.MAX_TRIES * RETRY_DELAY
count = 0
metrics = []
dropped = 0
with suppress(queue.Empty):
while len(metrics) < BATCH_BUFFER_SIZE and not self.shutdown:
timeout = None if count == 0 else BATCH_TIMEOUT
item = self.queue.get(timeout=timeout)
count += 1
if item is None:
self.shutdown = True
else:
timestamp, event = item
age = time.monotonic() - timestamp
if age < queue_seconds:
event_metrics = self.event_to_metrics(
event, self.float_keys, self.string_keys
)
if event_metrics:
metrics += event_metrics
else:
dropped += 1
if dropped:
_LOGGER.warning("Catching up, dropped %d old events", dropped)
return count, metrics
def write_to_zabbix(self, metrics):
"""Write preprocessed events to zabbix, with retry."""
for retry in range(self.MAX_TRIES + 1):
try:
self.zabbix_sender.send(metrics)
if self.write_errors:
_LOGGER.error("Resumed, lost %d events", self.write_errors)
self.write_errors = 0
_LOGGER.debug("Wrote %d metrics", len(metrics))
break
except OSError as err:
if retry < self.MAX_TRIES:
time.sleep(RETRY_DELAY)
else:
if not self.write_errors:
_LOGGER.error("Write error: %s", err)
self.write_errors += len(metrics)
def run(self):
"""Process incoming events."""
while not self.shutdown:
count, metrics = self.get_metrics()
if metrics:
self.write_to_zabbix(metrics)
for _ in range(count):
self.queue.task_done()
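# Minimal sketch showing the shape of configuration that CONFIG_SCHEMA above
# accepts; host names and credentials are placeholders. Guarded so that
# importing the component is unaffected.
if __name__ == '__main__':
    validated = CONFIG_SCHEMA(
        {
            DOMAIN: {
                CONF_HOST: "zabbix.example.com",
                CONF_USERNAME: "homeassistant",
                CONF_PASSWORD: "changeme",
                CONF_PUBLISH_STATES_HOST: "homeassistant",
            }
        }
    )
    print(validated[DOMAIN][CONF_PATH])  # falls back to the "zabbix" default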
|
|
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pyOCD.pyDAPAccess import DAPAccess
import logging
from time import sleep
# !! These values are A[2:3] and not A[3:2]
DP_REG = {'IDCODE': DAPAccess.REG.DP_0x0,
'ABORT': DAPAccess.REG.DP_0x0,
'CTRL_STAT': DAPAccess.REG.DP_0x4,
'SELECT': DAPAccess.REG.DP_0x8
}
AP_REG = {'CSW' : 0x00,
'TAR' : 0x04,
'DRW' : 0x0C,
'IDR' : 0xFC
}
# DP Control / Status Register bit definitions
CTRLSTAT_STICKYORUN = 0x00000002
CTRLSTAT_STICKYCMP = 0x00000010
CTRLSTAT_STICKYERR = 0x00000020
IDCODE = 0 << 2
AP_ACC = 1 << 0
DP_ACC = 0 << 0
READ = 1 << 1
WRITE = 0 << 1
VALUE_MATCH = 1 << 4
MATCH_MASK = 1 << 5
APBANKSEL = 0x000000f0
# AP Control and Status Word definitions
CSW_SIZE = 0x00000007
CSW_SIZE8 = 0x00000000
CSW_SIZE16 = 0x00000001
CSW_SIZE32 = 0x00000002
CSW_ADDRINC = 0x00000030
CSW_NADDRINC = 0x00000000
CSW_SADDRINC = 0x00000010
CSW_PADDRINC = 0x00000020
CSW_DBGSTAT = 0x00000040
CSW_TINPROG = 0x00000080
CSW_HPROT = 0x02000000
CSW_MSTRTYPE = 0x20000000
CSW_MSTRCORE = 0x00000000
CSW_MSTRDBG = 0x20000000
CSW_RESERVED = 0x01000000
CSW_VALUE = (CSW_RESERVED | CSW_MSTRDBG | CSW_HPROT | CSW_DBGSTAT | CSW_SADDRINC)
TRANSFER_SIZE = {8: CSW_SIZE8,
16: CSW_SIZE16,
32: CSW_SIZE32
}
COMMANDS_PER_DAP_TRANSFER = 12
def _ap_addr_to_reg(addr):
return DAPAccess.REG(4 + ((addr & 0x0c) >> 2))
class Dap(object):
"""
This class implements the CMSIS-DAP protocol
"""
def __init__(self, link):
self.link = link
self.csw = -1
self.dp_select = -1
def init(self):
self._clear_sticky_err()
def writeMem(self, addr, data, transfer_size=32):
self.writeAP(AP_REG['CSW'], CSW_VALUE | TRANSFER_SIZE[transfer_size])
if transfer_size == 8:
data = data << ((addr & 0x03) << 3)
elif transfer_size == 16:
data = data << ((addr & 0x02) << 3)
try:
reg = _ap_addr_to_reg(WRITE | AP_ACC | AP_REG['TAR'])
self.link.write_reg(reg, addr)
reg = _ap_addr_to_reg(WRITE | AP_ACC | AP_REG['DRW'])
self.link.write_reg(reg, data)
except DAPAccess.Error as error:
self._handle_error(error)
raise
def readMem(self, addr, transfer_size=32, now=True):
res = None
try:
self.writeAP(AP_REG['CSW'], CSW_VALUE |
TRANSFER_SIZE[transfer_size])
reg = _ap_addr_to_reg(WRITE | AP_ACC | AP_REG['TAR'])
self.link.write_reg(reg, addr)
reg = _ap_addr_to_reg(READ | AP_ACC | AP_REG['DRW'])
result_cb = self.link.read_reg(reg, now=False)
except DAPAccess.Error as error:
self._handle_error(error)
raise
def readMemCb():
try:
res = result_cb()
if transfer_size == 8:
res = (res >> ((addr & 0x03) << 3) & 0xff)
elif transfer_size == 16:
res = (res >> ((addr & 0x02) << 3) & 0xffff)
except DAPAccess.Error as error:
self._handle_error(error)
raise
return res
if now:
return readMemCb()
else:
return readMemCb
# write aligned word ("data" are words)
def writeBlock32(self, addr, data):
# put address in TAR
self.writeAP(AP_REG['CSW'], CSW_VALUE | CSW_SIZE32)
self.writeAP(AP_REG['TAR'], addr)
try:
reg = _ap_addr_to_reg(WRITE | AP_ACC | AP_REG['DRW'])
self.link.reg_write_repeat(len(data), reg, data)
except DAPAccess.Error as error:
self._handle_error(error)
raise
# read aligned word (the size is in words)
def readBlock32(self, addr, size):
# put address in TAR
self.writeAP(AP_REG['CSW'], CSW_VALUE | CSW_SIZE32)
self.writeAP(AP_REG['TAR'], addr)
try:
reg = _ap_addr_to_reg(READ | AP_ACC | AP_REG['DRW'])
resp = self.link.reg_read_repeat(size, reg)
except DAPAccess.Error as error:
self._handle_error(error)
raise
return resp
def readDP(self, addr, now=True):
assert addr in DAPAccess.REG
try:
result_cb = self.link.read_reg(addr, now=False)
except DAPAccess.Error as error:
self._handle_error(error)
raise
def readDPCb():
try:
return result_cb()
except DAPAccess.Error as error:
self._handle_error(error)
raise
if now:
return readDPCb()
else:
return readDPCb
def writeDP(self, addr, data):
assert addr in DAPAccess.REG
if addr == DP_REG['SELECT']:
if data == self.dp_select:
return
self.dp_select = data
try:
self.link.write_reg(addr, data)
except DAPAccess.Error as error:
self._handle_error(error)
raise
return True
def writeAP(self, addr, data):
assert type(addr) in (int, long)
ap_sel = addr & 0xff000000
bank_sel = addr & APBANKSEL
self.writeDP(DP_REG['SELECT'], ap_sel | bank_sel)
if addr == AP_REG['CSW']:
if data == self.csw:
return
self.csw = data
ap_reg = _ap_addr_to_reg(WRITE | AP_ACC | (addr & 0x0c))
try:
self.link.write_reg(ap_reg, data)
except DAPAccess.Error as error:
self._handle_error(error)
raise
return True
def readAP(self, addr, now=True):
assert type(addr) in (int, long)
res = None
ap_reg = _ap_addr_to_reg(READ | AP_ACC | (addr & 0x0c))
try:
ap_sel = addr & 0xff000000
bank_sel = addr & APBANKSEL
self.writeDP(DP_REG['SELECT'], ap_sel | bank_sel)
result_cb = self.link.read_reg(ap_reg, now=False)
except DAPAccess.Error as error:
self._handle_error(error)
raise
def readAPCb():
try:
return result_cb()
except DAPAccess.Error as error:
self._handle_error(error)
raise
if now:
return readAPCb()
else:
return readAPCb
def _handle_error(self, error):
# Invalidate cached registers
self.csw = -1
self.dp_select = -1
# Clear sticky error for Fault errors only
if isinstance(error, DAPAccess.TransferFaultError):
self._clear_sticky_err()
def _clear_sticky_err(self):
mode = self.link.get_swj_mode()
if mode == DAPAccess.PORT.SWD:
self.link.write_reg(DAPAccess.REG.DP_0x0, (1 << 2))
elif mode == DAPAccess.PORT.JTAG:
self.link.write_reg(DP_REG['CTRL_STAT'], CTRLSTAT_STICKYERR)
else:
assert False
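# Illustrative sketch of driving the Dap class above (commented out: acquiring
# a probe requires hardware, and the exact enumeration call depends on the
# pyDAPAccess backend, so the lines below are assumptions, not verified usage).
#
# link = DAPAccess.get_connected_devices()[0]
# link.open()
# dap = Dap(link)
# dap.init()
# dap.writeMem(0x20000000, 0x12345678)
# assert dap.readMem(0x20000000) == 0x12345678
# link.close()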
|
|
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from helpdesk.models import Queue, Ticket
from helpdesk import settings
class PerQueueStaffMembershipTestCase(TestCase):
IDENTIFIERS = (1, 2)
def setUp(self):
"""
Create user_1 with access to queue_1 containing 1 ticket
and user_2 with access to queue_2 containing 2 tickets
and superuser who should be able to access both queues
"""
self.HELPDESK_ENABLE_PER_QUEUE_STAFF_PERMISSION = settings.HELPDESK_ENABLE_PER_QUEUE_STAFF_PERMISSION
settings.HELPDESK_ENABLE_PER_QUEUE_STAFF_PERMISSION = True
self.client = Client()
User = get_user_model()
self.superuser = User.objects.create(
username='superuser',
is_staff=True,
is_superuser=True,
)
self.superuser.set_password('superuser')
self.superuser.save()
for identifier in self.IDENTIFIERS:
queue = self.__dict__['queue_%d' % identifier] = Queue.objects.create(
title='Queue %d' % identifier,
slug='q%d' % identifier,
)
user = self.__dict__['user_%d' % identifier] = User.objects.create(
username='User_%d' % identifier,
is_staff=True,
)
user.set_password(identifier)
user.save()
# The prefix 'helpdesk.' must be trimmed
p = Permission.objects.get(codename=queue.permission_name[9:])
user.user_permissions.add(p)
for ticket_number in range(1, identifier + 1):
Ticket.objects.create(
title='Unassigned Ticket %d in Queue %d' % (ticket_number, identifier),
queue=queue,
)
Ticket.objects.create(
title='Ticket %d in Queue %d Assigned to User_%d' % (ticket_number, identifier, identifier),
queue=queue,
assigned_to=user,
)
def tearDown(self):
"""
Reset HELPDESK_ENABLE_PER_QUEUE_STAFF_MEMBERSHIP to original value
"""
settings.HELPDESK_ENABLE_PER_QUEUE_STAFF_PERMISSION = self.HELPDESK_ENABLE_PER_QUEUE_STAFF_PERMISSION
def test_dashboard_ticket_counts(self):
"""
Check that the regular users' dashboard only shows 1 of the 2 queues,
that user_1 only sees a total of 1 ticket, that user_2 sees a total of 2
tickets, but that the superuser's dashboard shows all queues and tickets.
"""
# Regular users
for identifier in self.IDENTIFIERS:
self.client.login(username='User_%d' % identifier, password=identifier)
response = self.client.get(reverse('helpdesk_dashboard'))
self.assertEqual(
len(response.context['unassigned_tickets']),
identifier,
'Unassigned tickets were not properly limited by queue membership'
)
self.assertEqual(
len(response.context['dash_tickets']),
1,
'The queues in dash_tickets were not properly limited by queue membership'
)
self.assertEqual(
response.context['dash_tickets'][0]['open'],
identifier * 2,
'The tickets in dash_tickets were not properly limited by queue membership'
)
self.assertEqual(
response.context['basic_ticket_stats']['open_ticket_stats'][0][1],
identifier * 2,
'Basic ticket stats were not properly limited by queue membership'
)
# Superuser
self.client.login(username='superuser', password='superuser')
response = self.client.get(reverse('helpdesk_dashboard'))
self.assertEqual(
len(response.context['unassigned_tickets']),
3,
'Unassigned tickets were limited by queue membership for a superuser'
)
self.assertEqual(
len(response.context['dash_tickets']),
2,
'The queues in dash_tickets were limited by queue membership for a superuser'
)
self.assertEqual(
response.context['dash_tickets'][0]['open'] +
response.context['dash_tickets'][1]['open'],
6,
'The tickets in dash_tickets were limited by queue membership for a superuser'
)
self.assertEqual(
response.context['basic_ticket_stats']['open_ticket_stats'][0][1] +
response.context['basic_ticket_stats']['open_ticket_stats'][1][1],
6,
'Basic ticket stats were limited by queue membership for a superuser'
)
def test_ticket_list_per_queue_user_restrictions(self):
"""
Ensure that while the superuser can list all tickets, user_1 can only
list the 1 ticket in his queue and user_2 can list only the 2 tickets
in his queue.
"""
# Regular users
for identifier in self.IDENTIFIERS:
self.client.login(username='User_%d' % identifier, password=identifier)
response = self.client.get(reverse('helpdesk_list'))
self.assertEqual(
len(response.context['tickets']),
identifier * 2,
'Ticket list was not properly limited by queue membership'
)
self.assertEqual(
len(response.context['queue_choices']),
1,
'Queue choices were not properly limited by queue membership'
)
self.assertEqual(
response.context['queue_choices'][0],
Queue.objects.get(title="Queue %d" % identifier),
'Queue choices were not properly limited by queue membership'
)
# Superuser
self.client.login(username='superuser', password='superuser')
response = self.client.get(reverse('helpdesk_list'))
self.assertEqual(
len(response.context['tickets']),
6,
'Ticket list was limited by queue membership for a superuser'
)
def test_ticket_reports_per_queue_user_restrictions(self):
"""
Ensure that while the superuser can generate reports on all queues and
tickets, user_1 can only generate reports for queue 1 and user_2 can
only do so for queue 2
"""
# Regular users
for identifier in self.IDENTIFIERS:
self.client.login(username='User_%d' % identifier, password=identifier)
response = self.client.get(
reverse('helpdesk_run_report', kwargs={'report': 'userqueue'})
)
# Only two columns of data should be present: ticket counts for
# unassigned and this user only
self.assertEqual(
len(response.context['data']),
2,
'Queues in report were not properly limited by queue membership'
)
# Each user should see a total number of tickets equal to twice their ID
self.assertEqual(
sum([sum(user_tickets[1:]) for user_tickets in response.context['data']]),
identifier * 2,
'Tickets in report were not properly limited by queue membership'
)
# Each user should only be able to pick 1 queue
self.assertEqual(
len(response.context['headings']),
2,
'Queue choices were not properly limited by queue membership'
)
# The queue each user can pick should be the queue named after their ID
self.assertEqual(
response.context['headings'][1],
"Queue %d" % identifier,
'Queue choices were not properly limited by queue membership'
)
# Superuser
self.client.login(username='superuser', password='superuser')
response = self.client.get(
reverse('helpdesk_run_report', kwargs={'report': 'userqueue'})
)
        # Superuser should see ticket counts for both queues, which means
        # three columns: unassigned plus user 1 and user 2
self.assertEqual(
len(response.context['data'][0]),
3,
'Queues in report were improperly limited by queue membership for a superuser'
)
        # Superuser should see the total ticket count of all six tickets
self.assertEqual(
sum([sum(user_tickets[1:]) for user_tickets in response.context['data']]),
6,
'Tickets in report were improperly limited by queue membership for a superuser'
)
self.assertEqual(
len(response.context['headings']),
3,
'Queue choices were improperly limited by queue membership for a superuser'
)
|
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the deploy domain command."""
import os.path
from shutil import rmtree
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDeployDomain(TestBrokerCommand):
def head_commit(self, sandbox, ref="HEAD"):
sandboxdir = os.path.join(self.sandboxdir, sandbox)
head, _ = self.gitcommand(["rev-parse", "%s^{commit}" % ref],
cwd=sandboxdir)
head = head.strip()
return head
def test_100_deploychangetest1domain(self):
command = ["deploy", "--source", "changetest1",
"--target", "deployable", "--reason", "Test reason"]
out = self.statustest(command)
self.matchoutput(out, "Updating the checked out copy of domain "
"deployable...", command)
def test_110_verifydeploy(self):
template = self.find_template("aquilon", "archetype", "base",
domain="deployable")
with open(template) as f:
contents = f.readlines()
self.assertEqual(contents[-1], "#Added by unittest\n")
def test_110_verifydeploylog(self):
kingdir = self.config.get("broker", "kingdir")
command = ["show", "--no-patch", "--pretty=full", "deployable"]
out, _ = self.gitcommand(command, cwd=kingdir)
self.matchoutput(out, "User:", command)
self.matchoutput(out, "Request-ID:", command)
self.matchoutput(out, "Reason: Test reason", command)
self.matchclean(out, "Justification:", command)
self.matchclean(out, "Code-Review-URL", command)
self.matchclean(out, "Testing-URL", command)
author_email = self.config.get("broker", "git_author_email")
self.matchoutput(out, "Author: %s <%s>" % (self.user, author_email),
command)
def test_120_deployfail(self):
command = ["deploy", "--source", "changetest1",
"--target", "prod"]
_, err = self.failuretest(command, 4)
self.matchoutput(err,
"Domain prod is under change management control. "
"Please specify --justification.",
command)
def test_120_deploydryrun(self):
kingdir = self.config.get("broker", "kingdir")
old_prod, _ = self.gitcommand(["rev-list", "--max-count=1", "prod"],
cwd=kingdir)
command = ["deploy", "--source", "changetest1",
"--target", "prod", "--dryrun"]
self.successtest(command)
new_prod, _ = self.gitcommand(["rev-list", "--max-count=1", "prod"],
cwd=kingdir)
self.assertEqual(old_prod, new_prod,
"Domain prod changed despite --dryrun")
def test_120_deploybadjustification(self):
command = ["deploy", "--source", "changetest1", "--target", "prod",
"--justification", "I felt like deploying changes."]
out = self.badrequesttest(command)
self.matchoutput(out, "Failed to parse the justification", command)
def test_123_request_review(self):
command = ["request_review", "--source", "changetest1", "--target", "prod"]
self.noouttest(command)
def test_123_request_review_tracking(self):
command = ["request_review", "--source", "changetest1", "--target", "ut-prod"]
out = self.badrequesttest(command)
self.matchoutput(out, "The target needs to be a non-tracking domain, "
"maybe you meant prod?", command)
def test_124_show_review(self):
changetest1_head = self.head_commit("changetest1")
command = ["show_review", "--source", "changetest1", "--target", "prod"]
out = self.commandtest(command)
self.output_equals(out, """
Review request
Target Domain: prod
Source Sandbox: changetest1
Published Commit: %s
Testing Status: Untested
Approval Status: No decision
""" % changetest1_head,
command)
def test_124_show_review_all(self):
changetest1_head = self.head_commit("changetest1")
command = ["show_review", "--all"]
out = self.commandtest(command)
self.matchoutput(out, changetest1_head, command)
def test_125_update_review_cr(self):
command = ["update_review", "--source", "changetest1", "--target", "prod",
"--review_url", "http://review.example.org/changes/1234"]
self.noouttest(command)
def test_126_update_review_testing(self):
changetest1_head = self.head_commit("changetest1")
prod_head = self.head_commit("changetest1", ref="origin/prod")
command = ["update_review", "--source", "changetest1", "--target", "prod",
"--commit_id", changetest1_head,
"--target_commit_tested", prod_head,
"--testing_url", "http://ci.example.org/builds/5678",
"--testing_succeeded"]
self.noouttest(command)
def test_126_update_review_approval(self):
changetest1_head = self.head_commit("changetest1")
command = ["update_review", "--source", "changetest1", "--target", "prod",
"--commit_id", changetest1_head,
"--approved"]
self.noouttest(command)
def test_128_show_review(self):
changetest1_head = self.head_commit("changetest1")
prod_head = self.head_commit("changetest1", ref="origin/prod")
command = ["show_review", "--source", "changetest1", "--target", "prod"]
out = self.commandtest(command)
self.output_equals(out, """
Review request
Target Domain: prod
Tested Commit: %s
Source Sandbox: changetest1
Published Commit: %s
Code Review URL: http://review.example.org/changes/1234
Testing URL: http://ci.example.org/builds/5678
Testing Status: Success
Approval Status: Approved
""" % (prod_head, changetest1_head),
command)
def test_128_show_review_csv(self):
changetest1_head = self.head_commit("changetest1")
prod_head = self.head_commit("changetest1", ref="origin/prod")
command = ["show_review", "--source", "changetest1", "--target", "prod",
"--format", "csv"]
out = self.commandtest(command)
self.matchoutput(out,
"prod,changetest1,%s,http://review.example.org/changes/1234,http://ci.example.org/builds/5678,%s,True,True"
% (changetest1_head, prod_head),
command)
def test_129_bad_target_commit_id(self):
changetest1_head = self.head_commit("changetest1")
commit_not_in_templates = "576afd9bd9f620293a9e0e249032be5157ba5d29"
command = ["update_review", "--source", "changetest1", "--target", "prod",
"--commit_id", changetest1_head, "--testing_failed",
"--target_commit_tested", commit_not_in_templates]
out = self.badrequesttest(command)
self.matchoutput(out, "Domain prod does not contain commit %s." %
commit_not_in_templates, command)
def test_129_stale_testing(self):
changetest1_head = self.head_commit("changetest1")
commit_not_in_templates = "576afd9bd9f620293a9e0e249032be5157ba5d29"
command = ["update_review", "--source", "changetest1", "--target", "prod",
"--commit_id", commit_not_in_templates,
"--testing_url", "http://ci.example.org/builds/5677",
"--testing_failed"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Possible attempt to update an old review record - "
"the commit being reviewed is %s, not %s." %
(changetest1_head, commit_not_in_templates),
command)
def test_129_short_commit(self):
abbrev_hash_not_in_templates = "576afd9bd9f620"
command = ["update_review", "--source", "changetest1", "--target", "prod",
"--commit_id", abbrev_hash_not_in_templates,
"--testing_url", "http://ci.example.org/builds/5677",
"--testing_failed"]
out = self.badrequesttest(command)
self.matchoutput(out, "Invalid commit ID (%s), make sure to pass the "
"full hash." % abbrev_hash_not_in_templates, command)
def test_130_deploynosync(self):
command = ["deploy", "--source", "changetest1", "--target", "prod",
"--nosync", "--justification", "tcm=12345678",
"--reason", "Just because"]
out = self.statustest(command)
self.matchoutput(out, "Updating the checked out copy of domain prod...",
command)
self.matchclean(out, "ut-prod", command)
self.matchclean(out, "not approved", command)
def test_131_verifydeploylog(self):
kingdir = self.config.get("broker", "kingdir")
command = ["show", "--no-patch", "--format=%B", "prod"]
out, _ = self.gitcommand(command, cwd=kingdir)
self.matchoutput(out, "User:", command)
self.matchoutput(out, "Request-ID:", command)
self.matchoutput(out, "Justification: tcm=12345678", command)
self.matchoutput(out, "Reason: Just because", command)
self.matchoutput(out,
"Code-Review-URL: http://review.example.org/changes/1234",
command)
self.matchoutput(out,
"Testing-URL: http://ci.example.org/builds/5678",
command)
def test_200_verifynosync(self):
# The change should be in prod...
template = self.find_template("aquilon", "archetype", "base",
domain="prod")
with open(template) as f:
contents = f.readlines()
self.assertEqual(contents[-1], "#Added by unittest\n")
# ...but not in the ut-prod tracking domain.
template = self.find_template("aquilon", "archetype", "base",
domain="ut-prod")
with open(template) as f:
contents = f.readlines()
self.assertNotEqual(contents[-1], "#Added by unittest\n")
def test_210_verifynosynclog(self):
kingdir = self.config.get("broker", "kingdir")
# Note: "prod" is a copy of the real thing so limit the amount of
# history checked to avoid being fooled by real commits
# The change must be in prod...
command = ["show", "--no-patch", "--format=%B", "prod"]
out, _ = self.gitcommand(command, cwd=kingdir)
self.matchoutput(out, "Justification: tcm=12345678", command)
self.matchoutput(out, "Reason: Just because", command)
# ... but not in ut-prod
command = ["show", "--no-patch", "--format=%B", "ut-prod"]
out, _ = self.gitcommand(command, cwd=kingdir)
self.matchclean(out, "tcm=12345678", command)
def test_300_add_advanced(self):
self.successtest(["add", "sandbox", "--sandbox", "advanced",
"--start", "prod"])
def test_310_deploy_leftbehind(self):
command = ["deploy", "--source", "advanced", "--target", "leftbehind"]
out = self.badrequesttest(command)
self.matchoutput(out,
"You're trying to deploy a sandbox to a domain that "
"does not contain the commit where the sandbox was "
"branched from.",
command)
    def test_310_review_leftbehind(self):
command = ["request_review", "--source", "advanced", "--target", "leftbehind"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Domain leftbehind does not contain the commit where "
"sandbox advanced was branched from.",
command)
def test_320_update_leftbehind(self):
command = ["deploy", "--source", "prod", "--target", "leftbehind"]
self.successtest(command)
def test_330_deploy_again(self):
command = ["deploy", "--source", "advanced", "--target", "leftbehind"]
self.successtest(command)
def test_340_cleanup_advanced(self):
self.successtest(["del_sandbox", "--sandbox", "advanced"])
sandboxdir = os.path.join(self.sandboxdir, "advanced")
rmtree(sandboxdir, ignore_errors=True)
def test_800_deploy_utsandbox(self):
# utsandbox contains changes needed to compile test hosts
command = ["deploy", "--source", "utsandbox", "--target", "prod",
"--justification", "tcm=12345678"]
out = self.statustest(command)
for domain in ["prod", "ut-prod", "netinfra"]:
self.matchoutput(out,
"Updating the checked out copy of domain %s..." %
domain, command)
#self.matchoutput(out, "Warning: this deployment request was "
# "not approved", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDeployDomain)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
import numpy as np
from mpi4py import MPI
from mpi4py_fft import PFFT
from pySDC.core.Errors import ParameterError, ProblemError
from pySDC.core.Problem import ptype
from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh
from mpi4py_fft import newDistArray
class allencahn_imex(ptype):
"""
Example implementing Allen-Cahn equation in 2-3D using mpi4py-fft for solving linear parts, IMEX time-stepping
mpi4py-fft: https://mpi4py-fft.readthedocs.io/en/latest/
Attributes:
fft: fft object
X: grid coordinates in real space
K2: Laplace operator in spectral space
dx: mesh width in x direction
dy: mesh width in y direction
"""
def __init__(self, problem_params, dtype_u=mesh, dtype_f=imex_mesh):
"""
Initialization routine
Args:
problem_params (dict): custom parameters for the example
dtype_u: fft data type (will be passed to parent class)
            dtype_f: fft data type with implicit and explicit parts (will be passed to parent class)
"""
if 'L' not in problem_params:
problem_params['L'] = 1.0
if 'init_type' not in problem_params:
problem_params['init_type'] = 'circle'
if 'comm' not in problem_params:
problem_params['comm'] = None
if 'dw' not in problem_params:
problem_params['dw'] = 0.0
# these parameters will be used later, so assert their existence
essential_keys = ['nvars', 'eps', 'L', 'radius', 'dw', 'spectral']
for key in essential_keys:
if key not in problem_params:
msg = 'need %s to instantiate problem, only got %s' % (key, str(problem_params.keys()))
raise ParameterError(msg)
if not (isinstance(problem_params['nvars'], tuple) and len(problem_params['nvars']) > 1):
raise ProblemError('Need at least two dimensions')
# Creating FFT structure
ndim = len(problem_params['nvars'])
axes = tuple(range(ndim))
self.fft = PFFT(problem_params['comm'], list(problem_params['nvars']), axes=axes, dtype=np.float64,
collapse=True)
# get test data to figure out type and dimensions
tmp_u = newDistArray(self.fft, problem_params['spectral'])
# invoke super init, passing the communicator and the local dimensions as init
super(allencahn_imex, self).__init__(init=(tmp_u.shape, problem_params['comm'], tmp_u.dtype),
dtype_u=dtype_u, dtype_f=dtype_f, params=problem_params)
L = np.array([self.params.L] * ndim, dtype=float)
# get local mesh
X = np.ogrid[self.fft.local_slice(False)]
N = self.fft.global_shape()
for i in range(len(N)):
X[i] = (X[i] * L[i] / N[i])
self.X = [np.broadcast_to(x, self.fft.shape(False)) for x in X]
# get local wavenumbers and Laplace operator
s = self.fft.local_slice()
N = self.fft.global_shape()
k = [np.fft.fftfreq(n, 1. / n).astype(int) for n in N[:-1]]
k.append(np.fft.rfftfreq(N[-1], 1. / N[-1]).astype(int))
K = [ki[si] for ki, si in zip(k, s)]
Ks = np.meshgrid(*K, indexing='ij', sparse=True)
Lp = 2 * np.pi / L
for i in range(ndim):
Ks[i] = (Ks[i] * Lp[i]).astype(float)
K = [np.broadcast_to(k, self.fft.shape(True)) for k in Ks]
K = np.array(K).astype(float)
self.K2 = np.sum(K * K, 0, dtype=float)
# Need this for diagnostics
self.dx = self.params.L / problem_params['nvars'][0]
self.dy = self.params.L / problem_params['nvars'][1]
def eval_f(self, u, t):
"""
Routine to evaluate the RHS
Args:
u (dtype_u): current values
t (float): current time
Returns:
dtype_f: the RHS
"""
f = self.dtype_f(self.init)
if self.params.spectral:
f.impl = -self.K2 * u
if self.params.eps > 0:
tmp = self.fft.backward(u)
tmpf = - 2.0 / self.params.eps ** 2 * tmp * (1.0 - tmp) * (1.0 - 2.0 * tmp) - \
6.0 * self.params.dw * tmp * (1.0 - tmp)
f.expl[:] = self.fft.forward(tmpf)
else:
u_hat = self.fft.forward(u)
lap_u_hat = -self.K2 * u_hat
f.impl[:] = self.fft.backward(lap_u_hat, f.impl)
if self.params.eps > 0:
f.expl = - 2.0 / self.params.eps ** 2 * u * (1.0 - u) * (1.0 - 2.0 * u) - \
6.0 * self.params.dw * u * (1.0 - u)
return f
def solve_system(self, rhs, factor, u0, t):
"""
Simple FFT solver for the diffusion part
Args:
rhs (dtype_f): right-hand side for the linear system
factor (float) : abbrev. for the node-to-node stepsize (or any other factor required)
u0 (dtype_u): initial guess for the iterative solver (not used here so far)
t (float): current time (e.g. for time-dependent BCs)
Returns:
dtype_u: solution as mesh
"""
if self.params.spectral:
me = rhs / (1.0 + factor * self.K2)
else:
me = self.dtype_u(self.init)
rhs_hat = self.fft.forward(rhs)
rhs_hat /= (1.0 + factor * self.K2)
me[:] = self.fft.backward(rhs_hat)
return me
def u_exact(self, t):
"""
Routine to compute the exact solution at time t
Args:
t (float): current time
Returns:
dtype_u: exact solution
"""
assert t == 0, 'ERROR: u_exact only valid for t=0'
me = self.dtype_u(self.init, val=0.0)
if self.params.init_type == 'circle':
r2 = (self.X[0] - 0.5) ** 2 + (self.X[1] - 0.5) ** 2
if self.params.spectral:
tmp = 0.5 * (1.0 + np.tanh((self.params.radius - np.sqrt(r2)) / (np.sqrt(2) * self.params.eps)))
me[:] = self.fft.forward(tmp)
else:
me[:] = 0.5 * (1.0 + np.tanh((self.params.radius - np.sqrt(r2)) / (np.sqrt(2) * self.params.eps)))
elif self.params.init_type == 'circle_rand':
ndim = len(me.shape)
L = int(self.params.L)
# get random radii for circles/spheres
np.random.seed(1)
lbound = 3.0 * self.params.eps
ubound = 0.5 - self.params.eps
rand_radii = (ubound - lbound) * np.random.random_sample(size=tuple([L] * ndim)) + lbound
# distribute circles/spheres
tmp = newDistArray(self.fft, False)
if ndim == 2:
for i in range(0, L):
for j in range(0, L):
# build radius
r2 = (self.X[0] + i - L + 0.5) ** 2 + (self.X[1] + j - L + 0.5) ** 2
# add this blob, shifted by 1 to avoid issues with adding up negative contributions
tmp += np.tanh((rand_radii[i, j] - np.sqrt(r2)) / (np.sqrt(2) * self.params.eps)) + 1
# normalize to [0,1]
tmp *= 0.5
assert np.all(tmp <= 1.0)
if self.params.spectral:
me[:] = self.fft.forward(tmp)
else:
me[:] = tmp[:]
else:
raise NotImplementedError('type of initial value not implemented, got %s' % self.params.init_type)
return me
class allencahn_imex_timeforcing(allencahn_imex):
"""
Example implementing Allen-Cahn equation in 2-3D using mpi4py-fft for solving linear parts, IMEX time-stepping,
time-dependent forcing
"""
def eval_f(self, u, t):
"""
Routine to evaluate the RHS
Args:
u (dtype_u): current values
t (float): current time
Returns:
dtype_f: the RHS
"""
f = self.dtype_f(self.init)
if self.params.spectral:
f.impl = -self.K2 * u
tmp = newDistArray(self.fft, False)
tmp[:] = self.fft.backward(u, tmp)
if self.params.eps > 0:
tmpf = -2.0 / self.params.eps ** 2 * tmp * (1.0 - tmp) * (1.0 - 2.0 * tmp)
else:
tmpf = self.dtype_f(self.init, val=0.0)
# build sum over RHS without driving force
Rt_local = float(np.sum(self.fft.backward(f.impl) + tmpf))
if self.params.comm is not None:
Rt_global = self.params.comm.allreduce(sendobj=Rt_local, op=MPI.SUM)
else:
Rt_global = Rt_local
# build sum over driving force term
Ht_local = float(np.sum(6.0 * tmp * (1.0 - tmp)))
if self.params.comm is not None:
Ht_global = self.params.comm.allreduce(sendobj=Ht_local, op=MPI.SUM)
else:
                Ht_global = Ht_local
            # add/subtract time-dependent driving force
if Ht_global != 0.0:
dw = Rt_global / Ht_global
else:
dw = 0.0
tmpf -= 6.0 * dw * tmp * (1.0 - tmp)
f.expl[:] = self.fft.forward(tmpf)
else:
u_hat = self.fft.forward(u)
lap_u_hat = -self.K2 * u_hat
f.impl[:] = self.fft.backward(lap_u_hat, f.impl)
if self.params.eps > 0:
f.expl = -2.0 / self.params.eps ** 2 * u * (1.0 - u) * (1.0 - 2.0 * u)
# build sum over RHS without driving force
Rt_local = float(np.sum(f.impl + f.expl))
if self.params.comm is not None:
Rt_global = self.params.comm.allreduce(sendobj=Rt_local, op=MPI.SUM)
else:
Rt_global = Rt_local
# build sum over driving force term
Ht_local = float(np.sum(6.0 * u * (1.0 - u)))
if self.params.comm is not None:
Ht_global = self.params.comm.allreduce(sendobj=Ht_local, op=MPI.SUM)
else:
                Ht_global = Ht_local
            # add/subtract time-dependent driving force
if Ht_global != 0.0:
dw = Rt_global / Ht_global
else:
dw = 0.0
f.expl -= 6.0 * dw * u * (1.0 - u)
return f
|
|
# -*- coding: utf-8 -*-
from rest_framework import status as http_status
import logging
from flask import request
from django.db.models.expressions import F
from framework.exceptions import HTTPError
from framework.auth.utils import privacy_info_handle
from framework.auth.decorators import must_be_logged_in
from framework.flask import redirect
from addons.wiki.utils import to_mongo_key
from addons.wiki import settings
from addons.wiki import utils as wiki_utils
from addons.wiki.models import WikiPage, WikiVersion
from osf import features
from website.profile.utils import get_profile_image_url
from website.project.views.node import _view_project
from website.project.model import has_anonymous_link
from website.ember_osf_web.decorators import ember_flag_is_active
from website.project.decorators import (
must_be_contributor_or_public,
must_have_addon, must_not_be_registration,
must_be_valid_project,
must_have_permission,
must_have_write_permission_or_public_wiki,
must_not_be_retracted_registration,
)
from osf.exceptions import ValidationError, NodeStateError
from osf.utils.permissions import ADMIN, WRITE
from .exceptions import (
NameEmptyError,
NameInvalidError,
NameMaximumLengthError,
PageCannotRenameError,
PageConflictError,
PageNotFoundError,
InvalidVersionError,
)
logger = logging.getLogger(__name__)
WIKI_NAME_EMPTY_ERROR = HTTPError(http_status.HTTP_400_BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The wiki page name cannot be empty.'
))
WIKI_NAME_MAXIMUM_LENGTH_ERROR = HTTPError(http_status.HTTP_400_BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The wiki page name cannot be more than 100 characters.'
))
WIKI_PAGE_CANNOT_RENAME_ERROR = HTTPError(http_status.HTTP_400_BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The wiki page cannot be renamed.'
))
WIKI_PAGE_CONFLICT_ERROR = HTTPError(http_status.HTTP_409_CONFLICT, data=dict(
message_short='Page conflict',
message_long='A wiki page with that name already exists.'
))
WIKI_PAGE_NOT_FOUND_ERROR = HTTPError(http_status.HTTP_404_NOT_FOUND, data=dict(
message_short='Not found',
message_long='A wiki page could not be found.'
))
WIKI_INVALID_VERSION_ERROR = HTTPError(http_status.HTTP_400_BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The requested version of this wiki page does not exist.'
))
def _get_wiki_versions(node, name, anonymous=False):
# Skip if wiki_page doesn't exist; happens on new projects before
# default "home" page is created
wiki_page = WikiPage.objects.get_for_node(node, name)
if wiki_page:
versions = wiki_page.get_versions()
else:
return []
return [
{
'version': version.identifier,
'user_fullname': privacy_info_handle(version.user.fullname, anonymous, name=True),
'date': '{} UTC'.format(version.created.replace(microsecond=0).isoformat().replace('T', ' ')),
}
for version in versions
]
def _get_wiki_pages_latest(node):
return [
{
'name': page.wiki_page.page_name,
'url': node.web_url_for('project_wiki_view', wname=page.wiki_page.page_name, _guid=True),
'wiki_id': page.wiki_page._primary_key,
'wiki_content': _wiki_page_content(page.wiki_page.page_name, node=node)
}
for page in WikiPage.objects.get_wiki_pages_latest(node).order_by(F('name'))
]
def _get_wiki_api_urls(node, name, additional_urls=None):
urls = {
'base': node.api_url_for('project_wiki_home'),
'delete': node.api_url_for('project_wiki_delete', wname=name),
'rename': node.api_url_for('project_wiki_rename', wname=name),
'content': node.api_url_for('wiki_page_content', wname=name),
'settings': node.api_url_for('edit_wiki_settings'),
'grid': node.api_url_for('project_wiki_grid_data', wname=name)
}
if additional_urls:
urls.update(additional_urls)
return urls
def _get_wiki_web_urls(node, key, version=1, additional_urls=None):
urls = {
'base': node.web_url_for('project_wiki_home', _guid=True),
'edit': node.web_url_for('project_wiki_view', wname=key, _guid=True),
'home': node.web_url_for('project_wiki_home', _guid=True),
'page': node.web_url_for('project_wiki_view', wname=key, _guid=True),
}
if additional_urls:
urls.update(additional_urls)
return urls
@must_be_valid_project
@must_have_write_permission_or_public_wiki
@must_have_addon('wiki', 'node')
def wiki_page_draft(wname, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_version = WikiVersion.objects.get_for_node(node, wname)
return {
'wiki_content': wiki_version.content if wiki_version else None,
'wiki_draft': (wiki_version.get_draft(node) if wiki_version
else wiki_utils.get_sharejs_content(node, wname)),
}
def _wiki_page_content(wname, wver=None, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_version = WikiVersion.objects.get_for_node(node, wname, wver)
return {
'wiki_content': wiki_version.content if wiki_version else '',
'rendered_before_update': wiki_version.rendered_before_update if wiki_version else False
}
@must_be_valid_project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def wiki_page_content(wname, wver=None, **kwargs):
return _wiki_page_content(wname, wver=wver, **kwargs)
@must_be_valid_project # injects project
@must_have_permission(WRITE) # injects user, project
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_delete(auth, wname, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_name = wname.strip()
wiki_page = WikiPage.objects.get_for_node(node, wiki_name)
sharejs_uuid = wiki_utils.get_sharejs_uuid(node, wiki_name)
if not wiki_page:
raise HTTPError(http_status.HTTP_404_NOT_FOUND)
wiki_page.delete(auth)
wiki_utils.broadcast_to_sharejs('delete', sharejs_uuid, node)
return {}
@must_be_valid_project # returns project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
@must_not_be_retracted_registration
def project_wiki_view(auth, wname, path=None, **kwargs):
node = kwargs['node'] or kwargs['project']
anonymous = has_anonymous_link(node, auth)
wiki_name = (wname or '').strip()
wiki_key = to_mongo_key(wiki_name)
wiki_page = WikiPage.objects.get_for_node(node, wiki_name)
wiki_version = WikiVersion.objects.get_for_node(node, wiki_name)
wiki_settings = node.get_addon('wiki')
can_edit = (
auth.logged_in and not
node.is_registration and (
node.has_permission(auth.user, WRITE) or
wiki_settings.is_publicly_editable
)
)
versions = _get_wiki_versions(node, wiki_name, anonymous=anonymous)
# Determine panels used in view
panels = {'view', 'edit', 'compare', 'menu'}
if request.args and set(request.args).intersection(panels):
panels_used = [panel for panel in request.args if panel in panels]
num_columns = len(set(panels_used).intersection({'view', 'edit', 'compare'}))
if num_columns == 0:
panels_used.append('view')
num_columns = 1
else:
panels_used = ['view', 'menu']
num_columns = 1
try:
view = wiki_utils.format_wiki_version(
version=request.args.get('view'),
num_versions=len(versions),
allow_preview=True,
)
compare = wiki_utils.format_wiki_version(
version=request.args.get('compare'),
num_versions=len(versions),
allow_preview=False,
)
except InvalidVersionError:
raise WIKI_INVALID_VERSION_ERROR
# ensure home is always lower case since it cannot be renamed
if wiki_name.lower() == 'home':
wiki_name = 'home'
if wiki_version:
version = wiki_version.identifier
is_current = wiki_version.is_current
content = wiki_version.html(node)
rendered_before_update = wiki_version.rendered_before_update
else:
version = 'NA'
is_current = False
content = ''
rendered_before_update = False
if can_edit:
if wiki_key not in node.wiki_private_uuids:
wiki_utils.generate_private_uuid(node, wiki_name)
sharejs_uuid = wiki_utils.get_sharejs_uuid(node, wiki_name)
else:
if not wiki_page and wiki_key != 'home':
raise WIKI_PAGE_NOT_FOUND_ERROR
if 'edit' in request.args:
if wiki_settings.is_publicly_editable:
raise HTTPError(http_status.HTTP_401_UNAUTHORIZED)
if node.can_view(auth):
return redirect(node.web_url_for('project_wiki_view', wname=wname, _guid=True))
raise HTTPError(http_status.HTTP_403_FORBIDDEN)
sharejs_uuid = None
# Opens 'edit' panel when home wiki is empty
if not content and can_edit and wiki_name == 'home':
panels_used.append('edit')
# Default versions for view and compare
version_settings = {
'view': view or ('preview' if 'edit' in panels_used else 'current'),
'compare': compare or 'previous',
}
ret = {
'wiki_id': wiki_page._primary_key if wiki_page else None,
'wiki_name': wiki_page.page_name if wiki_page else wiki_name,
'wiki_content': content,
'rendered_before_update': rendered_before_update,
'page': wiki_page,
'version': version,
'versions': versions,
'sharejs_uuid': sharejs_uuid or '',
'sharejs_url': settings.SHAREJS_URL,
'is_current': is_current,
'version_settings': version_settings,
'pages_current': _get_wiki_pages_latest(node),
'category': node.category,
'panels_used': panels_used,
'num_columns': num_columns,
'urls': {
'api': _get_wiki_api_urls(node, wiki_name, {
'content': node.api_url_for('wiki_page_content', wname=wiki_name),
'draft': node.api_url_for('wiki_page_draft', wname=wiki_name),
}),
'web': _get_wiki_web_urls(node, wiki_name),
'profile_image': get_profile_image_url(auth.user, 25),
},
}
ret.update(_view_project(node, auth, primary=True))
ret['user']['can_edit_wiki_body'] = can_edit
return ret
@must_be_valid_project # injects node or project
@must_have_write_permission_or_public_wiki # injects user
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_edit_post(auth, wname, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_name = wname.strip()
wiki_version = WikiVersion.objects.get_for_node(node, wiki_name)
redirect_url = node.web_url_for('project_wiki_view', wname=wiki_name, _guid=True)
form_wiki_content = request.form['content']
# ensure home is always lower case since it cannot be renamed
if wiki_name.lower() == 'home':
wiki_name = 'home'
if wiki_version:
# Only update wiki if content has changed
if form_wiki_content != wiki_version.content:
wiki_version.wiki_page.update(auth.user, form_wiki_content)
ret = {'status': 'success'}
else:
ret = {'status': 'unmodified'}
else:
# Create a wiki
WikiPage.objects.create_for_node(node, wiki_name, form_wiki_content, auth)
ret = {'status': 'success'}
return ret, http_status.HTTP_302_FOUND, None, redirect_url
@must_be_valid_project # injects node or project
@must_have_permission(ADMIN)
@must_not_be_registration
@must_have_addon('wiki', 'node')
def edit_wiki_settings(node, auth, **kwargs):
wiki_settings = node.get_addon('wiki')
permissions = request.get_json().get('permission', None)
if not wiki_settings:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='Cannot change wiki settings without a wiki'
))
if permissions == 'public':
permissions = True
elif permissions == 'private':
permissions = False
else:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='Permissions flag used is incorrect.'
))
try:
wiki_settings.set_editing(permissions, auth, log=True)
except NodeStateError as e:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data=dict(
message_short="Can't change privacy",
message_long=str(e)
))
return {
'status': 'success',
'permissions': permissions,
}
@must_be_logged_in
@must_be_valid_project
def get_node_wiki_permissions(node, auth, **kwargs):
return wiki_utils.serialize_wiki_settings(auth.user, [node])
@must_be_valid_project
@must_have_addon('wiki', 'node')
@ember_flag_is_active(features.EMBER_PROJECT_WIKI)
def project_wiki_home(**kwargs):
node = kwargs['node'] or kwargs['project']
return redirect(node.web_url_for('project_wiki_view', wname='home', _guid=True))
@must_be_valid_project # injects project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def project_wiki_id_page(auth, wid, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki = WikiPage.objects.get_for_node(node, id=wid)
if wiki:
return redirect(node.web_url_for('project_wiki_view', wname=wiki.page_name, _guid=True))
else:
raise WIKI_PAGE_NOT_FOUND_ERROR
@must_be_valid_project
@must_have_write_permission_or_public_wiki
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_edit(wname, **kwargs):
node = kwargs['node'] or kwargs['project']
return redirect(node.web_url_for('project_wiki_view', wname=wname, _guid=True) + '?edit&view&menu')
@must_be_valid_project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def project_wiki_compare(wname, wver, **kwargs):
node = kwargs['node'] or kwargs['project']
return redirect(node.web_url_for('project_wiki_view', wname=wname, _guid=True) + '?view&compare={0}&menu'.format(wver))
@must_not_be_registration
@must_have_permission(WRITE)
@must_have_addon('wiki', 'node')
def project_wiki_rename(auth, wname, **kwargs):
"""View that handles user the X-editable input for wiki page renaming.
:param wname: The target wiki page name.
:param-json value: The new wiki page name.
"""
node = kwargs['node'] or kwargs['project']
wiki_name = wname.strip()
new_wiki_name = request.get_json().get('value', None)
wiki_page = WikiPage.objects.get_for_node(node, wiki_name)
if not wiki_page:
raise WIKI_PAGE_NOT_FOUND_ERROR
try:
wiki_page.rename(new_wiki_name, auth)
except NameEmptyError:
raise WIKI_NAME_EMPTY_ERROR
except NameInvalidError as error:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data=dict(
message_short='Invalid name',
message_long=error.args[0]
))
except NameMaximumLengthError:
raise WIKI_NAME_MAXIMUM_LENGTH_ERROR
except PageCannotRenameError:
raise WIKI_PAGE_CANNOT_RENAME_ERROR
except PageConflictError:
raise WIKI_PAGE_CONFLICT_ERROR
except PageNotFoundError:
raise WIKI_PAGE_NOT_FOUND_ERROR
except ValidationError as err:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long=err.messages[0]
))
else:
sharejs_uuid = wiki_utils.get_sharejs_uuid(node, new_wiki_name)
wiki_utils.broadcast_to_sharejs('redirect', sharejs_uuid, node, new_wiki_name)
@must_be_valid_project # returns project
@must_have_permission(WRITE) # returns user, project
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_validate_name(wname, auth, node, **kwargs):
wiki_name = wname.strip()
wiki = WikiPage.objects.get_for_node(node, wiki_name)
if wiki or wiki_name.lower() == 'home':
raise HTTPError(http_status.HTTP_409_CONFLICT, data=dict(
message_short='Wiki page name conflict.',
message_long='A wiki page with that name already exists.'
))
else:
WikiPage.objects.create_for_node(node, wiki_name, '', auth)
return {'message': wiki_name}
@must_be_valid_project
@must_be_contributor_or_public
def project_wiki_grid_data(auth, node, **kwargs):
pages = []
project_wiki_pages = {
'title': 'Project Wiki Pages',
'kind': 'folder',
'type': 'heading',
'children': format_project_wiki_pages(node, auth)
}
pages.append(project_wiki_pages)
component_wiki_pages = {
'title': 'Component Wiki Pages',
'kind': 'folder',
'type': 'heading',
'children': format_component_wiki_pages(node, auth)
}
if len(component_wiki_pages['children']) > 0:
pages.append(component_wiki_pages)
return pages
def format_home_wiki_page(node):
home_wiki = WikiPage.objects.get_for_node(node, 'home')
home_wiki_page = {
'page': {
'url': node.web_url_for('project_wiki_home'),
'name': 'Home',
'id': 'None',
}
}
if home_wiki:
home_wiki_page = {
'page': {
'url': node.web_url_for('project_wiki_view', wname='home', _guid=True),
'name': 'Home',
'id': home_wiki._primary_key,
}
}
return home_wiki_page
def format_project_wiki_pages(node, auth):
pages = []
can_edit = node.has_permission(auth.user, WRITE) and not node.is_registration
project_wiki_pages = _get_wiki_pages_latest(node)
home_wiki_page = format_home_wiki_page(node)
pages.append(home_wiki_page)
for wiki_page in project_wiki_pages:
if wiki_page['name'] != 'home':
has_content = bool(wiki_page['wiki_content'].get('wiki_content'))
page = {
'page': {
'url': wiki_page['url'],
'name': wiki_page['name'],
'id': wiki_page['wiki_id'],
}
}
if can_edit or has_content:
pages.append(page)
return pages
def format_component_wiki_pages(node, auth):
pages = []
for node in node.get_nodes(is_deleted=False):
if any([not node.can_view(auth),
not node.has_addon('wiki')]):
continue
else:
serialized = serialize_component_wiki(node, auth)
if serialized:
pages.append(serialized)
return pages
def serialize_component_wiki(node, auth):
children = []
url = node.web_url_for('project_wiki_view', wname='home', _guid=True)
home_has_content = bool(_wiki_page_content('home', node=node).get('wiki_content'))
component_home_wiki = {
'page': {
'url': url,
'name': 'Home',
# Handle pointers
'id': node._id
}
}
can_edit = node.has_permission(auth.user, WRITE) and not node.is_registration
if can_edit or home_has_content:
children.append(component_home_wiki)
for page in _get_wiki_pages_latest(node):
if page['name'] != 'home':
has_content = bool(page['wiki_content'].get('wiki_content'))
component_page = {
'page': {
'url': page['url'],
'name': page['name'],
'id': page['wiki_id'],
}
}
if can_edit or has_content:
children.append(component_page)
if len(children) > 0:
component = {
'page': {
'name': node.title,
'url': url,
},
'kind': 'component',
'category': node.category,
'pointer': not node.primary,
'children': children,
}
return component
return None
|
|
"""Tests formatting as writer-agnostic ExcelCells
ExcelFormatter is tested implicitly in pandas/tests/io/test_excel.py
"""
import pytest
import pandas.util.testing as tm
from pandas.io.formats.css import CSSWarning
from pandas.io.formats.excel import CSSToExcelConverter
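# Illustrative example of the converter's output, derived from the cases below:
# CSSToExcelConverter()('font-weight: bold; color: #f0a') is expected to yield
# {'font': {'bold': True, 'color': 'FF00AA'}}.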
@pytest.mark.parametrize('css,expected', [
# FONT
# - name
('font-family: foo,bar', {'font': {'name': 'foo'}}),
('font-family: "foo bar",baz', {'font': {'name': 'foo bar'}}),
('font-family: foo,\nbar', {'font': {'name': 'foo'}}),
('font-family: foo, bar, baz', {'font': {'name': 'foo'}}),
('font-family: bar, foo', {'font': {'name': 'bar'}}),
('font-family: \'foo bar\', baz', {'font': {'name': 'foo bar'}}),
('font-family: \'foo \\\'bar\', baz', {'font': {'name': 'foo \'bar'}}),
('font-family: "foo \\"bar", baz', {'font': {'name': 'foo "bar'}}),
('font-family: "foo ,bar", baz', {'font': {'name': 'foo ,bar'}}),
# - family
('font-family: serif', {'font': {'name': 'serif', 'family': 1}}),
('font-family: Serif', {'font': {'name': 'serif', 'family': 1}}),
('font-family: roman, serif', {'font': {'name': 'roman', 'family': 1}}),
('font-family: roman, sans-serif', {'font': {'name': 'roman',
'family': 2}}),
('font-family: roman, sans serif', {'font': {'name': 'roman'}}),
('font-family: roman, sansserif', {'font': {'name': 'roman'}}),
('font-family: roman, cursive', {'font': {'name': 'roman', 'family': 4}}),
('font-family: roman, fantasy', {'font': {'name': 'roman', 'family': 5}}),
# - size
('font-size: 1em', {'font': {'size': 12}}),
('font-size: xx-small', {'font': {'size': 6}}),
('font-size: x-small', {'font': {'size': 7.5}}),
('font-size: small', {'font': {'size': 9.6}}),
('font-size: medium', {'font': {'size': 12}}),
('font-size: large', {'font': {'size': 13.5}}),
('font-size: x-large', {'font': {'size': 18}}),
('font-size: xx-large', {'font': {'size': 24}}),
('font-size: 50%', {'font': {'size': 6}}),
# - bold
('font-weight: 100', {'font': {'bold': False}}),
('font-weight: 200', {'font': {'bold': False}}),
('font-weight: 300', {'font': {'bold': False}}),
('font-weight: 400', {'font': {'bold': False}}),
('font-weight: normal', {'font': {'bold': False}}),
('font-weight: lighter', {'font': {'bold': False}}),
('font-weight: bold', {'font': {'bold': True}}),
('font-weight: bolder', {'font': {'bold': True}}),
('font-weight: 700', {'font': {'bold': True}}),
('font-weight: 800', {'font': {'bold': True}}),
('font-weight: 900', {'font': {'bold': True}}),
# - italic
('font-style: italic', {'font': {'italic': True}}),
('font-style: oblique', {'font': {'italic': True}}),
# - underline
('text-decoration: underline',
{'font': {'underline': 'single'}}),
('text-decoration: overline',
{}),
('text-decoration: none',
{}),
# - strike
('text-decoration: line-through',
{'font': {'strike': True}}),
('text-decoration: underline line-through',
{'font': {'strike': True, 'underline': 'single'}}),
('text-decoration: underline; text-decoration: line-through',
{'font': {'strike': True}}),
# - color
('color: red', {'font': {'color': 'FF0000'}}),
('color: #ff0000', {'font': {'color': 'FF0000'}}),
('color: #f0a', {'font': {'color': 'FF00AA'}}),
# - shadow
('text-shadow: none', {'font': {'shadow': False}}),
('text-shadow: 0px -0em 0px #CCC', {'font': {'shadow': False}}),
('text-shadow: 0px -0em 0px #999', {'font': {'shadow': False}}),
('text-shadow: 0px -0em 0px', {'font': {'shadow': False}}),
('text-shadow: 2px -0em 0px #CCC', {'font': {'shadow': True}}),
('text-shadow: 0px -2em 0px #CCC', {'font': {'shadow': True}}),
('text-shadow: 0px -0em 2px #CCC', {'font': {'shadow': True}}),
('text-shadow: 0px -0em 2px', {'font': {'shadow': True}}),
('text-shadow: 0px -2em', {'font': {'shadow': True}}),
# FILL
# - color, fillType
('background-color: red', {'fill': {'fgColor': 'FF0000',
'patternType': 'solid'}}),
('background-color: #ff0000', {'fill': {'fgColor': 'FF0000',
'patternType': 'solid'}}),
('background-color: #f0a', {'fill': {'fgColor': 'FF00AA',
'patternType': 'solid'}}),
# BORDER
# - style
('border-style: solid',
{'border': {'top': {'style': 'medium'},
'bottom': {'style': 'medium'},
'left': {'style': 'medium'},
'right': {'style': 'medium'}}}),
('border-style: solid; border-width: thin',
{'border': {'top': {'style': 'thin'},
'bottom': {'style': 'thin'},
'left': {'style': 'thin'},
'right': {'style': 'thin'}}}),
('border-top-style: solid; border-top-width: thin',
{'border': {'top': {'style': 'thin'}}}),
('border-top-style: solid; border-top-width: 1pt',
{'border': {'top': {'style': 'thin'}}}),
('border-top-style: solid',
{'border': {'top': {'style': 'medium'}}}),
('border-top-style: solid; border-top-width: medium',
{'border': {'top': {'style': 'medium'}}}),
('border-top-style: solid; border-top-width: 2pt',
{'border': {'top': {'style': 'medium'}}}),
('border-top-style: solid; border-top-width: thick',
{'border': {'top': {'style': 'thick'}}}),
('border-top-style: solid; border-top-width: 4pt',
{'border': {'top': {'style': 'thick'}}}),
('border-top-style: dotted',
{'border': {'top': {'style': 'mediumDashDotDot'}}}),
('border-top-style: dotted; border-top-width: thin',
{'border': {'top': {'style': 'dotted'}}}),
('border-top-style: dashed',
{'border': {'top': {'style': 'mediumDashed'}}}),
('border-top-style: dashed; border-top-width: thin',
{'border': {'top': {'style': 'dashed'}}}),
('border-top-style: double',
{'border': {'top': {'style': 'double'}}}),
# - color
('border-style: solid; border-color: #0000ff',
{'border': {'top': {'style': 'medium', 'color': '0000FF'},
'right': {'style': 'medium', 'color': '0000FF'},
'bottom': {'style': 'medium', 'color': '0000FF'},
'left': {'style': 'medium', 'color': '0000FF'}}}),
('border-top-style: double; border-top-color: blue',
{'border': {'top': {'style': 'double', 'color': '0000FF'}}}),
('border-top-style: solid; border-top-color: #06c',
{'border': {'top': {'style': 'medium', 'color': '0066CC'}}}),
# ALIGNMENT
# - horizontal
('text-align: center',
{'alignment': {'horizontal': 'center'}}),
('text-align: left',
{'alignment': {'horizontal': 'left'}}),
('text-align: right',
{'alignment': {'horizontal': 'right'}}),
('text-align: justify',
{'alignment': {'horizontal': 'justify'}}),
# - vertical
('vertical-align: top',
{'alignment': {'vertical': 'top'}}),
('vertical-align: text-top',
{'alignment': {'vertical': 'top'}}),
('vertical-align: middle',
{'alignment': {'vertical': 'center'}}),
('vertical-align: bottom',
{'alignment': {'vertical': 'bottom'}}),
('vertical-align: text-bottom',
{'alignment': {'vertical': 'bottom'}}),
# - wrap_text
('white-space: nowrap',
{'alignment': {'wrap_text': False}}),
('white-space: pre',
{'alignment': {'wrap_text': False}}),
('white-space: pre-line',
{'alignment': {'wrap_text': False}}),
('white-space: normal',
{'alignment': {'wrap_text': True}}),
# NUMBER FORMAT
('number-format: 0%',
{'number_format': {'format_code': '0%'}}),
])
def test_css_to_excel(css, expected):
convert = CSSToExcelConverter()
assert expected == convert(css)
def test_css_to_excel_multiple():
convert = CSSToExcelConverter()
actual = convert('''
font-weight: bold;
text-decoration: underline;
color: red;
border-width: thin;
text-align: center;
vertical-align: top;
unused: something;
''')
assert {"font": {"bold": True, "underline": "single", "color": "FF0000"},
"border": {"top": {"style": "thin"},
"right": {"style": "thin"},
"bottom": {"style": "thin"},
"left": {"style": "thin"}},
"alignment": {"horizontal": "center",
"vertical": "top"}} == actual
@pytest.mark.parametrize('css,inherited,expected', [
('font-weight: bold', '',
{'font': {'bold': True}}),
('', 'font-weight: bold',
{'font': {'bold': True}}),
('font-weight: bold', 'font-style: italic',
{'font': {'bold': True, 'italic': True}}),
('font-style: normal', 'font-style: italic',
{'font': {'italic': False}}),
('font-style: inherit', '', {}),
('font-style: normal; font-style: inherit', 'font-style: italic',
{'font': {'italic': True}}),
])
def test_css_to_excel_inherited(css, inherited, expected):
convert = CSSToExcelConverter(inherited)
assert expected == convert(css)
@pytest.mark.parametrize("input_color,output_color", (
[(name, rgb) for name, rgb in CSSToExcelConverter.NAMED_COLORS.items()] +
[("#" + rgb, rgb) for rgb in CSSToExcelConverter.NAMED_COLORS.values()] +
[("#F0F", "FF00FF"), ("#ABC", "AABBCC")])
)
def test_css_to_excel_good_colors(input_color, output_color):
# see gh-18392
css = ("border-top-color: {color}; "
"border-right-color: {color}; "
"border-bottom-color: {color}; "
"border-left-color: {color}; "
"background-color: {color}; "
"color: {color}").format(color=input_color)
expected = dict()
expected["fill"] = {
"patternType": "solid",
"fgColor": output_color
}
expected["font"] = {
"color": output_color
}
expected["border"] = {
k: {
"color": output_color,
} for k in ("top", "right", "bottom", "left")
}
with tm.assert_produces_warning(None):
convert = CSSToExcelConverter()
assert expected == convert(css)
@pytest.mark.parametrize("input_color", [None, "not-a-color"])
def test_css_to_excel_bad_colors(input_color):
# see gh-18392
css = ("border-top-color: {color}; "
"border-right-color: {color}; "
"border-bottom-color: {color}; "
"border-left-color: {color}; "
"background-color: {color}; "
"color: {color}").format(color=input_color)
expected = dict()
if input_color is not None:
expected["fill"] = {
"patternType": "solid"
}
with tm.assert_produces_warning(CSSWarning):
convert = CSSToExcelConverter()
assert expected == convert(css)
|
|
'''
HTML tag classes.
'''
__license__ = '''
This file is part of Dominate.
Dominate is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
Dominate is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General
Public License along with Dominate. If not, see
<http://www.gnu.org/licenses/>.
'''
from .dom_tag import dom_tag, attr
from .dom1core import dom1core
try:
basestring = basestring
except NameError: # py3
basestring = str
unicode = str
underscored_classes = set(['del', 'input', 'map', 'object'])
# Tag attributes
_ATTR_GLOBAL = set([
  'accesskey', 'class', 'contenteditable', 'contextmenu', 'dir',
'draggable', 'id', 'item', 'hidden', 'lang', 'itemprop', 'spellcheck',
'style', 'subject', 'tabindex', 'title'
])
_ATTR_EVENTS = set([
'onabort', 'onblur', 'oncanplay', 'oncanplaythrough', 'onchange', 'onclick',
'oncontextmenu', 'ondblclick', 'ondrag', 'ondragend', 'ondragenter',
'ondragleave', 'ondragover', 'ondragstart', 'ondrop', 'ondurationchange',
'onemptied', 'onended', 'onerror', 'onfocus', 'onformchange', 'onforminput',
'oninput', 'oninvalid', 'onkeydown', 'onkeypress', 'onkeyup', 'onload',
'onloadeddata', 'onloadedmetadata', 'onloadstart', 'onmousedown',
'onmousemove', 'onmouseout', 'onmouseover', 'onmouseup', 'onmousewheel',
'onpause', 'onplay', 'onplaying', 'onprogress', 'onratechange',
'onreadystatechange', 'onscroll', 'onseeked', 'onseeking', 'onselect',
'onshow', 'onstalled', 'onsubmit', 'onsuspend', 'ontimeupdate',
'onvolumechange', 'onwaiting'
])
ERR_ATTRIBUTE = 'attributes'
ERR_CONTEXT = 'context'
ERR_CONTENT = 'content'
class html_tag(dom_tag, dom1core):
def __init__(self, *args, **kwargs):
'''
Creates a new html tag instance.
'''
super(html_tag, self).__init__(*args, **kwargs)
# def validate(self):
# '''
# Validate the tag. This will check the attributes, context, and contents and
# emit tuples in the form of: element, message.
# '''
# errors = []
# errors.extend(self.validate_attributes())
# errors.extend(self.validate_context())
# errors.extend(self.validate_content())
# return errors
# def validate_attributes(self):
# '''
# Validate the tag attributes.
# '''
# return []
# def validate_context(self):
# '''
# Validate the tag context.
# '''
# return []
# def validate_content(self):
# '''
# Validate the content of the tag.
# '''
# return []
# def _check_attributes(self, *attrs):
# valid = set([])
# for attr in attrs:
# if hasattr(attr, '__iter__'):
# valid |= set(attr)
# else:
# valid.add(attr)
# return set(list(self.attributes.iterkeys())) - valid
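# Example usage (illustrative; any of the concrete tag classes defined below works
# the same way):
#   from dominate.tags import div, p
#   content = div(p('Hello'), id='main')
#   print(content.render())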
################################################################################
############################### Html Tag Classes ###############################
################################################################################
# Root element
class html(html_tag):
'''
The html element represents the root of an HTML document.
'''
pass
# def validate_attributes(self):
# errors = []
# for invalid in self._check_attributes(_ATTR_GLOBAL, 'manifest'):
# errors.append( (self, ERR_ATTRIBUTE, 'Invalid attribute: "%s"' % invalid) )
# return errors
# def validate_context(self):
# if self.parent is not None and not isinstance(self.parent, iframe):
# return [(self, ERR_CONTEXT, 'Must be root element or child of an <iframe>')]
# return []
# def validate_content(self):
# if len(self) != 2 or not isinstance(self[0], head) or not isinstance(self[1], body):
# return [(self, ERR_CONTENT, 'Children must be <head> and then <body>.')]
# return []
# Document metadata
class head(html_tag):
'''
The head element represents a collection of metadata for the document.
'''
pass
class title(html_tag):
'''
The title element represents the document's title or name. Authors should use
titles that identify their documents even when they are used out of context,
for example in a user's history or bookmarks, or in search results. The
document's title is often different from its first heading, since the first
heading does not have to stand alone when taken out of context.
'''
def _get_text(self):
return u''.join(self.get(basestring))
def _set_text(self, text):
self.clear()
self.add(text)
text = property(_get_text, _set_text)
class base(html_tag):
'''
The base element allows authors to specify the document base URL for the
purposes of resolving relative URLs, and the name of the default browsing
context for the purposes of following hyperlinks. The element does not
represent any content beyond this information.
'''
is_single = True
class link(html_tag):
'''
The link element allows authors to link their document to other resources.
'''
is_single = True
class meta(html_tag):
'''
The meta element represents various kinds of metadata that cannot be
expressed using the title, base, link, style, and script elements.
'''
is_single = True
class style(html_tag):
'''
The style element allows authors to embed style information in their
documents. The style element is one of several inputs to the styling
processing model. The element does not represent content for the user.
'''
is_pretty = False
# Scripting
class script(html_tag):
'''
The script element allows authors to include dynamic script and data blocks
in their documents. The element does not represent content for the user.
'''
is_pretty = False
class noscript(html_tag):
'''
The noscript element represents nothing if scripting is enabled, and
represents its children if scripting is disabled. It is used to present
different markup to user agents that support scripting and those that don't
support scripting, by affecting how the document is parsed.
'''
pass
# Sections
class body(html_tag):
'''
The body element represents the main content of the document.
'''
pass
class main(html_tag):
'''
The main content area of a document includes content that is unique to that
document and excludes content that is repeated across a set of documents such
as site navigation links, copyright information, site logos and banners and
search forms (unless the document or application's main function is that of a
search form).
'''
class section(html_tag):
'''
The section element represents a generic section of a document or
application. A section, in this context, is a thematic grouping of content,
typically with a heading.
'''
pass
class nav(html_tag):
'''
The nav element represents a section of a page that links to other pages or
to parts within the page: a section with navigation links.
'''
pass
class article(html_tag):
'''
The article element represents a self-contained composition in a document,
page, application, or site and that is, in principle, independently
distributable or reusable, e.g. in syndication. This could be a forum post, a
magazine or newspaper article, a blog entry, a user-submitted comment, an
interactive widget or gadget, or any other independent item of content.
'''
pass
class aside(html_tag):
'''
The aside element represents a section of a page that consists of content
that is tangentially related to the content around the aside element, and
which could be considered separate from that content. Such sections are
often represented as sidebars in printed typography.
'''
pass
class h1(html_tag):
'''
Represents the highest ranking heading.
'''
pass
class h2(html_tag):
'''
Represents the second-highest ranking heading.
'''
pass
class h3(html_tag):
'''
Represents the third-highest ranking heading.
'''
pass
class h4(html_tag):
'''
Represents the fourth-highest ranking heading.
'''
pass
class h5(html_tag):
'''
Represents the fifth-highest ranking heading.
'''
pass
class h6(html_tag):
'''
Represents the sixth-highest ranking heading.
'''
pass
class hgroup(html_tag):
'''
The hgroup element represents the heading of a section. The element is used
to group a set of h1-h6 elements when the heading has multiple levels, such
as subheadings, alternative titles, or taglines.
'''
pass
class header(html_tag):
'''
The header element represents a group of introductory or navigational aids.
'''
pass
class footer(html_tag):
'''
The footer element represents a footer for its nearest ancestor sectioning
content or sectioning root element. A footer typically contains information
about its section such as who wrote it, links to related documents,
copyright data, and the like.
'''
pass
class address(html_tag):
'''
The address element represents the contact information for its nearest
article or body element ancestor. If that is the body element, then the
contact information applies to the document as a whole.
'''
pass
# Grouping content
class p(html_tag):
'''
The p element represents a paragraph.
'''
pass
class hr(html_tag):
'''
The hr element represents a paragraph-level thematic break, e.g. a scene
change in a story, or a transition to another topic within a section of a
reference book.
'''
is_single = True
class pre(html_tag):
'''
The pre element represents a block of preformatted text, in which structure
is represented by typographic conventions rather than by elements.
'''
is_pretty = False
class blockquote(html_tag):
'''
The blockquote element represents a section that is quoted from another
source.
'''
pass
class ol(html_tag):
'''
The ol element represents a list of items, where the items have been
intentionally ordered, such that changing the order would change the
meaning of the document.
'''
pass
class ul(html_tag):
'''
The ul element represents a list of items, where the order of the items is
not important - that is, where changing the order would not materially change
the meaning of the document.
'''
pass
class li(html_tag):
'''
The li element represents a list item. If its parent element is an ol, ul, or
menu element, then the element is an item of the parent element's list, as
defined for those elements. Otherwise, the list item has no defined
list-related relationship to any other li element.
'''
pass
class dl(html_tag):
'''
The dl element represents an association list consisting of zero or more
name-value groups (a description list). Each group must consist of one or
more names (dt elements) followed by one or more values (dd elements).
Within a single dl element, there should not be more than one dt element for
each name.
'''
pass
class dt(html_tag):
'''
The dt element represents the term, or name, part of a term-description group
in a description list (dl element).
'''
pass
class dd(html_tag):
'''
The dd element represents the description, definition, or value, part of a
term-description group in a description list (dl element).
'''
pass
class figure(html_tag):
'''
The figure element represents some flow content, optionally with a caption,
that is self-contained and is typically referenced as a single unit from the
main flow of the document.
'''
pass
class figcaption(html_tag):
'''
The figcaption element represents a caption or legend for the rest of the
contents of the figcaption element's parent figure element, if any.
'''
pass
class div(html_tag):
'''
The div element has no special meaning at all. It represents its children. It
can be used with the class, lang, and title attributes to mark up semantics
common to a group of consecutive elements.
'''
pass
# Text semantics
class a(html_tag):
'''
If the a element has an href attribute, then it represents a hyperlink (a
hypertext anchor).
If the a element has no href attribute, then the element represents a
placeholder for where a link might otherwise have been placed, if it had been
relevant.
'''
pass
class em(html_tag):
'''
The em element represents stress emphasis of its contents.
'''
pass
class strong(html_tag):
'''
The strong element represents strong importance for its contents.
'''
pass
class small(html_tag):
'''
The small element represents side comments such as small print.
'''
pass
class s(html_tag):
'''
The s element represents contents that are no longer accurate or no longer
relevant.
'''
pass
class cite(html_tag):
'''
The cite element represents the title of a work (e.g. a book, a paper, an
essay, a poem, a score, a song, a script, a film, a TV show, a game, a
sculpture, a painting, a theatre production, a play, an opera, a musical, an
exhibition, a legal case report, etc). This can be a work that is being
quoted or referenced in detail (i.e. a citation), or it can just be a work
that is mentioned in passing.
'''
pass
class q(html_tag):
'''
The q element represents some phrasing content quoted from another source.
'''
pass
class dfn(html_tag):
'''
The dfn element represents the defining instance of a term. The paragraph,
description list group, or section that is the nearest ancestor of the dfn
element must also contain the definition(s) for the term given by the dfn
element.
'''
pass
class abbr(html_tag):
'''
The abbr element represents an abbreviation or acronym, optionally with its
expansion. The title attribute may be used to provide an expansion of the
abbreviation. The attribute, if specified, must contain an expansion of the
abbreviation, and nothing else.
'''
pass
class time_(html_tag):
'''
The time element represents either a time on a 24 hour clock, or a precise
date in the proleptic Gregorian calendar, optionally with a time and a
time-zone offset.
'''
pass
_time = time_
class code(html_tag):
'''
The code element represents a fragment of computer code. This could be an XML
element name, a filename, a computer program, or any other string that a
computer would recognize.
'''
pass
class var(html_tag):
'''
The var element represents a variable. This could be an actual variable in a
mathematical expression or programming context, an identifier representing a
constant, a function parameter, or just be a term used as a placeholder in
prose.
'''
pass
class samp(html_tag):
'''
The samp element represents (sample) output from a program or computing
system.
'''
pass
class kbd(html_tag):
'''
The kbd element represents user input (typically keyboard input, although it
may also be used to represent other input, such as voice commands).
'''
pass
class sub(html_tag):
'''
The sub element represents a subscript.
'''
pass
class sup(html_tag):
'''
The sup element represents a superscript.
'''
pass
class i(html_tag):
'''
The i element represents a span of text in an alternate voice or mood, or
otherwise offset from the normal prose in a manner indicating a different
quality of text, such as a taxonomic designation, a technical term, an
idiomatic phrase from another language, a thought, or a ship name in Western
texts.
'''
pass
class b(html_tag):
'''
The b element represents a span of text to which attention is being drawn for
utilitarian purposes without conveying any extra importance and with no
implication of an alternate voice or mood, such as key words in a document
abstract, product names in a review, actionable words in interactive
text-driven software, or an article lede.
'''
pass
class u(html_tag):
'''
The u element represents a span of text with an unarticulated, though
explicitly rendered, non-textual annotation, such as labeling the text as
being a proper name in Chinese text (a Chinese proper name mark), or
labeling the text as being misspelt.
'''
pass
class mark(html_tag):
'''
The mark element represents a run of text in one document marked or
highlighted for reference purposes, due to its relevance in another context.
When used in a quotation or other block of text referred to from the prose,
it indicates a highlight that was not originally present but which has been
added to bring the reader's attention to a part of the text that might not
have been considered important by the original author when the block was
originally written, but which is now under previously unexpected scrutiny.
When used in the main prose of a document, it indicates a part of the
document that has been highlighted due to its likely relevance to the user's
current activity.
'''
pass
class ruby(html_tag):
'''
The ruby element allows one or more spans of phrasing content to be marked
with ruby annotations. Ruby annotations are short runs of text presented
alongside base text, primarily used in East Asian typography as a guide for
pronunciation or to include other annotations. In Japanese, this form of
typography is also known as furigana.
'''
pass
class rt(html_tag):
'''
The rt element marks the ruby text component of a ruby annotation.
'''
pass
class rp(html_tag):
'''
The rp element can be used to provide parentheses around a ruby text
component of a ruby annotation, to be shown by user agents that don't support
ruby annotations.
'''
pass
class bdi(html_tag):
'''
The bdi element represents a span of text that is to be isolated from its
surroundings for the purposes of bidirectional text formatting.
'''
pass
class bdo(html_tag):
'''
The bdo element represents explicit text directionality formatting control
for its children. It allows authors to override the Unicode bidirectional
algorithm by explicitly specifying a direction override.
'''
pass
class span(html_tag):
'''
The span element doesn't mean anything on its own, but can be useful when
used together with the global attributes, e.g. class, lang, or dir. It
represents its children.
'''
pass
class br(html_tag):
'''
The br element represents a line break.
'''
is_single = True
is_inline = True
class wbr(html_tag):
'''
The wbr element represents a line break opportunity.
'''
is_single = True
is_inline = True
# Edits
class ins(html_tag):
'''
The ins element represents an addition to the document.
'''
pass
class del_(html_tag):
'''
The del element represents a removal from the document.
'''
pass
# Embedded content
class img(html_tag):
'''
An img element represents an image.
'''
is_single = True
class iframe(html_tag):
'''
The iframe element represents a nested browsing context.
'''
pass
class embed(html_tag):
'''
The embed element represents an integration point for an external (typically
non-HTML) application or interactive content.
'''
is_single = True
class object_(html_tag):
'''
The object element can represent an external resource, which, depending on
the type of the resource, will either be treated as an image, as a nested
browsing context, or as an external resource to be processed by a plugin.
'''
pass
_object = object_
class param(html_tag):
'''
The param element defines parameters for plugins invoked by object elements.
It does not represent anything on its own.
'''
is_single = True
class video(html_tag):
'''
A video element is used for playing videos or movies, and audio files with
captions.
'''
pass
class audio(html_tag):
'''
An audio element represents a sound or audio stream.
'''
pass
class source(html_tag):
'''
The source element allows authors to specify multiple alternative media
resources for media elements. It does not represent anything on its own.
'''
is_single = True
class track(html_tag):
'''
The track element allows authors to specify explicit external timed text
tracks for media elements. It does not represent anything on its own.
'''
is_single = True
class canvas(html_tag):
'''
The canvas element provides scripts with a resolution-dependent bitmap
canvas, which can be used for rendering graphs, game graphics, or other
visual images on the fly.
'''
pass
class map_(html_tag):
'''
The map element, in conjunction with any area element descendants, defines an
image map. The element represents its children.
'''
pass
class area(html_tag):
'''
The area element represents either a hyperlink with some text and a
corresponding area on an image map, or a dead area on an image map.
'''
is_single = True
# Tabular data
class table(html_tag):
'''
The table element represents data with more than one dimension, in the form
of a table.
'''
pass
class caption(html_tag):
'''
The caption element represents the title of the table that is its parent, if
it has a parent and that is a table element.
'''
pass
class colgroup(html_tag):
'''
The colgroup element represents a group of one or more columns in the table
that is its parent, if it has a parent and that is a table element.
'''
pass
class col(html_tag):
'''
If a col element has a parent and that is a colgroup element that itself has
a parent that is a table element, then the col element represents one or more
columns in the column group represented by that colgroup.
'''
is_single = True
class tbody(html_tag):
'''
The tbody element represents a block of rows that consist of a body of data
for the parent table element, if the tbody element has a parent and it is a
table.
'''
pass
class thead(html_tag):
'''
The thead element represents the block of rows that consist of the column
labels (headers) for the parent table element, if the thead element has a
parent and it is a table.
'''
pass
class tfoot(html_tag):
'''
The tfoot element represents the block of rows that consist of the column
summaries (footers) for the parent table element, if the tfoot element has a
parent and it is a table.
'''
pass
class tr(html_tag):
'''
The tr element represents a row of cells in a table.
'''
pass
class td(html_tag):
'''
The td element represents a data cell in a table.
'''
pass
class th(html_tag):
'''
The th element represents a header cell in a table.
'''
pass
# Forms
class form(html_tag):
'''
The form element represents a collection of form-associated elements, some of
which can represent editable values that can be submitted to a server for
processing.
'''
pass
class fieldset(html_tag):
'''
The fieldset element represents a set of form controls optionally grouped
under a common name.
'''
pass
class legend(html_tag):
'''
The legend element represents a caption for the rest of the contents of the
legend element's parent fieldset element, if any.
'''
pass
class label(html_tag):
'''
The label element represents a caption in a user interface. The caption can be
associated with a specific form control, known as the label element's labeled
control, either by using the for attribute or by putting the form control
inside the label element itself.
'''
pass
class input_(html_tag):
'''
The input element represents a typed data field, usually with a form control
to allow the user to edit the data.
'''
is_single = True
input = _input = input_
class button(html_tag):
'''
The button element represents a button. If the element is not disabled, then
the user agent should allow the user to activate the button.
'''
pass
class select(html_tag):
'''
The select element represents a control for selecting amongst a set of
options.
'''
pass
class datalist(html_tag):
'''
The datalist element represents a set of option elements that represent
predefined options for other controls. The contents of the element represents
fallback content for legacy user agents, intermixed with option elements that
represent the predefined options. In the rendering, the datalist element
represents nothing and it, along with its children, should be hidden.
'''
pass
class optgroup(html_tag):
'''
The optgroup element represents a group of option elements with a common
label.
'''
pass
class option(html_tag):
'''
The option element represents an option in a select element or as part of a
list of suggestions in a datalist element.
'''
pass
class textarea(html_tag):
'''
The textarea element represents a multiline plain text edit control for the
element's raw value. The contents of the control represent the control's
default value.
'''
pass
class keygen(html_tag):
'''
The keygen element represents a key pair generator control. When the
control's form is submitted, the private key is stored in the local keystore,
and the public key is packaged and sent to the server.
'''
is_single = True
class output(html_tag):
'''
The output element represents the result of a calculation.
'''
pass
class progress(html_tag):
'''
The progress element represents the completion progress of a task. The
progress is either indeterminate, indicating that progress is being made but
that it is not clear how much more work remains to be done before the task is
complete (e.g. because the task is waiting for a remote host to respond), or
the progress is a number in the range zero to a maximum, giving the fraction
of work that has so far been completed.
'''
pass
class meter(html_tag):
'''
The meter element represents a scalar measurement within a known range, or a
fractional value; for example disk usage, the relevance of a query result, or
the fraction of a voting population to have selected a particular candidate.
'''
pass
# Interactive elements
class details(html_tag):
'''
The details element represents a disclosure widget from which the user can
obtain additional information or controls.
'''
pass
class summary(html_tag):
'''
The summary element represents a summary, caption, or legend for the rest of
the contents of the summary element's parent details element, if any.
'''
pass
class command(html_tag):
'''
The command element represents a command that the user can invoke.
'''
is_single = True
class menu(html_tag):
'''
The menu element represents a list of commands.
'''
pass
class font(html_tag):
'''
The font element sets the font of its contents. It is a legacy presentational
element and is obsolete in HTML5.
'''
pass
# Additional markup
class comment(html_tag):
'''
Normal, one-line comment:
>>> print comment("Hello, comments!")
<!--Hello, comments!-->
For IE's "if" statement comments:
>>> print comment(p("Upgrade your browser."), condition='lt IE6')
<!--[if lt IE6]><p>Upgrade your browser.</p><![endif]-->
Downlevel conditional comments:
>>> print comment(p("You are using a ", em("downlevel"), " browser."),
condition='false', downlevel='revealed')
<![if false]><p>You are using a <em>downlevel</em> browser.</p><![endif]>
For more on conditional comments see:
http://msdn.microsoft.com/en-us/library/ms537512(VS.85).aspx
'''
ATTRIBUTE_CONDITION = 'condition'
# Valid values are 'hidden', 'downlevel' or 'revealed'
ATTRIBUTE_DOWNLEVEL = 'downlevel'
def _render(self, sb, indent_level=1, indent_str=' ', pretty=True, xhtml=False):
has_condition = comment.ATTRIBUTE_CONDITION in self.attributes
is_revealed = comment.ATTRIBUTE_DOWNLEVEL in self.attributes and \
self.attributes[comment.ATTRIBUTE_DOWNLEVEL] == 'revealed'
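# Revealed downlevel comments omit the '--' markers (<![if ...]> ... <![endif]>),
# so non-IE ("downlevel") browsers render the enclosed content instead of
# treating it as a hidden comment.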
sb.append('<!')
if not is_revealed:
sb.append('--')
if has_condition:
sb.append('[if %s]>' % self.attributes[comment.ATTRIBUTE_CONDITION])
pretty = self._render_children(sb, indent_level - 1, indent_str, pretty, xhtml)
# if len(self.children) > 1:
if any(isinstance(child, dom_tag) for child in self):
sb.append('\n')
sb.append(indent_str * (indent_level - 1))
if has_condition:
sb.append('<![endif]')
if not is_revealed:
sb.append('--')
sb.append('>')
return sb
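# A minimal usage sketch (not part of the original module): it assumes the
# dominate-style dom_tag API suggested by the classes above, where tags take
# children and keyword attributes and render to markup via str()/print.
if __name__ == '__main__':
    page = div(
        h1('Section heading'),
        p('A paragraph with a ', a('link', href='https://example.com'), '.'),
        id='intro')
    print(page)
    # Conditional comment, as in the comment class docstring above:
    print(comment(p('Upgrade your browser.'), condition='lt IE6'))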
|
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import functools
import os
import socket
import ssl
import threading
import time
import uuid
import kombu
import kombu.connection
import kombu.entity
import kombu.messaging
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import netutils
import six
from six.moves.urllib import parse
from oslo_messaging._drivers import amqp as rpc_amqp
from oslo_messaging._drivers import amqpdriver
from oslo_messaging._drivers import base
from oslo_messaging._drivers import common as rpc_common
from oslo_messaging._i18n import _
from oslo_messaging._i18n import _LE
from oslo_messaging._i18n import _LI
from oslo_messaging._i18n import _LW
from oslo_messaging import _utils
from oslo_messaging import exceptions
rabbit_opts = [
cfg.StrOpt('kombu_ssl_version',
default='',
deprecated_group='DEFAULT',
help='SSL version to use (valid only if SSL enabled). '
'Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, '
'TLSv1_1, and TLSv1_2 may be available on some '
'distributions.'
),
cfg.StrOpt('kombu_ssl_keyfile',
default='',
deprecated_group='DEFAULT',
help='SSL key file (valid only if SSL enabled).'),
cfg.StrOpt('kombu_ssl_certfile',
default='',
deprecated_group='DEFAULT',
help='SSL cert file (valid only if SSL enabled).'),
cfg.StrOpt('kombu_ssl_ca_certs',
default='',
deprecated_group='DEFAULT',
help='SSL certification authority file '
'(valid only if SSL enabled).'),
cfg.FloatOpt('kombu_reconnect_delay',
default=1.0,
deprecated_group='DEFAULT',
help='How long to wait before reconnecting in response to an '
'AMQP consumer cancel notification.'),
cfg.IntOpt('kombu_reconnect_timeout',
# NOTE(dhellmann): We want this to be similar to
# rpc_response_timeout, but we can't use
# "$rpc_response_timeout" as a default because that
# option may not have been defined by the time this
# option is accessed. Instead, document the intent in
# the help text for this option and provide a separate
# literal default value.
default=60,
help='How long to wait before considering a reconnect '
'attempt to have failed. This value should not be '
'longer than rpc_response_timeout.'),
cfg.StrOpt('rabbit_host',
default='localhost',
deprecated_group='DEFAULT',
help='The RabbitMQ broker address where a single node is '
'used.'),
cfg.IntOpt('rabbit_port',
default=5672,
deprecated_group='DEFAULT',
help='The RabbitMQ broker port where a single node is used.'),
cfg.ListOpt('rabbit_hosts',
default=['$rabbit_host:$rabbit_port'],
deprecated_group='DEFAULT',
help='RabbitMQ HA cluster host:port pairs.'),
cfg.BoolOpt('rabbit_use_ssl',
default=False,
deprecated_group='DEFAULT',
help='Connect over SSL for RabbitMQ.'),
cfg.StrOpt('rabbit_userid',
default='guest',
deprecated_group='DEFAULT',
help='The RabbitMQ userid.'),
cfg.StrOpt('rabbit_password',
default='guest',
deprecated_group='DEFAULT',
help='The RabbitMQ password.',
secret=True),
cfg.StrOpt('rabbit_login_method',
default='AMQPLAIN',
deprecated_group='DEFAULT',
help='The RabbitMQ login method.'),
cfg.StrOpt('rabbit_virtual_host',
default='/',
deprecated_group='DEFAULT',
help='The RabbitMQ virtual host.'),
cfg.IntOpt('rabbit_retry_interval',
default=1,
help='How frequently to retry connecting with RabbitMQ.'),
cfg.IntOpt('rabbit_retry_backoff',
default=2,
deprecated_group='DEFAULT',
help='How long to backoff for between retries when connecting '
'to RabbitMQ.'),
cfg.IntOpt('rabbit_max_retries',
default=0,
deprecated_group='DEFAULT',
help='Maximum number of RabbitMQ connection retries. '
'Default is 0 (infinite retry count).'),
cfg.BoolOpt('rabbit_ha_queues',
default=False,
deprecated_group='DEFAULT',
help='Use HA queues in RabbitMQ (x-ha-policy: all). '
'If you change this option, you must wipe the '
'RabbitMQ database.'),
cfg.IntOpt('heartbeat_timeout_threshold',
default=60,
help="Number of seconds after which the Rabbit broker is "
"considered down if heartbeat's keep-alive fails "
"(0 disable the heartbeat). EXPERIMENTAL"),
cfg.IntOpt('heartbeat_rate',
default=2,
help='How many times during the heartbeat_timeout_threshold '
'we check the heartbeat.'),
# NOTE(sileht): deprecated option since oslo_messaging 1.5.0,
cfg.BoolOpt('fake_rabbit',
default=False,
deprecated_group='DEFAULT',
help='Deprecated, use rpc_backend=kombu+memory or '
'rpc_backend=fake'),
]
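# For illustration only: these options live in the 'oslo_messaging_rabbit'
# group (registered in RabbitDriver.__init__ below), so a deployment would
# typically set them in oslo.config style, e.g.:
#
#   [oslo_messaging_rabbit]
#   rabbit_hosts = broker1:5672,broker2:5672   # hypothetical hostnames
#   rabbit_userid = guest
#   rabbit_password = guest
#   rabbit_ha_queues = True
#   heartbeat_timeout_threshold = 60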
LOG = logging.getLogger(__name__)
def _get_queue_arguments(rabbit_ha_queues):
"""Construct the arguments for declaring a queue.
If the rabbit_ha_queues option is set, we declare a mirrored queue
as described here:
http://www.rabbitmq.com/ha.html
Setting x-ha-policy to all means that the queue will be mirrored
to all nodes in the cluster.
"""
return {'x-ha-policy': 'all'} if rabbit_ha_queues else {}
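# For example: _get_queue_arguments(True) -> {'x-ha-policy': 'all'}
# (mirrored queue), while _get_queue_arguments(False) -> {} (plain queue).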
class RabbitMessage(dict):
def __init__(self, raw_message):
super(RabbitMessage, self).__init__(
rpc_common.deserialize_msg(raw_message.payload))
LOG.trace('RabbitMessage.Init: message %s', self)
self._raw_message = raw_message
def acknowledge(self):
LOG.trace('RabbitMessage.acknowledge: message %s', self)
self._raw_message.ack()
def requeue(self):
LOG.trace('RabbitMessage.requeue: message %s', self)
self._raw_message.requeue()
class Consumer(object):
"""Consumer class."""
def __init__(self, exchange_name, queue_name, routing_key, type, durable,
auto_delete, callback, nowait=True, rabbit_ha_queues=None):
"""Init the Publisher class with the exchange_name, routing_key,
type, durable auto_delete
"""
self.queue_name = queue_name
self.exchange_name = exchange_name
self.routing_key = routing_key
self.auto_delete = auto_delete
self.durable = durable
self.callback = callback
self.type = type
self.nowait = nowait
self.queue_arguments = _get_queue_arguments(rabbit_ha_queues)
self.queue = None
self.exchange = kombu.entity.Exchange(
name=exchange_name,
type=type,
durable=self.durable,
auto_delete=self.auto_delete)
def declare(self, conn):
"""Re-declare the queue after a rabbit (re)connect."""
self.queue = kombu.entity.Queue(
name=self.queue_name,
channel=conn.channel,
exchange=self.exchange,
durable=self.durable,
auto_delete=self.auto_delete,
routing_key=self.routing_key,
queue_arguments=self.queue_arguments)
try:
LOG.trace('ConsumerBase.declare: '
'queue %s', self.queue_name)
self.queue.declare()
except conn.connection.channel_errors as exc:
# NOTE(jrosenboom): This exception may be triggered by a race
# condition. Simply retrying will solve the error most of the time
# and should work well enough as a workaround until the race
# condition itself can be fixed.
# See https://bugs.launchpad.net/neutron/+bug/1318721 for details.
if exc.code == 404:
self.queue.declare()
else:
raise
def consume(self, tag):
"""Actually declare the consumer on the amqp channel. This will
start the flow of messages from the queue. Calling
Connection.consume() will then process the messages,
invoking the appropriate callback.
"""
self.queue.consume(callback=self._callback,
consumer_tag=six.text_type(tag),
nowait=self.nowait)
def cancel(self, tag):
LOG.trace('ConsumerBase.cancel: canceling %s', tag)
self.queue.cancel(six.text_type(tag))
def _callback(self, message):
"""Call callback with deserialized message.
Messages that are processed and ack'ed.
"""
m2p = getattr(self.queue.channel, 'message_to_python', None)
if m2p:
message = m2p(message)
try:
self.callback(RabbitMessage(message))
except Exception:
LOG.exception(_LE("Failed to process message"
" ... skipping it."))
message.ack()
class DummyConnectionLock(object):
def acquire(self):
pass
def release(self):
pass
def heartbeat_acquire(self):
pass
def __enter__(self):
self.acquire()
def __exit__(self, type, value, traceback):
self.release()
class ConnectionLock(DummyConnectionLock):
"""Lock object to protect access the the kombu connection
This is a lock object to protect access the the kombu connection
object between the heartbeat thread and the driver thread.
They are two way to acquire this lock:
* lock.acquire()
* lock.heartbeat_acquire()
In both case lock.release(), release the lock.
The goal is that the heartbeat thread always have the priority
for acquiring the lock. This ensures we have no heartbeat
starvation when the driver sends a lot of messages.
So when lock.heartbeat_acquire() is called next time the lock
is released(), the caller unconditionnaly acquires
the lock, even someone else have asked for the lock before it.
"""
def __init__(self):
self._workers_waiting = 0
self._heartbeat_waiting = False
self._lock_acquired = None
self._monitor = threading.Lock()
self._workers_locks = threading.Condition(self._monitor)
self._heartbeat_lock = threading.Condition(self._monitor)
self._get_thread_id = _utils.fetch_current_thread_functor()
def acquire(self):
with self._monitor:
while self._lock_acquired:
self._workers_waiting += 1
self._workers_locks.wait()
self._workers_waiting -= 1
self._lock_acquired = self._get_thread_id()
def heartbeat_acquire(self):
# NOTE(sileht): must be called by only one thread at a time (the heartbeat thread)
with self._monitor:
while self._lock_acquired is not None:
self._heartbeat_waiting = True
self._heartbeat_lock.wait()
self._heartbeat_waiting = False
self._lock_acquired = self._get_thread_id()
def release(self):
with self._monitor:
if self._lock_acquired is None:
raise RuntimeError("We can't release a not acquired lock")
thread_id = self._get_thread_id()
if self._lock_acquired != thread_id:
raise RuntimeError("We can't release lock acquired by another "
"thread/greenthread; %s vs %s" %
(self._lock_acquired, thread_id))
self._lock_acquired = None
if self._heartbeat_waiting:
self._heartbeat_lock.notify()
elif self._workers_waiting > 0:
self._workers_locks.notify()
@contextlib.contextmanager
def for_heartbeat(self):
self.heartbeat_acquire()
try:
yield
finally:
self.release()
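# Illustrative use of the lock (mirrors how Connection uses it below; the
# worker-side body is hypothetical): driver/worker threads wrap connection
# access in "with lock:", while the heartbeat thread uses
# "with lock.for_heartbeat():" and is therefore granted the lock with
# priority on the next release():
#
#   lock = ConnectionLock()
#   # worker/driver thread:
#   with lock:
#       publish_pending_messages()      # hypothetical work under the lock
#   # heartbeat thread:
#   with lock.for_heartbeat():
#       connection.heartbeat_check()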
class Connection(object):
"""Connection object."""
pools = {}
def __init__(self, conf, url, purpose):
# NOTE(viktors): Parse config options
driver_conf = conf.oslo_messaging_rabbit
self.max_retries = driver_conf.rabbit_max_retries
self.interval_start = driver_conf.rabbit_retry_interval
self.interval_stepping = driver_conf.rabbit_retry_backoff
self.login_method = driver_conf.rabbit_login_method
self.fake_rabbit = driver_conf.fake_rabbit
self.virtual_host = driver_conf.rabbit_virtual_host
self.rabbit_hosts = driver_conf.rabbit_hosts
self.rabbit_port = driver_conf.rabbit_port
self.rabbit_userid = driver_conf.rabbit_userid
self.rabbit_password = driver_conf.rabbit_password
self.rabbit_ha_queues = driver_conf.rabbit_ha_queues
self.heartbeat_timeout_threshold = \
driver_conf.heartbeat_timeout_threshold
self.heartbeat_rate = driver_conf.heartbeat_rate
self.kombu_reconnect_delay = driver_conf.kombu_reconnect_delay
self.amqp_durable_queues = driver_conf.amqp_durable_queues
self.amqp_auto_delete = driver_conf.amqp_auto_delete
self.rabbit_use_ssl = driver_conf.rabbit_use_ssl
self.kombu_reconnect_timeout = driver_conf.kombu_reconnect_timeout
if self.rabbit_use_ssl:
self.kombu_ssl_version = driver_conf.kombu_ssl_version
self.kombu_ssl_keyfile = driver_conf.kombu_ssl_keyfile
self.kombu_ssl_certfile = driver_conf.kombu_ssl_certfile
self.kombu_ssl_ca_certs = driver_conf.kombu_ssl_ca_certs
# Try forever?
if self.max_retries <= 0:
self.max_retries = None
# max retry-interval = 30 seconds
self.interval_max = 30
if url.virtual_host is not None:
virtual_host = url.virtual_host
else:
virtual_host = self.virtual_host
self._url = ''
if self.fake_rabbit:
LOG.warn("Deprecated: fake_rabbit option is deprecated, set "
"rpc_backend to kombu+memory or use the fake "
"driver instead.")
self._url = 'memory://%s/' % virtual_host
elif url.hosts:
if url.transport.startswith('kombu+'):
LOG.warn(_LW('Selecting the kombu transport through the '
'transport url (%s) is an experimental feature '
'and this is not yet supported.') % url.transport)
for host in url.hosts:
transport = url.transport.replace('kombu+', '')
transport = transport.replace('rabbit', 'amqp')
self._url += '%s%s://%s:%s@%s:%s/%s' % (
";" if self._url else '',
transport,
parse.quote(host.username or ''),
parse.quote(host.password or ''),
self._parse_url_hostname(host.hostname) or '',
str(host.port or 5672),
virtual_host)
elif url.transport.startswith('kombu+'):
# NOTE(sileht): the url has a + but no hosts
# (like kombu+memory:///), so pass it to kombu as-is
transport = url.transport.replace('kombu+', '')
self._url = "%s://%s" % (transport, virtual_host)
else:
for adr in self.rabbit_hosts:
hostname, port = netutils.parse_host_port(
adr, default_port=self.rabbit_port)
self._url += '%samqp://%s:%s@%s:%s/%s' % (
";" if self._url else '',
parse.quote(self.rabbit_userid, ''),
parse.quote(self.rabbit_password, ''),
self._parse_url_hostname(hostname), port,
virtual_host)
self._initial_pid = os.getpid()
self._consumers = []
self._new_consumers = []
self._consume_loop_stopped = False
self.channel = None
# NOTE(sileht): if purpose is PURPOSE_LISTEN
# we don't need the lock because we don't
# have a heartbeat thread
if purpose == rpc_amqp.PURPOSE_SEND:
self._connection_lock = ConnectionLock()
else:
self._connection_lock = DummyConnectionLock()
self.connection = kombu.connection.Connection(
self._url, ssl=self._fetch_ssl_params(),
login_method=self.login_method,
failover_strategy="shuffle",
heartbeat=self.heartbeat_timeout_threshold,
transport_options={
'confirm_publish': True,
'on_blocked': self._on_connection_blocked,
'on_unblocked': self._on_connection_unblocked,
},
)
LOG.info(_LI('Connecting to AMQP server on %(hostname)s:%(port)s'),
self.connection.info())
# NOTE(sileht): kombu recommends running heartbeat_check every
# second, but we hold a lock around the kombu connection. To avoid
# holding this lock most of the time doing nothing except waiting
# for events to drain, we run heartbeat_check and retrieve the
# server heartbeat packet only twice as often as the minimum
# required for the heartbeat to work
# (heartbeat_timeout / heartbeat_rate / 2.0; the default kombu
# heartbeat_rate is 2).
self._heartbeat_wait_timeout = (
float(self.heartbeat_timeout_threshold) /
float(self.heartbeat_rate) / 2.0)
self._heartbeat_support_log_emitted = False
# NOTE(sileht): just ensure the connection is set up at startup
self.ensure_connection()
# NOTE(sileht): if purpose is PURPOSE_LISTEN
# the consume code does the heartbeat stuff
# we don't need a thread
self._heartbeat_thread = None
if purpose == rpc_amqp.PURPOSE_SEND:
self._heartbeat_start()
LOG.info(_LI('Connected to AMQP server on %(hostname)s:%(port)s'),
self.connection.info())
# NOTE(sileht): value chosen according to the best practice from kombu
# http://kombu.readthedocs.org/en/latest/reference/kombu.common.html#kombu.common.eventloop
# For heartbeat we can set a bigger timeout, and check that we receive
# the heartbeat packets regularly.
if self._heartbeat_supported_and_enabled():
self._poll_timeout = self._heartbeat_wait_timeout
else:
self._poll_timeout = 1
if self._url.startswith('memory://'):
# Kludge to speed up tests.
self.connection.transport.polling_interval = 0.0
# Fixup logging
self.connection.hostname = "memory_driver"
self.connection.port = 1234
self._poll_timeout = 0.05
# FIXME(markmc): use oslo sslutils when it is available as a library
_SSL_PROTOCOLS = {
"tlsv1": ssl.PROTOCOL_TLSv1,
"sslv23": ssl.PROTOCOL_SSLv23
}
_OPTIONAL_PROTOCOLS = {
'sslv2': 'PROTOCOL_SSLv2',
'sslv3': 'PROTOCOL_SSLv3',
'tlsv1_1': 'PROTOCOL_TLSv1_1',
'tlsv1_2': 'PROTOCOL_TLSv1_2',
}
for protocol in _OPTIONAL_PROTOCOLS:
try:
_SSL_PROTOCOLS[protocol] = getattr(ssl,
_OPTIONAL_PROTOCOLS[protocol])
except AttributeError:
pass
@classmethod
def validate_ssl_version(cls, version):
key = version.lower()
try:
return cls._SSL_PROTOCOLS[key]
except KeyError:
raise RuntimeError(_("Invalid SSL version : %s") % version)
def _parse_url_hostname(self, hostname):
"""Handles hostname returned from urlparse and checks whether it's
ipaddress. If it's ipaddress it ensures that it has brackets for IPv6.
"""
return '[%s]' % hostname if ':' in hostname else hostname
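# For example: '::1' -> '[::1]', '127.0.0.1' -> '127.0.0.1',
# 'rabbit.example.org' -> 'rabbit.example.org'.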
def _fetch_ssl_params(self):
"""Handles fetching what ssl params should be used for the connection
(if any).
"""
if self.rabbit_use_ssl:
ssl_params = dict()
# http://docs.python.org/library/ssl.html - ssl.wrap_socket
if self.kombu_ssl_version:
ssl_params['ssl_version'] = self.validate_ssl_version(
self.kombu_ssl_version)
if self.kombu_ssl_keyfile:
ssl_params['keyfile'] = self.kombu_ssl_keyfile
if self.kombu_ssl_certfile:
ssl_params['certfile'] = self.kombu_ssl_certfile
if self.kombu_ssl_ca_certs:
ssl_params['ca_certs'] = self.kombu_ssl_ca_certs
# We might want to allow variations in the
# future with this?
ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
return ssl_params or True
return False
@staticmethod
def _on_connection_blocked(reason):
LOG.error(_LE("The broker has blocked the connection: %s"), reason)
@staticmethod
def _on_connection_unblocked():
LOG.info(_LI("The broker has unblocked the connection"))
def ensure_connection(self):
self.ensure(method=lambda: True)
def ensure(self, method, retry=None,
recoverable_error_callback=None, error_callback=None,
timeout_is_error=True):
"""Will retry up to retry number of times.
retry = None means use the value of rabbit_max_retries
retry = -1 means to retry forever
retry = 0 means no retry
retry = N means N retries
NOTE(sileht): Must be called within the connection lock
"""
current_pid = os.getpid()
if self._initial_pid != current_pid:
LOG.warn("Process forked after connection established! "
"This can result in unpredictable behavior. "
"See: http://docs.openstack.org/developer/"
"oslo_messaging/transport.html")
self._initial_pid = current_pid
if retry is None:
retry = self.max_retries
if retry is None or retry < 0:
retry = None
def on_error(exc, interval):
LOG.debug("Received recoverable error from kombu:",
exc_info=True)
recoverable_error_callback and recoverable_error_callback(exc)
interval = (self.kombu_reconnect_delay + interval
if self.kombu_reconnect_delay > 0
else interval)
info = {'err_str': exc, 'sleep_time': interval}
info.update(self.connection.info())
if 'Socket closed' in six.text_type(exc):
LOG.error(_LE('AMQP server %(hostname)s:%(port)s closed'
' the connection. Check login credentials:'
' %(err_str)s'), info)
else:
LOG.error(_LE('AMQP server on %(hostname)s:%(port)s is '
'unreachable: %(err_str)s. Trying again in '
'%(sleep_time)d seconds.'), info)
# XXX(nic): when reconnecting to a RabbitMQ cluster
# with mirrored queues in use, the attempt to release the
# connection can hang "indefinitely" somewhere deep down
# in Kombu. Blocking the thread for a bit prior to
# release seems to kludge around the problem where it is
# otherwise reproducible.
# TODO(sileht): Check whether this is still useful: since we
# use kombu for the HA connection, interval_step should be
# sufficient, because the underlying kombu transport
# connection object is freed.
if self.kombu_reconnect_delay > 0:
LOG.trace('Delaying reconnect for %1.1f seconds ...',
self.kombu_reconnect_delay)
time.sleep(self.kombu_reconnect_delay)
def on_reconnection(new_channel):
"""Callback invoked when the kombu reconnects and creates
a new channel, we use it the reconfigure our consumers.
"""
self._set_current_channel(new_channel)
for consumer in self._consumers:
consumer.declare(self)
LOG.info(_LI('Reconnected to AMQP server on '
'%(hostname)s:%(port)s'),
self.connection.info())
def execute_method(channel):
self._set_current_channel(channel)
method()
# NOTE(sileht): Some dummy drivers, like the in-memory one, have no
# notion of a recoverable connection, so we must raise the original
# exception as kombu does in this case.
has_modern_errors = hasattr(
self.connection.transport, 'recoverable_connection_errors',
)
if has_modern_errors:
recoverable_errors = (
self.connection.recoverable_channel_errors +
self.connection.recoverable_connection_errors)
else:
recoverable_errors = ()
try:
autoretry_method = self.connection.autoretry(
execute_method, channel=self.channel,
max_retries=retry,
errback=on_error,
interval_start=self.interval_start or 1,
interval_step=self.interval_stepping,
on_revive=on_reconnection,
)
ret, channel = autoretry_method()
self._set_current_channel(channel)
return ret
except recoverable_errors as exc:
LOG.debug("Received recoverable error from kombu:",
exc_info=True)
error_callback and error_callback(exc)
self._set_current_channel(None)
# NOTE(sileht): number of retry exceeded and the connection
# is still broken
info = {'err_str': exc, 'retry': retry}
info.update(self.connection.info())
msg = _('Unable to connect to AMQP server on '
'%(hostname)s:%(port)s after %(retry)s '
'tries: %(err_str)s') % info
LOG.error(msg)
raise exceptions.MessageDeliveryFailure(msg)
except Exception as exc:
error_callback and error_callback(exc)
raise
def _set_current_channel(self, new_channel):
"""Change the channel to use.
NOTE(sileht): Must be called within the connection lock
"""
if self.channel is not None and new_channel != self.channel:
self.PUBLISHER_DECLARED_QUEUES.pop(self.channel, None)
self.connection.maybe_close_channel(self.channel)
self.channel = new_channel
def close(self):
"""Close/release this connection."""
self._heartbeat_stop()
if self.connection:
self._set_current_channel(None)
self.connection.release()
self.connection = None
def reset(self):
"""Reset a connection so it can be used again."""
recoverable_errors = (self.connection.recoverable_channel_errors +
self.connection.recoverable_connection_errors)
with self._connection_lock:
try:
for tag, consumer in enumerate(self._consumers):
consumer.cancel(tag=tag)
except recoverable_errors:
self._set_current_channel(None)
self.ensure_connection()
self._consumers = []
def _heartbeat_supported_and_enabled(self):
if self.heartbeat_timeout_threshold <= 0:
return False
if self.connection.supports_heartbeats:
return True
elif not self._heartbeat_support_log_emitted:
LOG.warn(_LW("Heartbeat support requested but it is not supported "
"by the kombu driver or the broker"))
self._heartbeat_support_log_emitted = True
return False
@contextlib.contextmanager
def _transport_socket_timeout(self, timeout):
# NOTE(sileht): there are some cases where the heartbeat check
# or producer.send returns only when the system socket timeout
# is reached. kombu doesn't allow us to customise this timeout,
# so for py-amqp we tweak it ourselves.
sock = getattr(self.connection.transport, 'sock', None)
if sock:
orig_timeout = sock.gettimeout()
sock.settimeout(timeout)
yield
if sock:
sock.settimeout(orig_timeout)
def _heartbeat_check(self):
# NOTE(sileht): we are supposed to send at least one heartbeat
# every heartbeat_timeout_threshold, so there is no need to wait longer.
with self._transport_socket_timeout(
self.heartbeat_timeout_threshold):
self.connection.heartbeat_check(
rate=self.heartbeat_rate)
def _heartbeat_start(self):
if self._heartbeat_supported_and_enabled():
self._heartbeat_exit_event = threading.Event()
self._heartbeat_thread = threading.Thread(
target=self._heartbeat_thread_job)
self._heartbeat_thread.daemon = True
self._heartbeat_thread.start()
else:
self._heartbeat_thread = None
def _heartbeat_stop(self):
if self._heartbeat_thread is not None:
self._heartbeat_exit_event.set()
self._heartbeat_thread.join()
self._heartbeat_thread = None
def _heartbeat_thread_job(self):
"""Thread that maintains inactive connections
"""
while not self._heartbeat_exit_event.is_set():
with self._connection_lock.for_heartbeat():
recoverable_errors = (
self.connection.recoverable_channel_errors +
self.connection.recoverable_connection_errors)
try:
try:
self._heartbeat_check()
# NOTE(sileht): We need to drain events to receive the
# heartbeat from the broker, but we must not hold the
# connection for too long. In amqpdriver a connection is
# used exclusively for read or for write, so we only have
# to do this for connections used for writes; drain_events
# already does it for the other connections.
try:
self.connection.drain_events(timeout=0.001)
except socket.timeout:
pass
except recoverable_errors as exc:
LOG.info(_LI("A recoverable connection/channel error "
"occurred, trying to reconnect: %s"), exc)
self.ensure_connection()
except Exception:
LOG.warning(_LW("Unexpected error during heartbeart "
"thread processing, retrying..."))
LOG.debug('Exception', exc_info=True)
self._heartbeat_exit_event.wait(
timeout=self._heartbeat_wait_timeout)
self._heartbeat_exit_event.clear()
def declare_consumer(self, consumer):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': consumer.routing_key, 'err_str': exc}
LOG.error(_LE("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s"), log_info)
def _declare_consumer():
consumer.declare(self)
self._consumers.append(consumer)
self._new_consumers.append(consumer)
return consumer
with self._connection_lock:
return self.ensure(_declare_consumer,
error_callback=_connect_error)
def consume(self, timeout=None):
"""Consume from all queues/consumers."""
timer = rpc_common.DecayingTimer(duration=timeout)
timer.start()
def _raise_timeout(exc):
LOG.debug('Timed out waiting for RPC response: %s', exc)
raise rpc_common.Timeout()
def _recoverable_error_callback(exc):
self._new_consumers = self._consumers
timer.check_return(_raise_timeout, exc)
def _error_callback(exc):
_recoverable_error_callback(exc)
LOG.error(_LE('Failed to consume message from queue: %s'),
exc)
def _consume():
# NOTE(sileht): in case the acknowledgement or requeue of a
# message fails, the kombu transport can be disconnected.
# In this case, we must redeclare our consumers, so raise
# a recoverable error to trigger the reconnection code.
if not self.connection.connected:
raise self.connection.recoverable_connection_errors[0]
if self._new_consumers:
for tag, consumer in enumerate(self._consumers):
if consumer in self._new_consumers:
consumer.consume(tag=tag)
self._new_consumers = []
poll_timeout = (self._poll_timeout if timeout is None
else min(timeout, self._poll_timeout))
while True:
if self._consume_loop_stopped:
return
if self._heartbeat_supported_and_enabled():
self._heartbeat_check()
try:
self.connection.drain_events(timeout=poll_timeout)
return
except socket.timeout as exc:
poll_timeout = timer.check_return(
_raise_timeout, exc, maximum=self._poll_timeout)
with self._connection_lock:
self.ensure(_consume,
recoverable_error_callback=_recoverable_error_callback,
error_callback=_error_callback)
def stop_consuming(self):
self._consume_loop_stopped = True
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
In nova's use, this is generally a msg_id queue used for
responses for call/multicall
"""
consumer = Consumer(exchange_name=topic,
queue_name=topic,
routing_key=topic,
type='direct',
durable=False,
auto_delete=True,
callback=callback,
rabbit_ha_queues=self.rabbit_ha_queues)
self.declare_consumer(consumer)
def declare_topic_consumer(self, exchange_name, topic, callback=None,
queue_name=None):
"""Create a 'topic' consumer."""
consumer = Consumer(exchange_name=exchange_name,
queue_name=queue_name or topic,
routing_key=topic,
type='topic',
durable=self.amqp_durable_queues,
auto_delete=self.amqp_auto_delete,
callback=callback,
rabbit_ha_queues=self.rabbit_ha_queues)
self.declare_consumer(consumer)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer."""
unique = uuid.uuid4().hex
exchange_name = '%s_fanout' % topic
queue_name = '%s_fanout_%s' % (topic, unique)
consumer = Consumer(exchange_name=exchange_name,
queue_name=queue_name,
routing_key=topic,
type='fanout',
durable=False,
auto_delete=True,
callback=callback,
rabbit_ha_queues=self.rabbit_ha_queues)
self.declare_consumer(consumer)
def _ensure_publishing(self, method, exchange, msg, routing_key=None,
timeout=None, retry=None):
"""Send to a publisher based on the publisher class."""
def _error_callback(exc):
log_info = {'topic': exchange.name, 'err_str': exc}
LOG.error(_LE("Failed to publish message to topic "
"'%(topic)s': %(err_str)s"), log_info)
LOG.debug('Exception', exc_info=exc)
method = functools.partial(method, exchange, msg, routing_key, timeout)
with self._connection_lock:
self.ensure(method, retry=retry, error_callback=_error_callback)
def _publish(self, exchange, msg, routing_key=None, timeout=None):
"""Publish a message."""
producer = kombu.messaging.Producer(exchange=exchange,
channel=self.channel,
routing_key=routing_key)
expiration = None
if timeout:
# AMQP TTL is in milliseconds when set in the property.
# Details: http://www.rabbitmq.com/ttl.html#per-message-ttl
expiration = int(timeout * 1000)
# NOTE(sileht): no need to wait longer; the caller expects
# an answer before the timeout is reached.
transport_timeout = timeout
heartbeat_timeout = self.heartbeat_timeout_threshold
if (self._heartbeat_supported_and_enabled() and (
transport_timeout is None or
transport_timeout > heartbeat_timeout)):
# NOTE(sileht): we are supposed to send a heartbeat every
# heartbeat_timeout; there is no need to wait longer, otherwise the
# broker will disconnect us, so raise the timeout earlier ourselves.
transport_timeout = heartbeat_timeout
log_info = {'msg': msg,
'who': exchange or 'default',
'key': routing_key}
LOG.trace('Connection._publish: sending message %(msg)s to'
' %(who)s with routing key %(key)s', log_info)
with self._transport_socket_timeout(transport_timeout):
producer.publish(msg, expiration=expiration)
# Cache of notification queues declared on the channel, to avoid
# unnecessary redeclaration. This cache is reset each time
# the channel is changed in Connection._set_current_channel.
PUBLISHER_DECLARED_QUEUES = collections.defaultdict(set)
def _publish_and_creates_default_queue(self, exchange, msg,
routing_key=None, timeout=None):
"""Publisher that declares a default queue
When the exchange is missing instead of silently creates an exchange
not binded to a queue, this publisher creates a default queue
named with the routing_key
This is mainly used to not miss notification in case of nobody consumes
them yet. If the future consumer bind the default queue it can retrieve
missing messages.
_set_current_channel is responsible to cleanup the cache.
"""
queue_identifier = (exchange.name, routing_key)
# NOTE(sileht): We only do it once per reconnection
# the Connection._set_current_channel() is responsible to clear
# this cache
if (queue_identifier not in
self.PUBLISHER_DECLARED_QUEUES[self.channel]):
queue = kombu.entity.Queue(
channel=self.channel,
exchange=exchange,
durable=exchange.durable,
auto_delete=exchange.auto_delete,
name=routing_key,
routing_key=routing_key,
queue_arguments=_get_queue_arguments(self.rabbit_ha_queues))
log_info = {'key': routing_key, 'exchange': exchange}
LOG.trace(
'Connection._publish_and_creates_default_queue: '
'declare queue %(key)s on %(exchange)s exchange', log_info)
queue.declare()
self.PUBLISHER_DECLARED_QUEUES[self.channel].add(queue_identifier)
self._publish(exchange, msg, routing_key=routing_key, timeout=timeout)
def _publish_and_retry_on_missing_exchange(self, exchange, msg,
routing_key=None, timeout=None):
"""Publisher that retry if the exchange is missing.
"""
if not exchange.passive:
raise RuntimeError("_publish_and_retry_on_missing_exchange() must "
"be called with an passive exchange.")
# TODO(sileht): use @retrying
# NOTE(sileht): no need to wait longer; the application expects a
# response before the timeout is exhausted.
duration = (
timeout if timeout is not None
else self.kombu_reconnect_timeout
)
timer = rpc_common.DecayingTimer(duration=duration)
timer.start()
while True:
try:
self._publish(exchange, msg, routing_key=routing_key,
timeout=timeout)
return
except self.connection.channel_errors as exc:
# NOTE(noelbk/sileht):
# If rabbit dies, the consumer can be disconnected before the
# publisher sends, and if the consumer hasn't re-declared the
# queue, the publisher will send a message to an exchange
# that's not bound to a queue, and the message will be lost.
# So we set passive=True on the publisher exchange, catch the
# 404 kombu ChannelError, and retry until the exchange
# appears.
if exc.code == 404 and timer.check_return() > 0:
LOG.info(_LI("The exchange %(exchange)s to send to "
"%(routing_key)s doesn't exist yet, "
"retrying...") % {
'exchange': exchange.name,
'routing_key': routing_key})
time.sleep(0.25)
continue
elif exc.code == 404:
msg = _("The exchange %(exchange)s to send to "
"%(routing_key)s still doesn't exist after "
"%(duration)s sec abandonning...") % {
'duration': duration,
'exchange': exchange.name,
'routing_key': routing_key}
LOG.info(msg)
raise rpc_amqp.AMQPDestinationNotFound(msg)
raise
def direct_send(self, msg_id, msg):
"""Send a 'direct' message."""
exchange = kombu.entity.Exchange(name=msg_id,
type='direct',
durable=False,
auto_delete=True,
passive=True)
self._ensure_publishing(self._publish_and_retry_on_missing_exchange,
exchange, msg, routing_key=msg_id)
def topic_send(self, exchange_name, topic, msg, timeout=None, retry=None):
"""Send a 'topic' message."""
exchange = kombu.entity.Exchange(
name=exchange_name,
type='topic',
durable=self.amqp_durable_queues,
auto_delete=self.amqp_auto_delete)
self._ensure_publishing(self._publish, exchange, msg,
routing_key=topic, retry=retry)
def fanout_send(self, topic, msg, retry=None):
"""Send a 'fanout' message."""
exchange = kombu.entity.Exchange(name='%s_fanout' % topic,
type='fanout',
durable=False,
auto_delete=True)
self._ensure_publishing(self._publish, exchange, msg, retry=retry)
def notify_send(self, exchange_name, topic, msg, retry=None, **kwargs):
"""Send a notify message on a topic."""
exchange = kombu.entity.Exchange(
name=exchange_name,
type='topic',
durable=self.amqp_durable_queues,
auto_delete=self.amqp_auto_delete)
self._ensure_publishing(self._publish_and_creates_default_queue,
exchange, msg, routing_key=topic, retry=retry)
class RabbitDriver(amqpdriver.AMQPDriverBase):
"""RabbitMQ Driver
The ``rabbit`` driver is the default driver used in OpenStack's
integration tests.
The driver is aliased as ``kombu`` to support upgrading existing
installations with older settings.
"""
def __init__(self, conf, url,
default_exchange=None,
allowed_remote_exmods=None):
opt_group = cfg.OptGroup(name='oslo_messaging_rabbit',
title='RabbitMQ driver options')
conf.register_group(opt_group)
conf.register_opts(rabbit_opts, group=opt_group)
conf.register_opts(rpc_amqp.amqp_opts, group=opt_group)
conf.register_opts(base.base_opts, group=opt_group)
connection_pool = rpc_amqp.ConnectionPool(
conf, conf.oslo_messaging_rabbit.rpc_conn_pool_size,
url, Connection)
super(RabbitDriver, self).__init__(
conf, url,
connection_pool,
default_exchange,
allowed_remote_exmods,
conf.oslo_messaging_rabbit.send_single_reply,
)
def require_features(self, requeue=True):
pass
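# For illustration only (not part of the original module): this driver is
# normally selected through an oslo.messaging transport URL such as
# rabbit://user:password@host:5672/vhost, or kombu+memory:// for the
# in-memory transport used in tests (see Connection.__init__ above for how
# these URLs are parsed). The host and credentials here are hypothetical.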
|
|
import flask
import pymysql.cursors
from pymysql.constants import ER
from pymysql.err import IntegrityError
from donut.auth_utils import get_user_id
TERMS = {'FA': 1, 'WI': 2, 'SP': 3}
TERM_NAMES = {v: k for k, v in TERMS.items()}
def try_int(x):
"""
Converts a float to an int if it is already an integral value.
Makes the JSON a little smaller.
"""
as_int = int(x)
return as_int if as_int == x else x
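# For example: try_int(9.0) -> 9, but try_int(4.5) -> 4.5.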
def get_terms():
"""
Returns {'year', 'term'} structs for each term that has courses,
sorted from most to least recent.
"""
query = """
SELECT DISTINCT year, term FROM courses
ORDER BY year DESC, (term + 1) % 3 DESC
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query)
return cursor.fetchall()
def get_year_courses():
"""
Returns {'ids'[], 'number', 'name', 'units'[3], 'instructor', 'terms'[]}
structs for all courses in the most recent FA, WI, and SP terms.
'ids' and 'terms' link the ids of different terms of the same course.
"""
# Find most recent year for each term that has courses
term_years = {}
for term_year in get_terms():
term = term_year['term']
if term not in term_years:
term_years[term] = term_year['year']
query = """
SELECT
course_id,
CONCAT(department, ' ', course_number) AS number,
name,
units_lecture, units_lab, units_homework
FROM courses
WHERE year = %s AND term = %s
"""
instructor_query = """
SELECT DISTINCT instructor
FROM sections NATURAL JOIN instructors
WHERE course_id = %s
"""
courses = {} # mapping of course numbers to course structs
with flask.g.pymysql_db.cursor() as cursor:
for term, year in term_years.items():
cursor.execute(query, (year, term))
for course in cursor.fetchall():
number = course['number']
cursor.execute(instructor_query, course['course_id'])
instructors = cursor.fetchall()
instructor = instructors[0]['instructor'] \
if len(instructors) == 1 else None
matching_course = courses.get(number)
if matching_course:
matching_course['terms'].append(term)
matching_course['ids'].append(course['course_id'])
if instructor != matching_course['instructor']:
matching_course['instructor'] = None
else:
units = (course['units_lecture'], course['units_lab'],
course['units_homework'])
courses[number] = {
# Separate course id for each term
'ids': [course['course_id']],
'number': number,
'name': course['name'],
'units': tuple(map(try_int, units)),
'instructor': instructor,
'terms': [term]
}
return sorted(
courses.values(), key=lambda course: course['number'].lower())
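# For illustration, one returned struct might look like this (all values
# hypothetical):
# {'ids': [1234, 5678], 'number': 'CS 1', 'name': 'Introduction to Computer
#  Programming', 'units': (3, 4, 2), 'instructor': None, 'terms': [1, 2]}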
def add_planner_course(username, course_id, year):
"""
Adds a certain course to a certain user's planner for a given year.
Year 1 is frosh year, year 2 is smore year, etc.
"""
user_id = get_user_id(username)
query = 'INSERT INTO planner_courses (user_id, course_id, planner_year) VALUES (%s, %s, %s)'
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, (user_id, course_id, year))
def drop_planner_course(username, course_id, year):
"""
Removes a certain course from a certain user's planner for a given year.
Year 1 is frosh year, year 2 is smore year, etc.
"""
user_id = get_user_id(username)
query = """
DELETE FROM planner_courses
WHERE user_id = %s AND course_id = %s AND planner_year = %s
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, (user_id, course_id, year))
def add_planner_placeholder(username, year, term, course, units):
"""
Adds a placeholder course to a user's planner for a given term.
Year 1 is frosh year, year 2 is smore year, etc.
Term 1 is FA, 2 is WI, and 3 is SP.
"""
user_id = get_user_id(username)
query = """
INSERT INTO planner_placeholders
(user_id, planner_year, term, course_name, course_units)
VALUES (%s, %s, %s, %s, %s)
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, (user_id, year, term, course, units))
return cursor.lastrowid
def drop_planner_placeholder(username, placeholder_id):
"""
Removes the placeholder with the given ID from the user's planner.
Returns whether successful (i.e. the given placeholder did belong to the user).
"""
user_id = get_user_id(username)
query = """
DELETE FROM planner_placeholders
WHERE placeholder_id = %s AND user_id = %s
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, (placeholder_id, user_id))
return cursor.rowcount > 0
def get_user_planner_courses(username):
"""
Returns {'ids'[1], 'number', 'units', 'terms'[1], 'year'} structs
for each course on a certain user's planner.
Unlike in get_planner_courses(), the unit counts are already summed.
"""
query = """
SELECT
course_id,
CONCAT(department, ' ', course_number) AS number,
term,
units,
planner_year
FROM users NATURAL JOIN planner_courses NATURAL JOIN courses
WHERE username = %s
ORDER BY units DESC, number
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, username)
courses = cursor.fetchall()
return [{
'ids': (course['course_id'], ),
'number': course['number'],
'units': try_int(course['units']),
'terms': (course['term'], ),
'year': course['planner_year']
} for course in courses]
def get_user_planner_placeholders(username):
query = """
SELECT placeholder_id, planner_year, term, course_name, course_units
FROM planner_placeholders NATURAL JOIN users
WHERE username = %s
ORDER BY course_units DESC, course_name
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, username)
placeholders = cursor.fetchall()
return [{
'id': placeholder['placeholder_id'],
'year': placeholder['planner_year'],
'term': placeholder['term'],
'course': placeholder['course_name'],
'units': try_int(placeholder['course_units'])
} for placeholder in placeholders]
def get_scheduler_courses(year, term):
"""
Returns {'id', 'number', 'name', 'units'[3], 'sections'[]} structs for each
course in a certain term of a certain year.
'sections' is a list of {'number', 'instructor', 'grades', 'times'} structs.
"""
query = """
SELECT
course_id,
CONCAT(department, ' ', course_number) AS number,
name,
units_lecture, units_lab, units_homework,
section_number,
instructor,
grades_type,
times,
locations
FROM
courses
NATURAL JOIN sections
NATURAL JOIN instructors
NATURAL JOIN grades_types
WHERE year = %s AND term = %s
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, (year, term))
sections = cursor.fetchall()
course_sections = {}
for section in sections:
course_id = section['course_id']
course = course_sections.get(course_id)
if course:
sections = course['sections']
else:
sections = []
units = (section['units_lecture'], section['units_lab'],
section['units_homework'])
course_sections[course_id] = {
'id': course_id,
'number': section['number'],
'name': section['name'],
'units': tuple(map(try_int, units)),
'sections': sections
}
sections.append({
'number': section['section_number'],
'instructor': section['instructor'],
'grades': section['grades_type'],
'times': section['times'],
'locations': section['locations']
})
courses = course_sections.values()
for course in courses:
course['sections'].sort(key=lambda section: section['number'])
return sorted(courses, key=lambda course: course['number'].lower())
def add_scheduler_section(username, course, section):
"""
Adds a certain section number of a certain course
to a certain user's schedule for the course's term.
"""
user_id = get_user_id(username)
query = """
INSERT INTO scheduler_sections (user_id, course_id, section_number)
VALUES (%s, %s, %s)
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, (user_id, course, section))
def drop_scheduler_section(username, course, section):
"""
Removes a certain section number of a certain course
from a certain user's schedule for the course's term.
"""
user_id = get_user_id(username)
query = """
DELETE FROM scheduler_sections
WHERE user_id = %s AND course_id = %s AND section_number = %s
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, (user_id, course, section))
def get_user_scheduler_sections(username, year, term):
"""
Returns {'id' (course_id), 'section' (section_number)} structs for each
section on a certain user's schedule for a certain term of a certain year.
"""
query = """
SELECT course_id, section_number
FROM
users
NATURAL JOIN scheduler_sections
NATURAL JOIN courses
WHERE username = %s AND year = %s AND term = %s
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, (username, year, term))
sections = cursor.fetchall()
return [{
'id': section['course_id'],
'section': section['section_number']
} for section in sections]
def is_duplicate_error(e):
    return isinstance(e, IntegrityError) and e.args[0] == ER.DUP_ENTRY
def get_notes(username, course, section):
user_id = get_user_id(username)
query = """
SELECT notes FROM scheduler_sections
WHERE user_id = %s AND course_id = %s AND section_number = %s
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, (user_id, course, section))
notes = cursor.fetchone()
return notes and notes['notes']
def edit_notes(username, course, section, notes):
user_id = get_user_id(username)
query = """
UPDATE scheduler_sections SET notes = %s
WHERE user_id = %s AND course_id = %s AND section_number = %s
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, (notes, user_id, course, section))
def delete_notes(username, course, section):
user_id = get_user_id(username)
query = """
UPDATE scheduler_sections SET notes = NULL
WHERE user_id = %s AND course_id = %s AND section_number = %s
"""
with flask.g.pymysql_db.cursor() as cursor:
cursor.execute(query, (user_id, course, section))
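if __name__ == '__main__':
    # Minimal wiring sketch (an assumption, not part of the original app): the
    # helpers above expect flask.g.pymysql_db to hold an open PyMySQL
    # connection that returns dict rows. The connection parameters and the
    # route below are hypothetical placeholders.
    import flask
    import pymysql
    app = flask.Flask(__name__)
    @app.before_request
    def open_db():
        flask.g.pymysql_db = pymysql.connect(
            host='localhost', user='courses', password='changeme',
            db='courses', cursorclass=pymysql.cursors.DictCursor,
            autocommit=True)
    @app.teardown_request
    def close_db(exc):
        db = getattr(flask.g, 'pymysql_db', None)
        if db is not None:
            db.close()
    @app.route('/planner/<username>')
    def planner(username):
        return flask.jsonify(courses=get_user_planner_courses(username))
    app.run(debug=True)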
|
|
#!/usr/bin/env python2
# -*- coding: utf-8-*-
import os
import wave
import json
import tempfile
import logging
import urllib
import urlparse
import re
import subprocess
from abc import ABCMeta, abstractmethod
import requests
import yaml
import nikitapath
import diagnose
import vocabcompiler
class AbstractSTTEngine(object):
"""
Generic parent class for all STT engines
"""
__metaclass__ = ABCMeta
VOCABULARY_TYPE = None
@classmethod
def get_config(cls):
return {}
@classmethod
def get_instance(cls, vocabulary_name, phrases):
config = cls.get_config()
if cls.VOCABULARY_TYPE:
vocabulary = cls.VOCABULARY_TYPE(vocabulary_name,
path=nikitapath.config(
'vocabularies'))
if not vocabulary.matches_phrases(phrases):
vocabulary.compile(phrases)
config['vocabulary'] = vocabulary
instance = cls(**config)
return instance
@classmethod
def get_passive_instance(cls):
phrases = vocabcompiler.get_keyword_phrases()
return cls.get_instance('keyword', phrases)
@classmethod
def get_active_instance(cls):
phrases = vocabcompiler.get_all_phrases()
return cls.get_instance('default', phrases)
@classmethod
@abstractmethod
def is_available(cls):
return True
@abstractmethod
def transcribe(self, fp):
pass
class PocketSphinxSTT(AbstractSTTEngine):
"""
The default Speech-to-Text implementation which relies on PocketSphinx.
"""
SLUG = 'sphinx'
VOCABULARY_TYPE = vocabcompiler.PocketsphinxVocabulary
def __init__(self, vocabulary, hmm_dir="/usr/local/share/" +
"pocketsphinx/model/hmm/en_US/hub4wsj_sc_8k"):
"""
Initiates the pocketsphinx instance.
Arguments:
vocabulary -- a PocketsphinxVocabulary instance
hmm_dir -- the path of the Hidden Markov Model (HMM)
"""
self._logger = logging.getLogger(__name__)
# quirky bug where first import doesn't work
try:
import pocketsphinx as ps
except:
import pocketsphinx as ps
with tempfile.NamedTemporaryFile(prefix='psdecoder_',
suffix='.log', delete=False) as f:
self._logfile = f.name
self._logger.debug("Initializing PocketSphinx Decoder with hmm_dir " +
"'%s'", hmm_dir)
# Perform some checks on the hmm_dir so that we can display more
        # meaningful error messages if necessary
if not os.path.exists(hmm_dir):
msg = ("hmm_dir '%s' does not exist! Please make sure that you " +
"have set the correct hmm_dir in your profile.") % hmm_dir
self._logger.error(msg)
raise RuntimeError(msg)
        # Let's check if all required files are there. Refer to:
# http://cmusphinx.sourceforge.net/wiki/acousticmodelformat
# for details
missing_hmm_files = []
for fname in ('mdef', 'feat.params', 'means', 'noisedict',
'transition_matrices', 'variances'):
if not os.path.exists(os.path.join(hmm_dir, fname)):
missing_hmm_files.append(fname)
mixweights = os.path.exists(os.path.join(hmm_dir, 'mixture_weights'))
sendump = os.path.exists(os.path.join(hmm_dir, 'sendump'))
if not mixweights and not sendump:
# We only need mixture_weights OR sendump
missing_hmm_files.append('mixture_weights or sendump')
if missing_hmm_files:
self._logger.warning("hmm_dir '%s' is missing files: %s. Please " +
"make sure that you have set the correct " +
"hmm_dir in your profile.",
hmm_dir, ', '.join(missing_hmm_files))
self._decoder = ps.Decoder(hmm=hmm_dir, logfn=self._logfile,
**vocabulary.decoder_kwargs)
def __del__(self):
os.remove(self._logfile)
@classmethod
def get_config(cls):
# FIXME: Replace this as soon as we have a config module
config = {}
# HMM dir
# Try to get hmm_dir from config
profile_path = nikitapath.config('profile.yml')
if os.path.exists(profile_path):
with open(profile_path, 'r') as f:
profile = yaml.safe_load(f)
try:
config['hmm_dir'] = profile['pocketsphinx']['hmm_dir']
except KeyError:
pass
return config
def transcribe(self, fp):
"""
Performs STT, transcribing an audio file and returning the result.
Arguments:
fp -- a file object containing audio data
"""
fp.seek(44)
# FIXME: Can't use the Decoder.decode_raw() here, because
# pocketsphinx segfaults with tempfile.SpooledTemporaryFile()
data = fp.read()
self._decoder.start_utt()
self._decoder.process_raw(data, False, True)
self._decoder.end_utt()
result = self._decoder.get_hyp()
with open(self._logfile, 'r+') as f:
for line in f:
self._logger.debug(line.strip())
f.truncate()
transcribed = [result[0]]
        self._logger.info('Transcribed: %r', transcribed)
return transcribed
@classmethod
def is_available(cls):
return diagnose.check_python_import('pocketsphinx')
class JuliusSTT(AbstractSTTEngine):
"""
A very basic Speech-to-Text engine using Julius.
"""
SLUG = 'julius'
VOCABULARY_TYPE = vocabcompiler.JuliusVocabulary
def __init__(self, vocabulary=None, hmmdefs="/usr/share/voxforge/julius/" +
"acoustic_model_files/hmmdefs", tiedlist="/usr/share/" +
"voxforge/julius/acoustic_model_files/tiedlist"):
self._logger = logging.getLogger(__name__)
self._vocabulary = vocabulary
self._hmmdefs = hmmdefs
self._tiedlist = tiedlist
self._pattern = re.compile(r'sentence(\d+): <s> (.+) </s>')
        # Initial test run: we run this command once to log errors/warnings
cmd = ['julius',
'-input', 'stdin',
'-dfa', self._vocabulary.dfa_file,
'-v', self._vocabulary.dict_file,
'-h', self._hmmdefs,
'-hlist', self._tiedlist,
'-forcedict']
cmd = [str(x) for x in cmd]
self._logger.debug('Executing: %r', cmd)
with tempfile.SpooledTemporaryFile() as out_f:
with tempfile.SpooledTemporaryFile() as f:
with tempfile.SpooledTemporaryFile() as err_f:
subprocess.call(cmd, stdin=f, stdout=out_f, stderr=err_f)
out_f.seek(0)
for line in out_f.read().splitlines():
line = line.strip()
if len(line) > 7 and line[:7].upper() == 'ERROR: ':
if not line[7:].startswith('adin_'):
self._logger.error(line[7:])
elif len(line) > 9 and line[:9].upper() == 'WARNING: ':
self._logger.warning(line[9:])
elif len(line) > 6 and line[:6].upper() == 'STAT: ':
self._logger.debug(line[6:])
@classmethod
def get_config(cls):
# FIXME: Replace this as soon as we have a config module
config = {}
        # Try to get hmmdefs/tiedlist paths from config
profile_path = nikitapath.config('profile.yml')
if os.path.exists(profile_path):
with open(profile_path, 'r') as f:
profile = yaml.safe_load(f)
if 'julius' in profile:
if 'hmmdefs' in profile['julius']:
config['hmmdefs'] = profile['julius']['hmmdefs']
if 'tiedlist' in profile['julius']:
config['tiedlist'] = profile['julius']['tiedlist']
return config
def transcribe(self, fp, mode=None):
cmd = ['julius',
'-quiet',
'-nolog',
'-input', 'stdin',
'-dfa', self._vocabulary.dfa_file,
'-v', self._vocabulary.dict_file,
'-h', self._hmmdefs,
'-hlist', self._tiedlist,
'-forcedict']
cmd = [str(x) for x in cmd]
self._logger.debug('Executing: %r', cmd)
with tempfile.SpooledTemporaryFile() as out_f:
with tempfile.SpooledTemporaryFile() as err_f:
subprocess.call(cmd, stdin=fp, stdout=out_f, stderr=err_f)
out_f.seek(0)
results = [(int(i), text) for i, text in
self._pattern.findall(out_f.read())]
transcribed = [text for i, text in
sorted(results, key=lambda x: x[0])
if text]
if not transcribed:
transcribed.append('')
self._logger.info('Transcribed: %r', transcribed)
return transcribed
@classmethod
def is_available(cls):
return diagnose.check_executable('julius')
class GoogleSTT(AbstractSTTEngine):
"""
Speech-To-Text implementation which relies on the Google Speech API.
This implementation requires a Google API key to be present in profile.yml
To obtain an API key:
1. Join the Chromium Dev group:
https://groups.google.com/a/chromium.org/forum/?fromgroups#!forum/chromium-dev
2. Create a project through the Google Developers console:
https://console.developers.google.com/project
3. Select your project. In the sidebar, navigate to "APIs & Auth." Activate
the Speech API.
4. Under "APIs & Auth," navigate to "Credentials." Create a new key for
public API access.
    5. Add your credentials to your profile.yml. Add an entry to the 'keys'
       section using the key name 'GOOGLE_SPEECH' (see the sample excerpt below).
6. Set the value of the 'stt_engine' key in your profile.yml to 'google'
Excerpt from sample profile.yml:
...
timezone: US/Pacific
stt_engine: google
keys:
GOOGLE_SPEECH: $YOUR_KEY_HERE
"""
SLUG = 'google'
def __init__(self, api_key=None, language='en-us'):
# FIXME: get init args from config
"""
Arguments:
api_key - the public api key which allows access to Google APIs
"""
self._logger = logging.getLogger(__name__)
self._request_url = None
self._language = None
self._api_key = None
self._http = requests.Session()
self.language = language
self.api_key = api_key
@property
def request_url(self):
return self._request_url
@property
def language(self):
return self._language
@language.setter
def language(self, value):
self._language = value
self._regenerate_request_url()
@property
def api_key(self):
return self._api_key
@api_key.setter
def api_key(self, value):
self._api_key = value
self._regenerate_request_url()
def _regenerate_request_url(self):
if self.api_key and self.language:
query = urllib.urlencode({'output': 'json',
'client': 'chromium',
'key': self.api_key,
'lang': self.language,
'maxresults': 6,
'pfilter': 2})
self._request_url = urlparse.urlunparse(
('https', 'www.google.com', '/speech-api/v2/recognize', '',
query, ''))
else:
self._request_url = None
@classmethod
def get_config(cls):
# FIXME: Replace this as soon as we have a config module
config = {}
        # Try to get the Google API key from config
profile_path = nikitapath.config('profile.yml')
if os.path.exists(profile_path):
with open(profile_path, 'r') as f:
profile = yaml.safe_load(f)
if 'keys' in profile and 'GOOGLE_SPEECH' in profile['keys']:
config['api_key'] = profile['keys']['GOOGLE_SPEECH']
return config
def transcribe(self, fp):
"""
        Performs STT via the Google Speech API, transcribing an audio file and
        returning a list of candidate transcriptions.
        Arguments:
        fp -- a file object containing audio data (a WAV file)
"""
if not self.api_key:
self._logger.critical('API key missing, transcription request ' +
'aborted.')
return []
elif not self.language:
self._logger.critical('Language info missing, transcription ' +
'request aborted.')
return []
wav = wave.open(fp, 'rb')
frame_rate = wav.getframerate()
wav.close()
data = fp.read()
headers = {'content-type': 'audio/l16; rate=%s' % frame_rate}
r = self._http.post(self.request_url, data=data, headers=headers)
try:
r.raise_for_status()
except requests.exceptions.HTTPError:
self._logger.critical('Request failed with http status %d',
r.status_code)
if r.status_code == requests.codes['forbidden']:
self._logger.warning('Status 403 is probably caused by an ' +
'invalid Google API key.')
return []
r.encoding = 'utf-8'
try:
            # We cannot simply use r.json() because Google sends invalid JSON
            # (i.e. multiple JSON objects separated by newlines); we only want
            # the last one.
response = json.loads(list(r.text.strip().split('\n', 1))[-1])
if len(response['result']) == 0:
# Response result is empty
raise ValueError('Nothing has been transcribed.')
results = [alt['transcript'] for alt
in response['result'][0]['alternative']]
except ValueError as e:
self._logger.warning('Empty response: %s', e.args[0])
results = []
except (KeyError, IndexError):
self._logger.warning('Cannot parse response.', exc_info=True)
results = []
else:
# Convert all results to uppercase
results = tuple(result.upper() for result in results)
self._logger.info('Transcribed: %r', results)
return results
@classmethod
def is_available(cls):
return diagnose.check_network_connection()
class AttSTT(AbstractSTTEngine):
"""
Speech-To-Text implementation which relies on the AT&T Speech API.
This implementation requires an AT&T app_key/app_secret to be present in
profile.yml. Please sign up at http://developer.att.com/apis/speech and
    create a new app. You can then take the app_key/app_secret and put them into
your profile.yml:
...
stt_engine: att
att-stt:
app_key: 4xxzd6abcdefghijklmnopqrstuvwxyz
app_secret: 6o5jgiabcdefghijklmnopqrstuvwxyz
"""
SLUG = "att"
def __init__(self, app_key, app_secret):
self._logger = logging.getLogger(__name__)
self._token = None
self.app_key = app_key
self.app_secret = app_secret
@classmethod
def get_config(cls):
# FIXME: Replace this as soon as we have a config module
config = {}
# Try to get AT&T app_key/app_secret from config
profile_path = nikitapath.config('profile.yml')
if os.path.exists(profile_path):
with open(profile_path, 'r') as f:
profile = yaml.safe_load(f)
if 'att-stt' in profile:
if 'app_key' in profile['att-stt']:
config['app_key'] = profile['att-stt']['app_key']
if 'app_secret' in profile['att-stt']:
config['app_secret'] = profile['att-stt']['app_secret']
return config
@property
def token(self):
if not self._token:
headers = {'content-type': 'application/x-www-form-urlencoded',
'accept': 'application/json'}
payload = {'client_id': self.app_key,
'client_secret': self.app_secret,
'scope': 'SPEECH',
'grant_type': 'client_credentials'}
r = requests.post('https://api.att.com/oauth/v4/token',
data=payload,
headers=headers)
self._token = r.json()['access_token']
return self._token
def transcribe(self, fp):
data = fp.read()
r = self._get_response(data)
if r.status_code == requests.codes['unauthorized']:
# Request token invalid, retry once with a new token
self._logger.warning('OAuth access token invalid, generating a ' +
'new one and retrying...')
self._token = None
r = self._get_response(data)
try:
r.raise_for_status()
except requests.exceptions.HTTPError:
self._logger.critical('Request failed with response: %r',
r.text,
exc_info=True)
return []
except requests.exceptions.RequestException:
self._logger.critical('Request failed.', exc_info=True)
return []
else:
try:
recognition = r.json()['Recognition']
if recognition['Status'] != 'OK':
raise ValueError(recognition['Status'])
results = [(x['Hypothesis'], x['Confidence'])
for x in recognition['NBest']]
except ValueError as e:
self._logger.debug('Recognition failed with status: %s',
e.args[0])
return []
except KeyError:
self._logger.critical('Cannot parse response.',
exc_info=True)
return []
else:
transcribed = [x[0].upper() for x in sorted(results,
key=lambda x: x[1],
reverse=True)]
self._logger.info('Transcribed: %r', transcribed)
return transcribed
def _get_response(self, data):
headers = {'authorization': 'Bearer %s' % self.token,
'accept': 'application/json',
'content-type': 'audio/wav'}
return requests.post('https://api.att.com/speech/v3/speechToText',
data=data,
headers=headers)
@classmethod
def is_available(cls):
return diagnose.check_network_connection()
class WitAiSTT(AbstractSTTEngine):
"""
Speech-To-Text implementation which relies on the Wit.ai Speech API.
    This implementation requires a Wit.ai access token to be present in
    profile.yml. Please sign up at https://wit.ai and copy your instance
    token (found under Settings in the Wit console) into your profile.yml:
...
stt_engine: witai
witai-stt:
access_token: ERJKGE86SOMERANDOMTOKEN23471AB
"""
SLUG = "witai"
def __init__(self, access_token):
self._logger = logging.getLogger(__name__)
self.token = access_token
@classmethod
def get_config(cls):
# FIXME: Replace this as soon as we have a config module
config = {}
# Try to get wit.ai Auth token from config
profile_path = nikitapath.config('profile.yml')
if os.path.exists(profile_path):
with open(profile_path, 'r') as f:
profile = yaml.safe_load(f)
if 'witai-stt' in profile:
if 'access_token' in profile['witai-stt']:
config['access_token'] = \
profile['witai-stt']['access_token']
return config
@property
def token(self):
return self._token
@token.setter
def token(self, value):
self._token = value
self._headers = {'Authorization': 'Bearer %s' % self.token,
'accept': 'application/json',
'Content-Type': 'audio/wav'}
@property
def headers(self):
return self._headers
def transcribe(self, fp):
data = fp.read()
r = requests.post('https://api.wit.ai/speech?v=20150101',
data=data,
headers=self.headers)
try:
r.raise_for_status()
text = r.json()['_text']
except requests.exceptions.HTTPError:
self._logger.critical('Request failed with response: %r',
r.text,
exc_info=True)
return []
except requests.exceptions.RequestException:
self._logger.critical('Request failed.', exc_info=True)
return []
except ValueError as e:
self._logger.critical('Cannot parse response: %s',
e.args[0])
return []
except KeyError:
self._logger.critical('Cannot parse response.',
exc_info=True)
return []
else:
transcribed = []
if text:
transcribed.append(text.upper())
self._logger.info('Transcribed: %r', transcribed)
return transcribed
@classmethod
def is_available(cls):
return diagnose.check_network_connection()
def get_engine_by_slug(slug=None):
"""
Returns:
An STT Engine implementation available on the current platform
Raises:
        ValueError if no STT engine implementation is supported on this platform
"""
if not slug or type(slug) is not str:
        raise TypeError("Invalid slug '%s'" % slug)
selected_engines = filter(lambda engine: hasattr(engine, "SLUG") and
engine.SLUG == slug, get_engines())
if len(selected_engines) == 0:
raise ValueError("No STT engine found for slug '%s'" % slug)
else:
if len(selected_engines) > 1:
print(("WARNING: Multiple STT engines found for slug '%s'. " +
"This is most certainly a bug.") % slug)
engine = selected_engines[0]
if not engine.is_available():
            raise ValueError(("STT engine '%s' is not available (due to " +
                              "missing dependencies, etc.)") % slug)
return engine
def get_engines():
def get_subclasses(cls):
subclasses = set()
for subclass in cls.__subclasses__():
subclasses.add(subclass)
subclasses.update(get_subclasses(subclass))
return subclasses
    return [engine for engine in
            list(get_subclasses(AbstractSTTEngine))
            if hasattr(engine, 'SLUG') and engine.SLUG]
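if __name__ == '__main__':
    # Minimal usage sketch (an assumption, not part of the original module):
    # resolve an engine class by its SLUG, build the active-listening
    # instance, and feed it a recorded WAV file. 'demo.wav' is a hypothetical
    # recording; the chosen engine must be installed and configured in
    # profile.yml for this to run.
    logging.basicConfig(level=logging.DEBUG)
    engine_class = get_engine_by_slug('sphinx')
    stt = engine_class.get_active_instance()
    with open('demo.wav', 'rb') as fp:
        print(stt.transcribe(fp))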
|
|
"""Classes::
    FitData -- The FitData class is a collection of
      functions that fit a 3D function with a Fourier series.
"""
import numpy as np
from json import dump, load
from scipy.linalg import lstsq
from itertools import product
from dftintegrate import customserializer as cs
class FitData(object):
"""
    Fit a periodic 3D function, represented by a json file created by
    the ReadData object, with a Fourier series, and represent the fit
    as an object.
Solve A x = b where A is a matrix and x and b are column vectors.
Variables::
name -- Path to directory with data to work on.
bandnum -- Number of bands to fit.
data -- Data to fit represented in data.json.
kmax -- A number that determines how many terms can be used
in the fourier representation based on the density of the
sample points.
kgrid -- A list of lists. Each inner list is a triplet that
represents a k-point. The outer list is the collection of
      the triplets or k-points and therefore represents the k-grid.
      Note these are the irreducible k-points.
weights -- A list of floats. Since kgrid represents the
irreducible wedge, each k-point has a weight that
represents in a way how degenerate it is. These
are in the same order as their corresponding k-point
in kgrid.
eigenvals -- A dictionary. At each k-point there is an
eigenvalue (energy) for each band that was calculated. The
keys are the band number and the values are a list of
energies for that band at each k-point.
    symops -- A triple nested list. The outer list is a collection
      of matrices that represent the symmetry operators for the
system calculated. The inner double nested lists are
representations of the matrices.
series -- Matrix representation of the series. "A" in the equation
to solve.
coeffs -- Fourier Coefficients in the Fourier series. "x" in the
equation to solve. A dictionary, the key is the band number and
the value is the list of coefficients for that band.
recips -- Reciprocal lattice vectors in the Fourier sum.
lstsq_err -- Total least squares error for the fit.
    Functions::
_get_fit -- Call gen_recips, gen_series, solve_coeffs, and
serialize.
gen_recips -- Generate the reciprocal lattice vectors.
gen_series -- Generate the sines and cosines in the series in
matrix form.
solve_coeffs -- Use scipy.linalg.lstsq to solve A x = b for x.
serialize -- Serialize the fit to a json file.
"""
def __init__(self, name_of_directory, bandnum='all'):
"""
Arguments::
name_of_directory -- path to directory that contains the
output from readdata.py
Keyword Arguments::
bandnum -- Number of bands to fit. Default is to fit all bands
in data.json.
"""
self.name = name_of_directory+'/'
self.bandnum = bandnum
with open(self.name+'data.json', mode='r',
encoding='utf-8') as inf:
self.data = load(inf)
self.kgrid = self.data['kgrid']
self.eigenvals = self.data['eigenvals']
self.symops = self.data['symops']
self.kmax = int(self.data['kmax'])
self._get_fit()
def _get_fit(self):
self.gen_recips()
self.gen_series()
self.solve_coeffs()
self.serialize()
def gen_recips(self):
"""
In the Fourier basis representation we sum over the reciprocal
lattice vectors; this function generates those reciprocal
lattice vectors. Start by using itertools.product to create
triplets in range 0 to kmax. In order to sum over the entire
        Fermi sphere we operate on the triplets with the system's
        symmetry operators given in symops. kmax and symops are
        explained in more detail in readdata.py.
        Variables::
allList -- A list of all vectors seen. Including results of
product and their rotated versions after being operated on
by symops.
recips -- A dictionary with the key being a unique vector and
the value being a list of the symmetric versions of that
unique vector.
"""
allList = set()
recips = {}
# Loop over the positive octant in k-space.
for v in product(range(self.kmax+1), repeat=3):
# Tuple so it's hashable.
v = tuple(v)
# Check if it has been seen before, if so skip, if not add.
if v not in allList:
allList.add(v)
recips[str(v)] = [list(v)]
# Loop over all symops
for i, matrix in enumerate(self.symops):
# Operate on it with the symop.
vRot = tuple(np.dot(matrix, v))
# Check if it has been seen before, if so skip, if not add.
if vRot not in allList:
vRot = tuple([int(x) for x in vRot])
allList.add(vRot)
recips[str(v)].append(list(vRot))
self.recips = recips
def gen_series(self):
"""
In the equation A x = b where A is a matrix and x and b are
column vectors, this function generates A. We use the matrix
equation to fit the 3D function represented by kgrid and
eigenvals. x is the coefficients to the complex exponentials and
b contains the values of the function. Each entry in A is like
exp(i2piG.r).
"""
series = []
i = 1j # imaginary number
pi = np.pi
for kpt in self.kgrid:
row = []
for k, v in sorted(self.recips.items()):
# The 2pi comes from the dot product of real and reciprocal
# space lattice vectors. v is a list of reciprocal lattice
                # vectors that are symmetric and therefore need to have the same
# coefficient so they are summed together.
gdotr = i*2*pi*np.dot(v, kpt)
row.append(sum(np.exp(gdotr)))
series.append(row)
self.series = series
def solve_coeffs(self):
"""
Solve A x = b with scipy.linalg.lstsq. A is a matrix, see
gen_series for more detail. x is the column vector we are
solving for, it is the Fourier coefficients. b is a column
vector, it is the energy values of the bands.
"""
# A (series), b (eigenvals)
coeffs = {}
lstsq_err = {}
A = self.series
if self.bandnum == 'all':
self.bandnum = len(self.eigenvals.keys())
else:
self.bandnum = int(self.bandnum)
for num in range(1, self.bandnum+1):
num = str(num)
b = self.eigenvals[num]
            coeffs[num], lstsq_err[num] = lstsq(A, b)[:2]
self.coeffs = coeffs
self.lstsq_err = lstsq_err
def serialize(self):
fit_dict = {'coefficients': self.coeffs, 'reciprocals': self.recips,
'series': self.series}
with open(self.name+'fit.json', mode='w', encoding='utf-8') as outf:
dump(fit_dict, outf, indent=2, default=cs.tojson)
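if __name__ == '__main__':
    # Toy illustration (an assumption, not part of the original module) of the
    # A x = b pattern used above, reduced to one dimension: build a matrix of
    # complex exponentials exp(i*2*pi*g*x) at the sample points x and recover
    # the Fourier coefficients of a known periodic signal with lstsq.
    xs = np.linspace(0, 1, 50, endpoint=False)   # sample points
    gs = range(-3, 4)                            # 1D "reciprocal vectors"
    A = np.array([[np.exp(1j*2*np.pi*g*x) for g in gs] for x in xs])
    b = 1.0 + 2.0*np.cos(2*np.pi*xs)             # signal to fit
    coeffs = lstsq(A, b)[0]
    # Expect |coeff| close to 1 at g = -1, 0, +1 and close to 0 elsewhere.
    for g, c in zip(gs, coeffs):
        print('g = %+d  |coeff| = %.3f' % (g, abs(c)))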
|
|
# -*- coding: utf-8 -*-
from bank_account_validator.exceptions import (
BankNotImplemented, InvalidAccount, InvalidAccountlength, InvalidBranch, InvalidBranchAndAccountCombination,
InvalidBranchlength, MissingAccountDigit, MissingBranchDigit, UnexpectedAccountDigit, UnexpectedBranchDigit
)
from bank_account_validator.utils import calculate_verifier_digit, smarter_zfill
def all_subclasses(cls):
return cls.__subclasses__() + [g for s in cls.__subclasses__() for g in all_subclasses(s)]
class Bank(object):
country = None
bank_code = None
branch_length = 4
branch_digit_length = 0
account_length = 10
account_digit_length = 1
def __init__(self, **kwargs):
if not all([self.country, self.bank_code]):
raise RuntimeError('Bank is an abstract class and must not be instantiated. '
'Use its subclasses instead - via Bank.get(bank_code, country).')
self.branch = smarter_zfill(kwargs['branch'], self.branch_length)
self.branch_digit = smarter_zfill(kwargs.get('branch_digit', ''), self.branch_digit_length)
self.account = smarter_zfill(kwargs['account'], self.account_length)
self.account_digit = smarter_zfill(kwargs.get('account_digit', ''), self.account_digit_length)
if len(self.branch) != self.branch_length:
raise InvalidBranchlength(self)
if len(self.branch_digit) < self.branch_digit_length:
raise MissingBranchDigit(self)
if len(self.branch_digit) > self.branch_digit_length:
raise UnexpectedBranchDigit(self)
if len(self.account) != self.account_length:
raise InvalidAccountlength(self)
if len(self.account_digit) < self.account_digit_length:
raise MissingAccountDigit(self)
if len(self.account_digit) > self.account_digit_length:
raise UnexpectedAccountDigit(self)
@classmethod
def get(cls, bank_code, country=None):
if not country:
country = cls.country
subclasses = all_subclasses(cls)
bank_class = list(filter(lambda x: x.bank_code == bank_code and x.country == country, subclasses))
if bank_class:
return bank_class[0]
raise BankNotImplemented(bank_code, country)
def validate_branch_digit(self):
return True
def validate_account_digit(self):
return True
def validate(self):
return True
def execute(self):
if not self.validate_branch_digit():
raise InvalidBranch(self.branch, self.branch_digit)
if not self.validate_account_digit():
raise InvalidAccount(self.account, self.account_digit)
if not self.validate():
raise InvalidBranchAndAccountCombination(self.branch, self.branch_digit, self.account, self.account_digit)
class BrazilianBank(Bank):
country = 'BR'
class BancoDoBrasil(BrazilianBank):
bank_code = '001'
account_length = 8
branch_digit_length = 1
def validate_branch_digit(self):
s = sum(int(x) * y for x, y in zip(list(self.branch), range(5, 1, -1)))
remaining_part = s % 11
dv = 11 - remaining_part
if remaining_part == 0:
dv = 0
elif remaining_part == 1:
dv = 'x'
return self.branch_digit.lower() == str(dv).lower()
def validate_account_digit(self):
dv = calculate_verifier_digit(self.account, pivot='98765432')
dv = 'X' if dv == 10 else dv
dv = '0' if dv == 11 else dv
return self.account_digit.lower() == str(dv).lower()
class Santander(BrazilianBank):
bank_code = '033'
account_length = 8
def validate(self):
account_relevant_data = self.branch + '00' + self.account
dv = calculate_verifier_digit(account_relevant_data, pivot='97310097131973', method='mod10')
dv = '0' if dv == 10 else dv
return self.account_digit.lower() == str(dv).lower()
class Banrisul(BrazilianBank):
bank_code = '041'
branch_digit_length = 2
account_length = 9
# TODO: tests for account validation
def validate_branch_digit(self):
def sum_digits(value):
return sum([int(x) for x in str(value)])
first_digit = 10 - ((sum_digits(int(self.branch[0]) * 1) +
sum_digits(int(self.branch[1]) * 2) +
sum_digits(int(self.branch[2]) * 1) +
sum_digits(int(self.branch[3]) * 2)) % 10)
if first_digit == 10:
first_digit = 0
second_digit = 11 - ((int(self.branch[0]) * 6 +
int(self.branch[1]) * 5 +
int(self.branch[2]) * 4 +
int(self.branch[3]) * 3 +
first_digit * 2) % 11)
if second_digit == 11:
second_digit = 0
elif second_digit == 10:
first_digit = (first_digit + 1) % 10
second_digit = 11 - ((int(self.branch[0]) * 6 +
int(self.branch[1]) * 5 +
int(self.branch[2]) * 4 +
int(self.branch[3]) * 3 +
first_digit * 2) % 11)
return self.branch_digit.lower() == '{}{}'.format(first_digit, second_digit).lower()
def validate_account_digit(self):
dv = calculate_verifier_digit(self.account, pivot='324765432')
dv = '6' if dv == 10 else dv
dv = '0' if dv == 11 else dv
return self.account_digit.lower() == str(dv).lower()
class CaixaEconomicaFederal(BrazilianBank):
bank_code = '104'
account_length = 11
def validate(self):
account_relevant_data = self.branch + self.account
pivot = '876543298765432'
dv = sum([int(x) * int(y) for x, y in zip(account_relevant_data.zfill(len(pivot)), pivot)])
dv *= 10
dv %= 11
dv = '0' if dv == 10 else dv
return self.account_digit.lower() == str(dv).lower()
class Bradesco(BrazilianBank):
bank_code = '237'
account_length = 7
branch_digit_length = 1
def validate_branch_digit(self):
s = sum(int(x) * y for x, y in zip(list(self.branch), range(5, 1, -1)))
remaining_part = s % 11
dv = 11 - remaining_part
if remaining_part == 0:
dv = 0
elif remaining_part == 1:
dv = 0
return self.branch_digit.lower() == str(dv).lower()
def validate_account_digit(self):
dv = calculate_verifier_digit(self.account, pivot='2765432')
dv = '0' if dv == 10 else dv # according to documentation this one should be 'P', but I know this info is outdated
dv = '0' if dv == 11 else dv
return self.account_digit.lower() == str(dv).lower()
class Itau(BrazilianBank):
bank_code = '341'
account_length = 5
def validate(self):
account_relevant_data = self.branch + self.account
dv = calculate_verifier_digit(account_relevant_data, pivot='212121212', sum_digits=True, method='mod10')
dv = '0' if dv == 10 else dv
return self.account_digit.lower() == str(dv).lower()
class HSBC(BrazilianBank):
bank_code = '399'
account_length = 6
# TODO: tests
def validate(self):
account_relevant_data = self.branch + self.account
pivot = '8923456789'
dv = sum([int(x) * int(y) for x, y in zip(account_relevant_data.zfill(len(pivot)), pivot)])
dv %= 11
dv = '0' if dv == 10 else dv
return self.account_digit.lower() == str(dv).lower()
class Citibank(BrazilianBank):
bank_code = '745'
account_length = 7
# TODO: branch validation and tests
def validate_account_digit(self):
dv = calculate_verifier_digit(self.account, pivot='8765432')
dv = '0' if dv == 10 else dv
dv = '0' if dv == 11 else dv
return self.account_digit.lower() == str(dv).lower()
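if __name__ == '__main__':
    # Minimal usage sketch (an assumption, not part of the original module):
    # resolve the right subclass from a bank code and validate a branch/account
    # pair. The digits below are hypothetical placeholders and will most likely
    # fail validation; real data would come from user input.
    bank_class = Bank.get('237', country='BR')   # Bradesco
    account = bank_class(branch='1234', branch_digit='5',
                         account='0654321', account_digit='0')
    try:
        account.execute()
        print('branch/account combination looks valid')
    except Exception as error:                   # any of the validation errors
        print('validation failed: {}'.format(error))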
|
|
# Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import division
from array import array
from whoosh.compat import xrange
from whoosh.matching import mcore
class CombinationMatcher(mcore.Matcher):
def __init__(self, submatchers, boost=1.0):
self._submatchers = submatchers
self._boost = boost
def supports_block_quality(self):
return all(m.supports_block_quality() for m in self._submatchers)
def max_quality(self):
return max(m.max_quality() for m in self._submatchers
if m.is_active()) * self._boost
def supports(self, astype):
return all(m.supports(astype) for m in self._submatchers)
def children(self):
return iter(self._submatchers)
def score(self):
return sum(m.score() for m in self._submatchers) * self._boost
class PreloadedUnionMatcher(CombinationMatcher):
"""Instead of marching the sub-matchers along in parallel, this
matcher pre-reads the scores for EVERY MATCHING DOCUMENT, trading memory
for speed.
This is faster than the implementation using a binary tree of
:class:`~whoosh.matching.binary.UnionMatcher` objects (possibly just
because of less overhead), but it doesn't allow getting information about
the "current" document other than the score, because there isn't really a
current document, just an array of scores.
"""
def __init__(self, submatchers, doccount, boost=1.0, scored=True):
CombinationMatcher.__init__(self, submatchers, boost=boost)
self._doccount = doccount
a = array("d")
active = [subm for subm in self._submatchers if subm.is_active()]
if active:
offset = self._docnum = min(m.id() for m in active)
for m in active:
while m.is_active():
if scored:
score = m.score() * boost
else:
score = boost
docnum = m.id()
place = docnum - offset
if len(a) <= place:
a.extend(0 for _ in xrange(place - len(a) + 1))
a[place] += score
m.next()
self._a = a
self._offset = offset
else:
self._docnum = 0
self._offset = 0
self._a = a
def is_active(self):
return self._docnum - self._offset < len(self._a)
def id(self):
return self._docnum
def score(self):
return self._a[self._docnum - self._offset]
def next(self):
a = self._a
offset = self._offset
place = self._docnum - offset
place += 1
while place < len(a) and a[place] == 0:
place += 1
self._docnum = place + offset
def max_quality(self):
return max(self._a[self._docnum - self._offset:])
def block_quality(self):
return self.max_quality()
def skip_to(self, docnum):
if docnum < self._docnum:
return
self._docnum = docnum
i = docnum - self._offset
if i < len(self._a) and self._a[i] == 0:
self.next()
def skip_to_quality(self, minquality):
a = self._a
offset = self._offset
place = self._docnum - offset
skipped = 0
while place < len(a) and a[place] <= minquality:
place += 1
skipped = 1
self._docnum = place + offset
return skipped
def supports(self, astype):
# This matcher doesn't support any posting values
return False
def all_ids(self):
a = self._a
offset = self._offset
place = self._docnum - offset
while place < len(a):
if a[place] > 0:
yield place + offset
place += 1
class ArrayUnionMatcher(CombinationMatcher):
"""Instead of marching the sub-matchers along in parallel, this matcher
pre-reads the scores for a large block of documents at a time from each
matcher, accumulating the scores in an array.
This is faster than the implementation using a binary tree of
:class:`~whoosh.matching.binary.UnionMatcher` objects (possibly just
because of less overhead), but it doesn't allow getting information about
the "current" document other than the score, because there isn't really a
current document, just an array of scores.
"""
def __init__(self, submatchers, doccount, boost=1.0, scored=True,
partsize=2048):
CombinationMatcher.__init__(self, submatchers, boost=boost)
self._scored = scored
self._doccount = doccount
if not partsize:
partsize = doccount
self._partsize = partsize
self._a = array("d", (0 for _ in xrange(self._partsize)))
self._docnum = self._min_id()
self._read_part()
def __repr__(self):
return ("%s(%r, boost=%f, scored=%r, partsize=%d)"
% (self.__class__.__name__, self._submatchers, self._boost,
self._scored, self._partsize))
def _min_id(self):
active = [subm for subm in self._submatchers if subm.is_active()]
if active:
return min(subm.id() for subm in active)
else:
return self._doccount
def _read_part(self):
scored = self._scored
boost = self._boost
limit = min(self._docnum + self._partsize, self._doccount)
offset = self._docnum
a = self._a
# Clear the array
for i in xrange(self._partsize):
a[i] = 0
# Add the scores from the submatchers into the array
for m in self._submatchers:
while m.is_active() and m.id() < limit:
i = m.id() - offset
if scored:
a[i] += m.score() * boost
else:
a[i] = 1
m.next()
self._offset = offset
self._limit = limit
def _find_next(self):
a = self._a
docnum = self._docnum
offset = self._offset
limit = self._limit
while docnum < limit:
if a[docnum - offset] > 0:
break
docnum += 1
if docnum == limit:
self._docnum = self._min_id()
self._read_part()
else:
self._docnum = docnum
def supports(self, astype):
# This matcher doesn't support any posting values
return False
def is_active(self):
return self._docnum < self._doccount
def max_quality(self):
return max(m.max_quality() for m in self._submatchers)
def block_quality(self):
return max(self._a)
def skip_to(self, docnum):
if docnum < self._offset:
# We've already passed it
return
elif docnum < self._limit:
# It's in the current part
self._docnum = docnum
self._find_next()
return
# Advance all active submatchers
submatchers = self._submatchers
active = False
for subm in submatchers:
if subm.is_active():
subm.skip_to(docnum)
if any(subm.is_active() for subm in submatchers):
# Rebuffer
self._docnum = self._min_id()
self._read_part()
else:
self._docnum = self._doccount
def skip_to_quality(self, minquality):
skipped = 0
while self.is_active() and self.block_quality() <= minquality:
skipped += 1
self._docnum = self._limit
self._read_part()
if self.is_active():
self._find_next()
return skipped
def id(self):
return self._docnum
def all_ids(self):
doccount = self._doccount
docnum = self._docnum
offset = self._offset
limit = self._limit
a = self._a
while docnum < doccount:
if a[docnum - offset] > 0:
yield docnum
docnum += 1
if docnum == limit:
self._docnum = docnum
self._read_part()
offset = self._offset
limit = self._limit
def next(self):
self._docnum += 1
return self._find_next()
def score(self):
return self._a[self._docnum - self._offset]
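if __name__ == "__main__":
    # Tiny sketch (an assumption, not part of the original module) showing what
    # the array-based union does: each submatcher's document ids are folded
    # into a shared array and iteration then walks that array. ListMatcher
    # from whoosh.matching is used purely as a simple in-memory posting list;
    # scored=False so only ids matter here.
    from whoosh.matching import ListMatcher
    m1 = ListMatcher([1, 2, 5, 9])
    m2 = ListMatcher([2, 3, 9, 12])
    union = ArrayUnionMatcher([m1, m2], doccount=16, scored=False)
    print(list(union.all_ids()))  # union of the two id lists: [1, 2, 3, 5, 9, 12]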
|
|
#!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# expects a Cassandra server to be running and listening on port 9160.
# (read tests expect insert tests to have run first too.)
from __future__ import with_statement
have_multiproc = False
try:
from multiprocessing import Array as array, Process as Thread
from uuid import uuid1 as get_ident
array('i', 1) # catch "This platform lacks a functioning sem_open implementation"
Thread.isAlive = Thread.is_alive
have_multiproc = True
except ImportError:
from threading import Thread
from thread import get_ident
from array import array
from hashlib import md5
import time, random, sys, os
from random import randint, gauss
from optparse import OptionParser
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.transport import THttpClient
from thrift.protocol import TBinaryProtocol
try:
from cassandra import Cassandra
from cassandra.ttypes import *
except ImportError:
# add cassandra directory to sys.path
L = os.path.abspath(__file__).split(os.path.sep)[:-3]
root = os.path.sep.join(L)
_ipath = os.path.join(root, 'interface', 'thrift', 'gen-py')
sys.path.append(os.path.join(_ipath, 'cassandra'))
import Cassandra
from ttypes import *
except ImportError:
print "Cassandra thrift bindings not found, please run 'ant gen-thrift-py'"
sys.exit(2)
try:
from thrift.protocol import fastbinary
except ImportError:
print "WARNING: thrift binary extension not found, benchmark will not be accurate!"
parser = OptionParser()
parser.add_option('-n', '--num-keys', type="int", dest="numkeys",
help="Number of keys", default=1000**2)
parser.add_option('-N', '--skip-keys', type="float", dest="skipkeys",
help="Fraction of keys to skip initially", default=0)
parser.add_option('-t', '--threads', type="int", dest="threads",
help="Number of threads/procs to use", default=50)
parser.add_option('-c', '--columns', type="int", dest="columns",
help="Number of columns per key", default=5)
parser.add_option('-S', '--column-size', type="int", dest="column_size",
help="Size of column values in bytes", default=34)
parser.add_option('-C', '--cardinality', type="int", dest="cardinality",
help="Number of unique values stored in columns", default=50)
parser.add_option('-d', '--nodes', type="string", dest="nodes",
help="Host nodes (comma separated)", default="localhost")
parser.add_option('-D', '--nodefile', type="string", dest="nodefile",
help="File containing list of nodes (one per line)", default=None)
parser.add_option('-s', '--stdev', type="float", dest="stdev", default=0.1,
help="standard deviation factor")
parser.add_option('-r', '--random', action="store_true", dest="random",
help="use random key generator (stdev will have no effect)")
parser.add_option('-f', '--file', type="string", dest="file",
help="write output to file")
parser.add_option('-p', '--port', type="int", default=9160, dest="port",
help="thrift port")
parser.add_option('-m', '--unframed', action="store_true", dest="unframed",
help="use unframed transport")
parser.add_option('-o', '--operation', type="choice", dest="operation",
default="insert", choices=('insert', 'read', 'rangeslice',
'indexedrangeslice', 'multiget'),
help="operation to perform")
parser.add_option('-u', '--supercolumns', type="int", dest="supers", default=1,
help="number of super columns per key")
parser.add_option('-y', '--family-type', type="choice", dest="cftype",
choices=('regular','super'), default='regular',
help="column family type")
parser.add_option('-k', '--keep-going', action="store_true", dest="ignore",
help="ignore errors inserting or reading")
parser.add_option('-i', '--progress-interval', type="int", default=10,
dest="interval", help="progress report interval (seconds)")
parser.add_option('-g', '--keys-per-call', type="int", default=1000,
dest="rangecount",
help="amount of keys to get_range_slices or multiget per call")
parser.add_option('-l', '--replication-factor', type="int", default=1,
dest="replication",
help="replication factor to use when creating needed column families")
parser.add_option('-e', '--consistency-level', type="str", default='ONE',
dest="consistency", help="consistency level to use")
parser.add_option('-x', '--create-index', type="choice",
choices=('keys','keys_bitmap', 'none'), default='none',
dest="index", help="type of index to create on needed column families")
(options, args) = parser.parse_args()
total_keys = options.numkeys
n_threads = options.threads
keys_per_thread = total_keys / n_threads
columns_per_key = options.columns
supers_per_key = options.supers
# this allows client to round robin requests directly for
# simple request load-balancing
nodes = options.nodes.split(',')
if options.nodefile != None:
with open(options.nodefile) as f:
nodes = [n.strip() for n in f.readlines() if len(n.strip()) > 0]
#format string for keys
fmt = '%0' + str(len(str(total_keys))) + 'd'
# returns keys drawn from a bell curve centered around the middle of the key
# range (0..total_keys). Remember that about 68% of keys will be within stdev
# away from the mean and about 95% within 2*stdev.
stdev = total_keys * options.stdev
mean = total_keys / 2
consistency = getattr(ConsistencyLevel, options.consistency, None)
if consistency is None:
print "%s is not a valid consistency level" % options.consistency
sys.exit(3)
# generates a list of unique, deterministic values
def generate_values():
values = []
for i in xrange(0, options.cardinality):
h = md5(str(i)).hexdigest()
values.append(h * int(options.column_size/len(h)) + h[:options.column_size % len(h)])
return values
def key_generator_gauss():
while True:
guess = gauss(mean, stdev)
if 0 <= guess < total_keys:
return fmt % int(guess)
# returns keys with equal probability across the whole key range. this is the
# worst case for caching.
def key_generator_random():
return fmt % randint(0, total_keys - 1)
key_generator = key_generator_gauss
if options.random:
key_generator = key_generator_random
def get_client(host='127.0.0.1', port=9160):
socket = TSocket.TSocket(host, port)
if options.unframed:
transport = TTransport.TBufferedTransport(socket)
else:
transport = TTransport.TFramedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
client = Cassandra.Client(protocol)
client.transport = transport
return client
def make_keyspaces():
colms = []
if options.index == 'keys':
colms = [ColumnDef(name='C1', validation_class='UTF8Type', index_type=IndexType.KEYS)]
elif options.index == 'keys_bitmap':
colms = [ColumnDef(name='C1', validation_class='UTF8Type', index_type=IndexType.KEYS_BITMAP)]
cfams = [CfDef(keyspace='Keyspace1', name='Standard1', column_metadata=colms),
CfDef(keyspace='Keyspace1', name='Super1', column_type='Super')]
keyspace = KsDef(name='Keyspace1', strategy_class='org.apache.cassandra.locator.SimpleStrategy', replication_factor=options.replication, cf_defs=cfams)
client = get_client(nodes[0], options.port)
client.transport.open()
try:
client.system_add_keyspace(keyspace)
print "Created keyspaces. Sleeping %ss for propagation." % len(nodes)
time.sleep(len(nodes))
except InvalidRequestException, e:
print e.why
client.transport.close()
class Operation(Thread):
def __init__(self, i, opcounts, keycounts, latencies):
Thread.__init__(self)
# generator of the keys to be used
self.range = xrange(int(keys_per_thread * (i + options.skipkeys)),
keys_per_thread * (i + 1))
# we can't use a local counter, since that won't be visible to the parent
        # under multiprocessing. instead, the parent passes an "opcounts" array
# and an index that is our assigned counter.
self.idx = i
self.opcounts = opcounts
# similarly, a shared array for latency and key totals
self.latencies = latencies
self.keycounts = keycounts
# random host for pseudo-load-balancing
[hostname] = random.sample(nodes, 1)
# open client
self.cclient = get_client(hostname, options.port)
self.cclient.transport.open()
self.cclient.set_keyspace('Keyspace1')
class Inserter(Operation):
def run(self):
values = generate_values()
columns = [Column('C' + str(j), 'unset', time.time() * 1000000) for j in xrange(columns_per_key)]
if 'super' == options.cftype:
supers = [SuperColumn('S' + str(j), columns) for j in xrange(supers_per_key)]
for i in self.range:
key = fmt % i
if 'super' == options.cftype:
cfmap= {key: {'Super1' : [Mutation(ColumnOrSuperColumn(super_column=s)) for s in supers]}}
else:
cfmap = {key: {'Standard1': [Mutation(ColumnOrSuperColumn(column=c)) for c in columns]}}
# set the correct column values for this row
value = values[i % len(values)]
for column in columns:
column.value = value
start = time.time()
try:
self.cclient.batch_mutate(cfmap, consistency)
except KeyboardInterrupt:
raise
except Exception, e:
if options.ignore:
print e
else:
raise
self.latencies[self.idx] += time.time() - start
self.opcounts[self.idx] += 1
self.keycounts[self.idx] += 1
class Reader(Operation):
def run(self):
p = SlicePredicate(slice_range=SliceRange('', '', False, columns_per_key))
if 'super' == options.cftype:
for i in xrange(keys_per_thread):
key = key_generator()
for j in xrange(supers_per_key):
parent = ColumnParent('Super1', 'S' + str(j))
start = time.time()
try:
r = self.cclient.get_slice(key, parent, p, consistency)
if not r: raise RuntimeError("Key %s not found" % key)
except KeyboardInterrupt:
raise
except Exception, e:
if options.ignore:
print e
else:
raise
self.latencies[self.idx] += time.time() - start
self.opcounts[self.idx] += 1
self.keycounts[self.idx] += 1
else:
parent = ColumnParent('Standard1')
for i in xrange(keys_per_thread):
key = key_generator()
start = time.time()
try:
r = self.cclient.get_slice(key, parent, p, consistency)
if not r: raise RuntimeError("Key %s not found" % key)
except KeyboardInterrupt:
raise
except Exception, e:
if options.ignore:
print e
else:
raise
self.latencies[self.idx] += time.time() - start
self.opcounts[self.idx] += 1
self.keycounts[self.idx] += 1
class RangeSlicer(Operation):
def run(self):
begin = self.range[0]
end = self.range[-1]
current = begin
last = current + options.rangecount
p = SlicePredicate(slice_range=SliceRange('', '', False, columns_per_key))
if 'super' == options.cftype:
while current < end:
keyrange = KeyRange(fmt % current, fmt % last, count = options.rangecount)
res = []
for j in xrange(supers_per_key):
parent = ColumnParent('Super1', 'S' + str(j))
begin = time.time()
try:
res = self.cclient.get_range_slices(parent, p, keyrange, consistency)
                        if not res: raise RuntimeError("Range not found: %s to %s" % (fmt % current, fmt % last))
except KeyboardInterrupt:
raise
except Exception, e:
if options.ignore:
print e
else:
raise
self.latencies[self.idx] += time.time() - begin
self.opcounts[self.idx] += 1
                current += len(res) + 1
                last = current + len(res) + 1
                self.keycounts[self.idx] += len(res)
else:
parent = ColumnParent('Standard1')
while current < end:
start = fmt % current
finish = fmt % last
keyrange = KeyRange(start, finish, count = options.rangecount)
begin = time.time()
try:
r = self.cclient.get_range_slices(parent, p, keyrange, consistency)
if not r: raise RuntimeError("Range not found:", start, finish)
except KeyboardInterrupt:
raise
except Exception, e:
if options.ignore:
print e
else:
print start, finish
raise
current += len(r) + 1
last = current + len(r) + 1
self.latencies[self.idx] += time.time() - begin
self.opcounts[self.idx] += 1
self.keycounts[self.idx] += len(r)
# Each thread queries for a portion of the unique values
# TODO: all threads start at the same key: implement wrapping, and start
# from the thread's appointed range
class IndexedRangeSlicer(Operation):
def run(self):
p = SlicePredicate(slice_range=SliceRange('', '', False, columns_per_key))
values = generate_values()
parent = ColumnParent('Standard1')
# the number of rows with a particular value and the number of values we should query for
expected_per_value = total_keys // len(values)
valuebegin = self.range[0] // expected_per_value
valuecount = len(self.range) // expected_per_value
for valueidx in xrange(valuebegin, valuebegin + valuecount):
received = 0
start = fmt % 0
value = values[valueidx % len(values)]
expressions = [IndexExpression(column_name='C1', op=IndexOperator.EQ, value=value)]
while received < expected_per_value:
clause = IndexClause(start_key=start, count=options.rangecount, expressions=expressions)
begin = time.time()
try:
r = self.cclient.get_indexed_slices(parent, clause, p, consistency)
if not r: raise RuntimeError("No indexed values from offset received:", start)
except KeyboardInterrupt:
raise
except Exception, e:
if options.ignore:
print e
continue
else:
raise
received += len(r)
# convert max key found back to an integer, and increment it
start = fmt % (1 + max([int(keyslice.key) for keyslice in r]))
self.latencies[self.idx] += time.time() - begin
self.opcounts[self.idx] += 1
self.keycounts[self.idx] += len(r)
class MultiGetter(Operation):
def run(self):
p = SlicePredicate(slice_range=SliceRange('', '', False, columns_per_key))
offset = self.idx * keys_per_thread
count = (((self.idx+1) * keys_per_thread) - offset) / options.rangecount
if 'super' == options.cftype:
for x in xrange(count):
keys = [key_generator() for i in xrange(offset, offset + options.rangecount)]
for j in xrange(supers_per_key):
parent = ColumnParent('Super1', 'S' + str(j))
start = time.time()
try:
r = self.cclient.multiget_slice(keys, parent, p, consistency)
if not r: raise RuntimeError("Keys %s not found" % keys)
except KeyboardInterrupt:
raise
except Exception, e:
if options.ignore:
print e
else:
raise
self.latencies[self.idx] += time.time() - start
self.opcounts[self.idx] += 1
self.keycounts[self.idx] += len(keys)
offset += options.rangecount
else:
parent = ColumnParent('Standard1')
for x in xrange(count):
keys = [key_generator() for i in xrange(offset, offset + options.rangecount)]
start = time.time()
try:
r = self.cclient.multiget_slice(keys, parent, p, consistency)
if not r: raise RuntimeError("Keys %s not found" % keys)
except KeyboardInterrupt:
raise
except Exception, e:
if options.ignore:
print e
else:
raise
self.latencies[self.idx] += time.time() - start
self.opcounts[self.idx] += 1
self.keycounts[self.idx] += len(keys)
offset += options.rangecount
class OperationFactory:
@staticmethod
def create(type, i, opcounts, keycounts, latencies):
if type == 'read':
return Reader(i, opcounts, keycounts, latencies)
elif type == 'insert':
return Inserter(i, opcounts, keycounts, latencies)
elif type == 'rangeslice':
return RangeSlicer(i, opcounts, keycounts, latencies)
elif type == 'indexedrangeslice':
return IndexedRangeSlicer(i, opcounts, keycounts, latencies)
elif type == 'multiget':
return MultiGetter(i, opcounts, keycounts, latencies)
else:
raise RuntimeError, 'Unsupported op!'
class Stress(object):
opcounts = array('i', [0] * n_threads)
latencies = array('d', [0] * n_threads)
keycounts = array('i', [0] * n_threads)
def create_threads(self,type):
threads = []
for i in xrange(n_threads):
th = OperationFactory.create(type, i, self.opcounts, self.keycounts, self.latencies)
threads.append(th)
th.start()
return threads
def run_test(self,filename,threads):
start_t = time.time()
if filename:
outf = open(filename,'w')
else:
outf = sys.stdout
outf.write('total,interval_op_rate,interval_key_rate,avg_latency,elapsed_time\n')
epoch = total = old_total = latency = keycount = old_keycount = old_latency = 0
epoch_intervals = (options.interval * 10) # 1 epoch = 1 tenth of a second
terminate = False
while not terminate:
time.sleep(0.1)
if not [th for th in threads if th.isAlive()]:
terminate = True
epoch = epoch + 1
if terminate or epoch > epoch_intervals:
epoch = 0
old_total, old_latency, old_keycount = total, latency, keycount
total = sum(self.opcounts[th.idx] for th in threads)
latency = sum(self.latencies[th.idx] for th in threads)
keycount = sum(self.keycounts[th.idx] for th in threads)
opdelta = total - old_total
keydelta = keycount - old_keycount
delta_latency = latency - old_latency
if opdelta > 0:
delta_formatted = (delta_latency / opdelta)
else:
delta_formatted = 'NaN'
elapsed_t = int(time.time() - start_t)
outf.write('%d,%d,%d,%s,%d\n'
% (total, opdelta / options.interval, keydelta / options.interval, delta_formatted, elapsed_t))
    def insert(self):
        threads = self.create_threads('insert')
        self.run_test(options.file, threads)
    def read(self):
        threads = self.create_threads('read')
        self.run_test(options.file, threads)
    def rangeslice(self):
        threads = self.create_threads('rangeslice')
        self.run_test(options.file, threads)
    def indexedrangeslice(self):
        threads = self.create_threads('indexedrangeslice')
        self.run_test(options.file, threads)
    def multiget(self):
        threads = self.create_threads('multiget')
        self.run_test(options.file, threads)
stresser = Stress()
benchmark = getattr(stresser, options.operation, None)
if not have_multiproc:
print """WARNING: multiprocessing not present, threading will be used.
Benchmark may not be accurate!"""
if options.operation == 'insert':
make_keyspaces()
benchmark()
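# Editor's sketch (not part of the original script): how a new operation type could be
# wired into the pattern above. The name NoopOperation and the 'noop' key are hypothetical.
#
#     class NoopOperation(Operation):
#         def run(self):
#             begin = time.time()
#             self.latencies[self.idx] += time.time() - begin
#             self.opcounts[self.idx] += 1
#
#     # then add an "elif type == 'noop': return NoopOperation(i, opcounts, keycounts, latencies)"
#     # branch to OperationFactory.create, and a matching method on Stress:
#     #     def noop(self):
#     #         threads = self.create_threads('noop')
#     #         self.run_test(options.file, threads)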
|
|
#!/usr/bin/env python
"""
client module for memcached (memory cache daemon)
Overview
========
See U{the MemCached homepage<http://www.danga.com/memcached>} for more about memcached.
Usage summary
=============
This should give you a feel for how this module operates::
import memcache
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
mc.set("some_key", "Some value")
value = mc.get("some_key")
mc.set("another_key", 3)
mc.delete("another_key")
mc.set("key", "1") # note that the key used for incr/decr must be a string.
mc.incr("key")
mc.decr("key")
The standard way to use memcache with a database is like this::
key = derive_key(obj)
obj = mc.get(key)
if not obj:
obj = backend_api.get(...)
mc.set(key, obj)
# we now have obj, and future passes through this code
# will use the object from the cache.
Detailed Documentation
======================
More detailed documentation is available in the L{Client} class.
"""
import sys
import socket
import time
import os
import re
try:
import cPickle as pickle
except ImportError:
import pickle
from binascii import crc32 # zlib version is not cross-platform
def cmemcache_hash(key):
return((((crc32(key) & 0xffffffff) >> 16) & 0x7fff) or 1)
serverHashFunction = cmemcache_hash
def useOldServerHashFunction():
"""Use the old python-memcache server hash function."""
global serverHashFunction
serverHashFunction = crc32
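# Editor's sketch (not part of the original module): how the server hash selects a
# bucket. The key and the three-server pool below are hypothetical.
#
#     mc = Client(['10.0.0.1:11211', '10.0.0.2:11211', '10.0.0.3:11211'])
#     h = serverHashFunction('some_key')          # cmemcache_hash by default
#     server = mc.buckets[h % len(mc.buckets)]    # same mapping Client._get_server() uses
#
# Calling useOldServerHashFunction() swaps in plain crc32, which changes the mapping
# and therefore which server existing keys are found on.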
try:
from zlib import compress, decompress
_supports_compress = True
except ImportError:
_supports_compress = False
# quickly define a decompress just in case we recv compressed data.
def decompress(val):
raise _Error("received compressed data but I don't support compression (import error)")
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Original author: Evan Martin of Danga Interactive
__author__ = "Sean Reifschneider <[email protected]>"
__version__ = "1.47"
__copyright__ = "Copyright (C) 2003 Danga Interactive"
# http://en.wikipedia.org/wiki/Python_Software_Foundation_License
__license__ = "Python Software Foundation License"
SERVER_MAX_KEY_LENGTH = 250
# Storing values larger than 1MB requires recompiling memcached. If you do,
# this value can be changed by doing "memcache.SERVER_MAX_VALUE_LENGTH = N"
# after importing this module.
SERVER_MAX_VALUE_LENGTH = 1024*1024
class _Error(Exception):
pass
try:
# Only exists in Python 2.4+
from threading import local
except ImportError:
# TODO: add the pure-python local implementation
class local(object):
pass
class Client(local):
"""
Object representing a pool of memcache servers.
See L{memcache} for an overview.
In all cases where a key is used, the key can be either:
1. A simple hashable type (string, integer, etc.).
2. A tuple of C{(hashvalue, key)}. This is useful if you want to avoid
making this module calculate a hash value. You may prefer, for
example, to keep all of a given user's objects on the same memcache
server, so you could use the user's unique id as the hash value.
@group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog
@group Insertion: set, add, replace, set_multi
@group Retrieval: get, get_multi
@group Integers: incr, decr
@group Removal: delete, delete_multi
@sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\
set, set_multi, add, replace, get, get_multi, incr, decr, delete, delete_multi
"""
_FLAG_PICKLE = 1<<0
_FLAG_INTEGER = 1<<1
_FLAG_LONG = 1<<2
_FLAG_COMPRESSED = 1<<3
_SERVER_RETRIES = 10 # how many times to try finding a free server.
# exceptions for Client
class MemcachedKeyError(Exception):
pass
class MemcachedKeyLengthError(MemcachedKeyError):
pass
class MemcachedKeyCharacterError(MemcachedKeyError):
pass
class MemcachedKeyNoneError(MemcachedKeyError):
pass
class MemcachedKeyTypeError(MemcachedKeyError):
pass
class MemcachedStringEncodingError(Exception):
pass
def __init__(self, servers, debug=0, pickleProtocol=0,
pickler=pickle.Pickler, unpickler=pickle.Unpickler,
pload=None, pid=None, server_max_key_length=SERVER_MAX_KEY_LENGTH,
server_max_value_length=SERVER_MAX_VALUE_LENGTH):
"""
Create a new Client object with the given list of servers.
@param servers: C{servers} is passed to L{set_servers}.
@param debug: whether to display error messages when a server can't be
contacted.
@param pickleProtocol: number to mandate protocol used by (c)Pickle.
@param pickler: optional override of default Pickler to allow subclassing.
@param unpickler: optional override of default Unpickler to allow subclassing.
@param pload: optional persistent_load function to call on pickle loading.
Useful for cPickle since subclassing isn't allowed.
@param pid: optional persistent_id function to call on pickle storing.
Useful for cPickle since subclassing isn't allowed.
"""
local.__init__(self)
self.debug = debug
self.set_servers(servers)
self.stats = {}
self.cas_ids = {}
# Allow users to modify pickling/unpickling behavior
self.pickleProtocol = pickleProtocol
self.pickler = pickler
self.unpickler = unpickler
self.persistent_load = pload
self.persistent_id = pid
self.server_max_key_length = server_max_key_length
self.server_max_value_length = server_max_value_length
# figure out the pickler style
file = StringIO()
try:
pickler = self.pickler(file, protocol = self.pickleProtocol)
self.picklerIsKeyword = True
except TypeError:
self.picklerIsKeyword = False
def set_servers(self, servers):
"""
Set the pool of servers used by this client.
@param servers: an array of servers.
Servers can be passed in two forms:
1. Strings of the form C{"host:port"}, which implies a default weight of 1.
2. Tuples of the form C{("host:port", weight)}, where C{weight} is
an integer weight value.
"""
self.servers = [_Host(s, self.debug) for s in servers]
self._init_buckets()
def get_stats(self, stat_args = None):
'''Get statistics from each of the servers.
@param stat_args: Additional arguments to pass to the memcache
"stats" command.
@return: A list of tuples ( server_identifier, stats_dictionary ).
The dictionary contains a number of name/value pairs specifying
the name of the status field and the string value associated with
it. The values are not converted from strings.
'''
data = []
for s in self.servers:
if not s.connect(): continue
if s.family == socket.AF_INET:
name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
else:
name = 'unix:%s (%s)' % ( s.address, s.weight )
if not stat_args:
s.send_cmd('stats')
else:
s.send_cmd('stats ' + stat_args)
serverData = {}
data.append(( name, serverData ))
readline = s.readline
while 1:
line = readline()
if not line or line.strip() == 'END': break
stats = line.split(' ', 2)
serverData[stats[1]] = stats[2]
return(data)
def get_slabs(self):
data = []
for s in self.servers:
if not s.connect(): continue
if s.family == socket.AF_INET:
name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
else:
name = 'unix:%s (%s)' % ( s.address, s.weight )
serverData = {}
data.append(( name, serverData ))
s.send_cmd('stats items')
readline = s.readline
while 1:
line = readline()
if not line or line.strip() == 'END': break
item = line.split(' ', 2)
#0 = STAT, 1 = ITEM, 2 = Value
slab = item[1].split(':', 2)
#0 = items, 1 = Slab #, 2 = Name
if slab[1] not in serverData:
serverData[slab[1]] = {}
serverData[slab[1]][slab[2]] = item[2]
return data
def flush_all(self):
'Expire all data currently in the memcache servers.'
for s in self.servers:
if not s.connect(): continue
s.send_cmd('flush_all')
s.expect("OK")
def debuglog(self, str):
if self.debug:
sys.stderr.write("MemCached: %s\n" % str)
def _statlog(self, func):
if func not in self.stats:
self.stats[func] = 1
else:
self.stats[func] += 1
def forget_dead_hosts(self):
"""
Reset every host in the pool to an "alive" state.
"""
for s in self.servers:
s.deaduntil = 0
def _init_buckets(self):
self.buckets = []
for server in self.servers:
for i in range(server.weight):
self.buckets.append(server)
def _get_server(self, key):
if isinstance(key, tuple):
serverhash, key = key
else:
serverhash = serverHashFunction(key)
for i in range(Client._SERVER_RETRIES):
server = self.buckets[serverhash % len(self.buckets)]
if server.connect():
#print "(using server %s)" % server,
return server, key
serverhash = serverHashFunction(str(serverhash) + str(i))
return None, None
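    # Editor's sketch (not part of the original module): how weights translate into
    # buckets for _get_server(). The host names are hypothetical.
    #
    #     mc = Client([('10.0.0.1:11211', 2), ('10.0.0.2:11211', 1)])
    #     # mc.buckets is [host1, host1, host2], so roughly 2/3 of key hashes land on
    #     # host1; if the chosen host is dead, the hash is re-derived and retried up to
    #     # _SERVER_RETRIES times before giving up.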
def disconnect_all(self):
for s in self.servers:
s.close_socket()
def delete_multi(self, keys, time=0, key_prefix=''):
'''
Delete multiple keys in the memcache doing just one query.
>>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
>>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
1
>>> mc.delete_multi(['key1', 'key2'])
1
>>> mc.get_multi(['key1', 'key2']) == {}
1
This method is recommended over iterated regular L{delete}s as it reduces total latency, since
your app doesn't have to wait for each round-trip of L{delete} before sending
the next one.
@param keys: An iterable of keys to clear
@param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay.
@param key_prefix: Optional string to prepend to each key when sending to memcache.
See docs for L{get_multi} and L{set_multi}.
@return: 1 if no failure in communication with any memcacheds.
@rtype: int
'''
self._statlog('delete_multi')
server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
rc = 1
for server in server_keys.iterkeys():
bigcmd = []
write = bigcmd.append
if time != None:
for key in server_keys[server]: # These are mangled keys
write("delete %s %d\r\n" % (key, time))
else:
for key in server_keys[server]: # These are mangled keys
write("delete %s\r\n" % key)
try:
server.send_cmds(''.join(bigcmd))
except socket.error, msg:
rc = 0
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
dead_servers.append(server)
# if any servers died on the way, don't expect them to respond.
for server in dead_servers:
del server_keys[server]
for server, keys in server_keys.iteritems():
try:
for key in keys:
server.expect("DELETED")
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
rc = 0
return rc
def delete(self, key, time=0):
'''Deletes a key from the memcache.
@return: Nonzero on success.
@param time: number of seconds any subsequent set / update commands
should fail. Defaults to 0 for no delay.
@rtype: int
'''
self.check_key(key)
server, key = self._get_server(key)
if not server:
return 0
self._statlog('delete')
if time != None:
cmd = "delete %s %d" % (key, time)
else:
cmd = "delete %s" % key
try:
server.send_cmd(cmd)
line = server.readline()
if line and line.strip() in ['DELETED', 'NOT_FOUND']: return 1
self.debuglog('Delete expected DELETED or NOT_FOUND, got: %s'
% repr(line))
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return 0
def incr(self, key, delta=1):
"""
Sends a command to the server to atomically increment the value
for C{key} by C{delta}, or by 1 if C{delta} is unspecified.
Returns None if C{key} doesn't exist on server, otherwise it
returns the new value after incrementing.
Note that the value for C{key} must already exist in the memcache,
and it must be the string representation of an integer.
>>> mc.set("counter", "20") # returns 1, indicating success
1
>>> mc.incr("counter")
21
>>> mc.incr("counter")
22
Overflow on server is not checked. Be aware of values approaching
2**32. See L{decr}.
@param delta: Integer amount to increment by (should be zero or greater).
@return: New value after incrementing.
@rtype: int
"""
return self._incrdecr("incr", key, delta)
def decr(self, key, delta=1):
"""
Like L{incr}, but decrements. Unlike L{incr}, underflow is checked and
new values are capped at 0. If server value is 1, a decrement of 2
returns 0, not -1.
@param delta: Integer amount to decrement by (should be zero or greater).
@return: New value after decrementing.
@rtype: int
"""
return self._incrdecr("decr", key, delta)
def _incrdecr(self, cmd, key, delta):
self.check_key(key)
server, key = self._get_server(key)
if not server:
return 0
self._statlog(cmd)
cmd = "%s %s %d" % (cmd, key, delta)
try:
server.send_cmd(cmd)
line = server.readline()
if line == None or line.strip() =='NOT_FOUND': return None
return int(line)
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return None
def add(self, key, val, time = 0, min_compress_len = 0):
'''
Add new key with value.
Like L{set}, but only stores in memcache if the key doesn't already exist.
@return: Nonzero on success.
@rtype: int
'''
return self._set("add", key, val, time, min_compress_len)
def append(self, key, val, time=0, min_compress_len=0):
'''Append the value to the end of the existing key's value.
Only stores in memcache if key already exists.
Also see L{prepend}.
@return: Nonzero on success.
@rtype: int
'''
return self._set("append", key, val, time, min_compress_len)
def prepend(self, key, val, time=0, min_compress_len=0):
'''Prepend the value to the beginning of the existing key's value.
Only stores in memcache if key already exists.
Also see L{append}.
@return: Nonzero on success.
@rtype: int
'''
return self._set("prepend", key, val, time, min_compress_len)
def replace(self, key, val, time=0, min_compress_len=0):
'''Replace existing key with value.
Like L{set}, but only stores in memcache if the key already exists.
The opposite of L{add}.
@return: Nonzero on success.
@rtype: int
'''
return self._set("replace", key, val, time, min_compress_len)
def set(self, key, val, time=0, min_compress_len=0):
'''Unconditionally sets a key to a given value in the memcache.
        The C{key} can optionally be a tuple, with the first element being
        the server hash value and the second being the key. This is useful
        if you want to avoid making this module calculate a hash value.
        You may prefer, for example, to keep all of a given user's objects
        on the same memcache server, so you could use the user's unique
        id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time which this value should expire, either
as a delta number of seconds, or an absolute unix time-since-the-epoch
value. See the memcached protocol docs section "Storage Commands"
for more info on <exptime>. We default to 0 == cache forever.
@param min_compress_len: The threshold length to kick in auto-compression
of the value using the zlib.compress() routine. If the value being cached is
a string, then the length of the string is measured, else if the value is an
object, then the length of the pickle result is measured. If the resulting
        attempt at compression yields a larger string than the input, then it is
        discarded. For backwards compatibility, this parameter defaults to 0,
indicating don't ever try to compress.
'''
return self._set("set", key, val, time, min_compress_len)
def cas(self, key, val, time=0, min_compress_len=0):
'''Sets a key to a given value in the memcache if it hasn't been
altered since last fetched. (See L{gets}).
        The C{key} can optionally be a tuple, with the first element being
        the server hash value and the second being the key. This is useful
        if you want to avoid making this module calculate a hash value.
        You may prefer, for example, to keep all of a given user's objects
        on the same memcache server, so you could use the user's unique
        id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time which this value should expire,
either as a delta number of seconds, or an absolute unix
time-since-the-epoch value. See the memcached protocol docs section
"Storage Commands" for more info on <exptime>. We default to
0 == cache forever.
@param min_compress_len: The threshold length to kick in
auto-compression of the value using the zlib.compress() routine. If
the value being cached is a string, then the length of the string is
measured, else if the value is an object, then the length of the
pickle result is measured. If the resulting attempt at compression
        yields a larger string than the input, then it is discarded. For
        backwards compatibility, this parameter defaults to 0, indicating
don't ever try to compress.
'''
return self._set("cas", key, val, time, min_compress_len)
def _map_and_prefix_keys(self, key_iterable, key_prefix):
"""Compute the mapping of server (_Host instance) -> list of keys to stuff onto that server, as well as the mapping of
prefixed key -> original key.
"""
# Check it just once ...
key_extra_len=len(key_prefix)
if key_prefix:
self.check_key(key_prefix)
# server (_Host) -> list of unprefixed server keys in mapping
server_keys = {}
prefixed_to_orig_key = {}
# build up a list for each server of all the keys we want.
for orig_key in key_iterable:
if isinstance(orig_key, tuple):
# Tuple of hashvalue, key ala _get_server(). Caller is essentially telling us what server to stuff this on.
# Ensure call to _get_server gets a Tuple as well.
str_orig_key = str(orig_key[1])
server, key = self._get_server((orig_key[0], key_prefix + str_orig_key)) # Gotta pre-mangle key before hashing to a server. Returns the mangled key.
else:
str_orig_key = str(orig_key) # set_multi supports int / long keys.
server, key = self._get_server(key_prefix + str_orig_key)
# Now check to make sure key length is proper ...
self.check_key(str_orig_key, key_extra_len=key_extra_len)
if not server:
continue
if server not in server_keys:
server_keys[server] = []
server_keys[server].append(key)
prefixed_to_orig_key[key] = orig_key
return (server_keys, prefixed_to_orig_key)
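    # Editor's sketch (not part of the original module): shape of the two mappings
    # returned by _map_and_prefix_keys for keys ['k1', 42] with key_prefix='pfx_',
    # assuming both prefixed keys hash to the same live server `s`.
    #
    #     server_keys          == {s: ['pfx_k1', 'pfx_42']}
    #     prefixed_to_orig_key == {'pfx_k1': 'k1', 'pfx_42': 42}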
def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):
'''
Sets multiple keys in the memcache doing just one query.
>>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
>>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
1
This method is recommended over regular L{set} as it lowers the number of
total packets flying around your network, reducing total latency, since
your app doesn't have to wait for each round-trip of L{set} before sending
the next one.
@param mapping: A dict of key/value pairs to set.
@param time: Tells memcached the time which this value should expire, either
as a delta number of seconds, or an absolute unix time-since-the-epoch
value. See the memcached protocol docs section "Storage Commands"
for more info on <exptime>. We default to 0 == cache forever.
@param key_prefix: Optional string to prepend to each key when sending to memcache. Allows you to efficiently stuff these keys into a pseudo-namespace in memcache:
>>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'}, key_prefix='subspace_')
>>> len(notset_keys) == 0
True
>>> mc.get_multi(['subspace_key1', 'subspace_key2']) == {'subspace_key1' : 'val1', 'subspace_key2' : 'val2'}
True
Causes key 'subspace_key1' and 'subspace_key2' to be set. Useful in conjunction with a higher-level layer which applies namespaces to data in memcache.
In this case, the return result would be the list of notset original keys, prefix not applied.
@param min_compress_len: The threshold length to kick in auto-compression
of the value using the zlib.compress() routine. If the value being cached is
a string, then the length of the string is measured, else if the value is an
object, then the length of the pickle result is measured. If the resulting
        attempt at compression yields a larger string than the input, then it is
        discarded. For backwards compatibility, this parameter defaults to 0,
indicating don't ever try to compress.
@return: List of keys which failed to be stored [ memcache out of memory, etc. ].
@rtype: list
'''
self._statlog('set_multi')
server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(mapping.iterkeys(), key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
notstored = [] # original keys.
for server in server_keys.iterkeys():
bigcmd = []
write = bigcmd.append
try:
for key in server_keys[server]: # These are mangled keys
store_info = self._val_to_store_info(
mapping[prefixed_to_orig_key[key]],
min_compress_len)
if store_info:
write("set %s %d %d %d\r\n%s\r\n" % (key, store_info[0],
time, store_info[1], store_info[2]))
else:
notstored.append(prefixed_to_orig_key[key])
server.send_cmds(''.join(bigcmd))
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
dead_servers.append(server)
# if any servers died on the way, don't expect them to respond.
for server in dead_servers:
del server_keys[server]
# short-circuit if there are no servers, just return all keys
if not server_keys: return(mapping.keys())
for server, keys in server_keys.iteritems():
try:
for key in keys:
line = server.readline()
if line == 'STORED':
continue
else:
notstored.append(prefixed_to_orig_key[key]) #un-mangle.
except (_Error, socket.error), msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return notstored
def _val_to_store_info(self, val, min_compress_len):
"""
Transform val to a storable representation, returning a tuple of the flags, the length of the new value, and the new value itself.
"""
flags = 0
if isinstance(val, str):
pass
elif isinstance(val, int):
flags |= Client._FLAG_INTEGER
val = "%d" % val
# force no attempt to compress this silly string.
min_compress_len = 0
elif isinstance(val, long):
flags |= Client._FLAG_LONG
val = "%d" % val
# force no attempt to compress this silly string.
min_compress_len = 0
else:
flags |= Client._FLAG_PICKLE
file = StringIO()
if self.picklerIsKeyword:
pickler = self.pickler(file, protocol = self.pickleProtocol)
else:
pickler = self.pickler(file, self.pickleProtocol)
if self.persistent_id:
pickler.persistent_id = self.persistent_id
pickler.dump(val)
val = file.getvalue()
lv = len(val)
# We should try to compress if min_compress_len > 0 and we could
# import zlib and this string is longer than our min threshold.
if min_compress_len and _supports_compress and lv > min_compress_len:
comp_val = compress(val)
# Only retain the result if the compression result is smaller
# than the original.
if len(comp_val) < lv:
flags |= Client._FLAG_COMPRESSED
val = comp_val
# silently do not store if value length exceeds maximum
if self.server_max_value_length != 0 and \
len(val) >= self.server_max_value_length: return(0)
return (flags, len(val), val)
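    # Editor's sketch (not part of the original module): what _val_to_store_info
    # produces for a non-string value, assuming zlib imported successfully.
    #
    #     flags, length, data = mc._val_to_store_info({'a': 1}, min_compress_len=0)
    #     assert flags & Client._FLAG_PICKLE             # non-string values are pickled
    #     assert not (flags & Client._FLAG_COMPRESSED)   # compression disabled (threshold 0)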
def _set(self, cmd, key, val, time, min_compress_len = 0):
self.check_key(key)
server, key = self._get_server(key)
if not server:
return 0
self._statlog(cmd)
store_info = self._val_to_store_info(val, min_compress_len)
if not store_info: return(0)
if cmd == 'cas':
if key not in self.cas_ids:
return self._set('set', key, val, time, min_compress_len)
fullcmd = "%s %s %d %d %d %d\r\n%s" % (
cmd, key, store_info[0], time, store_info[1],
self.cas_ids[key], store_info[2])
else:
fullcmd = "%s %s %d %d %d\r\n%s" % (
cmd, key, store_info[0], time, store_info[1], store_info[2])
try:
server.send_cmd(fullcmd)
return(server.expect("STORED") == "STORED")
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return 0
def _get(self, cmd, key):
self.check_key(key)
server, key = self._get_server(key)
if not server:
return None
self._statlog(cmd)
try:
server.send_cmd("%s %s" % (cmd, key))
rkey = flags = rlen = cas_id = None
if cmd == 'gets':
rkey, flags, rlen, cas_id, = self._expect_cas_value(server)
if rkey:
self.cas_ids[rkey] = cas_id
else:
rkey, flags, rlen, = self._expectvalue(server)
if not rkey:
return None
value = self._recv_value(server, flags, rlen)
server.expect("END")
except (_Error, socket.error), msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return None
return value
def get(self, key):
'''Retrieves a key from the memcache.
@return: The value or None.
'''
return self._get('get', key)
def gets(self, key):
'''Retrieves a key from the memcache. Used in conjunction with 'cas'.
@return: The value or None.
'''
return self._get('gets', key)
def get_multi(self, keys, key_prefix=''):
'''
Retrieves multiple keys from the memcache doing just one query.
>>> success = mc.set("foo", "bar")
>>> success = mc.set("baz", 42)
>>> mc.get_multi(["foo", "baz", "foobar"]) == {"foo": "bar", "baz": 42}
1
>>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == []
1
This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict will just have unprefixed keys 'k1', 'k2'.
>>> mc.get_multi(['k1', 'k2', 'nonexist'], key_prefix='pfx_') == {'k1' : 1, 'k2' : 2}
1
        get_multi [ and L{set_multi} ] can take str()-able types like ints / longs as keys too, such as your db primary key fields.
        They're run through str() before being passed off to memcache, with or without the use of a key_prefix.
In this mode, the key_prefix could be a table name, and the key itself a db primary key number.
>>> mc.set_multi({42: 'douglass adams', 46 : 'and 2 just ahead of me'}, key_prefix='numkeys_') == []
1
>>> mc.get_multi([46, 42], key_prefix='numkeys_') == {42: 'douglass adams', 46 : 'and 2 just ahead of me'}
1
This method is recommended over regular L{get} as it lowers the number of
total packets flying around your network, reducing total latency, since
your app doesn't have to wait for each round-trip of L{get} before sending
the next one.
See also L{set_multi}.
@param keys: An array of keys.
@param key_prefix: A string to prefix each key when we communicate with memcache.
Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix.
        @return: A dictionary of key/value pairs that were available. If key_prefix was provided, the keys in the returned dictionary will not have it present.
'''
self._statlog('get_multi')
server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
for server in server_keys.iterkeys():
try:
server.send_cmd("get %s" % " ".join(server_keys[server]))
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
dead_servers.append(server)
# if any servers died on the way, don't expect them to respond.
for server in dead_servers:
del server_keys[server]
retvals = {}
for server in server_keys.iterkeys():
try:
line = server.readline()
while line and line != 'END':
rkey, flags, rlen = self._expectvalue(server, line)
# Bo Yang reports that this can sometimes be None
if rkey is not None:
val = self._recv_value(server, flags, rlen)
retvals[prefixed_to_orig_key[rkey]] = val # un-prefix returned key.
line = server.readline()
except (_Error, socket.error), msg:
if isinstance(msg, tuple): msg = msg[1]
server.mark_dead(msg)
return retvals
def _expect_cas_value(self, server, line=None):
if not line:
line = server.readline()
if line and line[:5] == 'VALUE':
            resp, rkey, flags, rlen, cas_id = line.split()
            return (rkey, int(flags), int(rlen), int(cas_id))
else:
return (None, None, None, None)
def _expectvalue(self, server, line=None):
if not line:
line = server.readline()
if line and line[:5] == 'VALUE':
            resp, rkey, flags, rlen = line.split()
            flags = int(flags)
            rlen = int(rlen)
return (rkey, flags, rlen)
else:
return (None, None, None)
def _recv_value(self, server, flags, rlen):
rlen += 2 # include \r\n
buf = server.recv(rlen)
if len(buf) != rlen:
raise _Error("received %d bytes when expecting %d"
% (len(buf), rlen))
        buf = buf[:-2]  # strip \r\n (the check above guarantees a full read)
if flags & Client._FLAG_COMPRESSED:
buf = decompress(buf)
if flags == 0 or flags == Client._FLAG_COMPRESSED:
# Either a bare string or a compressed string now decompressed...
val = buf
elif flags & Client._FLAG_INTEGER:
val = int(buf)
elif flags & Client._FLAG_LONG:
val = long(buf)
elif flags & Client._FLAG_PICKLE:
try:
file = StringIO(buf)
unpickler = self.unpickler(file)
if self.persistent_load:
unpickler.persistent_load = self.persistent_load
val = unpickler.load()
except Exception, e:
self.debuglog('Pickle error: %s\n' % e)
val = None
else:
self.debuglog("unknown flags on get: %x\n" % flags)
return val
def check_key(self, key, key_extra_len=0):
"""Checks sanity of key. Fails if:
        Key length is > SERVER_MAX_KEY_LENGTH (Raises MemcachedKeyLengthError).
        Contains control characters (Raises MemcachedKeyCharacterError).
        Is a unicode string (Raises MemcachedStringEncodingError).
        Is not a string (Raises MemcachedKeyTypeError).
        Is None (Raises MemcachedKeyNoneError).
"""
if isinstance(key, tuple): key = key[1]
if not key:
raise Client.MemcachedKeyNoneError("Key is None")
if isinstance(key, unicode):
raise Client.MemcachedStringEncodingError(
"Keys must be str()'s, not unicode. Convert your unicode "
"strings using mystring.encode(charset)!")
if not isinstance(key, str):
raise Client.MemcachedKeyTypeError("Key must be str()'s")
if isinstance(key, basestring):
if self.server_max_key_length != 0 and \
len(key) + key_extra_len > self.server_max_key_length:
raise Client.MemcachedKeyLengthError("Key length is > %s"
% self.server_max_key_length)
for char in key:
if ord(char) < 33 or ord(char) == 127:
raise Client.MemcachedKeyCharacterError(
"Control characters not allowed")
class _Host(object):
_DEAD_RETRY = 30 # number of seconds before retrying a dead server.
_SOCKET_TIMEOUT = 3 # number of seconds before sockets timeout.
def __init__(self, host, debug=0):
self.debug = debug
if isinstance(host, tuple):
host, self.weight = host
else:
self.weight = 1
# parse the connection string
m = re.match(r'^(?P<proto>unix):(?P<path>.*)$', host)
if not m:
m = re.match(r'^(?P<proto>inet):'
r'(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
if not m: m = re.match(r'^(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
if not m:
raise ValueError('Unable to parse connection string: "%s"' % host)
hostData = m.groupdict()
if hostData.get('proto') == 'unix':
self.family = socket.AF_UNIX
self.address = hostData['path']
else:
self.family = socket.AF_INET
self.ip = hostData['host']
            self.port = int(hostData.get('port') or 11211)
self.address = ( self.ip, self.port )
self.deaduntil = 0
self.socket = None
self.buffer = ''
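    # Editor's sketch (not part of the original module): connection strings accepted
    # by the parser above. Paths and addresses are hypothetical.
    #
    #     _Host('127.0.0.1:11211')                  # inet, explicit port
    #     _Host('memcached.internal')               # inet, default port 11211
    #     _Host('inet:127.0.0.1:11211')             # explicit inet prefix
    #     _Host('unix:/var/run/memcached.sock')     # unix domain socket
    #     _Host(('127.0.0.1:11211', 3))             # ("host:port", weight) tuple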
def debuglog(self, str):
if self.debug:
sys.stderr.write("MemCached: %s\n" % str)
def _check_dead(self):
if self.deaduntil and self.deaduntil > time.time():
return 1
self.deaduntil = 0
return 0
def connect(self):
if self._get_socket():
return 1
return 0
def mark_dead(self, reason):
self.debuglog("MemCache: %s: %s. Marking dead." % (self, reason))
self.deaduntil = time.time() + _Host._DEAD_RETRY
self.close_socket()
def _get_socket(self):
if self._check_dead():
return None
if self.socket:
return self.socket
s = socket.socket(self.family, socket.SOCK_STREAM)
if hasattr(s, 'settimeout'): s.settimeout(self._SOCKET_TIMEOUT)
try:
s.connect(self.address)
except socket.timeout, msg:
self.mark_dead("connect: %s" % msg)
return None
except socket.error, msg:
if isinstance(msg, tuple): msg = msg[1]
self.mark_dead("connect: %s" % msg[1])
return None
self.socket = s
self.buffer = ''
return s
def close_socket(self):
if self.socket:
self.socket.close()
self.socket = None
def send_cmd(self, cmd):
self.socket.sendall(cmd + '\r\n')
def send_cmds(self, cmds):
""" cmds already has trailing \r\n's applied """
self.socket.sendall(cmds)
def readline(self):
buf = self.buffer
recv = self.socket.recv
while True:
index = buf.find('\r\n')
if index >= 0:
break
data = recv(4096)
if not data:
self.mark_dead('Connection closed while reading from %s'
% repr(self))
self.buffer = ''
return ''
buf += data
self.buffer = buf[index+2:]
return buf[:index]
def expect(self, text):
line = self.readline()
if line != text:
self.debuglog("while expecting '%s', got unexpected response '%s'"
% (text, line))
return line
def recv(self, rlen):
self_socket_recv = self.socket.recv
buf = self.buffer
while len(buf) < rlen:
foo = self_socket_recv(max(rlen - len(buf), 4096))
buf += foo
if not foo:
raise _Error( 'Read %d bytes, expecting %d, '
'read returned 0 length bytes' % ( len(buf), rlen ))
self.buffer = buf[rlen:]
return buf[:rlen]
def __str__(self):
d = ''
if self.deaduntil:
d = " (dead until %d)" % self.deaduntil
if self.family == socket.AF_INET:
return "inet:%s:%d%s" % (self.address[0], self.address[1], d)
else:
return "unix:%s%s" % (self.address, d)
def _doctest():
import doctest
servers = ["127.0.0.1:11211"]
mc = Client(servers, debug=1)
globs = {"mc": mc}
return doctest.testmod(globs=globs)
if __name__ == "__main__":
failures = 0
print "Testing docstrings..."
_doctest()
print "Running tests:"
print
serverList = [["127.0.0.1:11211"]]
if '--do-unix' in sys.argv:
serverList.append([os.path.join(os.getcwd(), 'memcached.socket')])
for servers in serverList:
mc = Client(servers, debug=1)
def to_s(val):
if not isinstance(val, basestring):
return "%s (%s)" % (val, type(val))
return "%s" % val
def test_setget(key, val):
print "Testing set/get {'%s': %s} ..." % (to_s(key), to_s(val)),
mc.set(key, val)
newval = mc.get(key)
if newval == val:
print "OK"
return 1
else:
print "FAIL"; failures = failures + 1
return 0
class FooStruct(object):
def __init__(self):
self.bar = "baz"
def __str__(self):
return "A FooStruct"
def __eq__(self, other):
if isinstance(other, FooStruct):
return self.bar == other.bar
return 0
test_setget("a_string", "some random string")
test_setget("an_integer", 42)
if test_setget("long", long(1<<30)):
print "Testing delete ...",
if mc.delete("long"):
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Checking results of delete ..."
if mc.get("long") == None:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing get_multi ...",
print mc.get_multi(["a_string", "an_integer"])
print "Testing get(unknown value) ...",
print to_s(mc.get("unknown_value"))
f = FooStruct()
test_setget("foostruct", f)
print "Testing incr ...",
x = mc.incr("an_integer", 1)
if x == 43:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing decr ...",
x = mc.decr("an_integer", 1)
if x == 42:
print "OK"
else:
print "FAIL"; failures = failures + 1
sys.stdout.flush()
# sanity tests
print "Testing sending spaces...",
sys.stdout.flush()
try:
x = mc.set("this has spaces", 1)
except Client.MemcachedKeyCharacterError, msg:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing sending control characters...",
try:
x = mc.set("this\x10has\x11control characters\x02", 1)
except Client.MemcachedKeyCharacterError, msg:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing using insanely long key...",
try:
x = mc.set('a'*SERVER_MAX_KEY_LENGTH + 'aaaa', 1)
except Client.MemcachedKeyLengthError, msg:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing sending a unicode-string key...",
try:
x = mc.set(u'keyhere', 1)
except Client.MemcachedStringEncodingError, msg:
print "OK",
else:
print "FAIL",; failures = failures + 1
try:
x = mc.set((u'a'*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
except:
print "FAIL",; failures = failures + 1
else:
print "OK",
import pickle
s = pickle.loads('V\\u4f1a\np0\n.')
try:
x = mc.set((s*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
except Client.MemcachedKeyLengthError:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing using a value larger than the memcached value limit...",
x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH)
if mc.get('keyhere') == None:
print "OK",
else:
print "FAIL",; failures = failures + 1
x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH + 'aaa')
if mc.get('keyhere') == None:
print "OK"
else:
print "FAIL"; failures = failures + 1
print "Testing set_multi() with no memcacheds running",
mc.disconnect_all()
errors = mc.set_multi({'keyhere' : 'a', 'keythere' : 'b'})
if errors != []:
print "FAIL"; failures = failures + 1
else:
print "OK"
print "Testing delete_multi() with no memcacheds running",
mc.disconnect_all()
ret = mc.delete_multi({'keyhere' : 'a', 'keythere' : 'b'})
if ret != 1:
print "FAIL"; failures = failures + 1
else:
print "OK"
if failures > 0:
print '*** THERE WERE FAILED TESTS'
sys.exit(1)
sys.exit(0)
# vim: ts=4 sw=4 et :
|
|
"""The tests for the pilight component."""
from datetime import timedelta
import logging
import socket
import unittest
import pytest
from homeassistant import core as ha
from homeassistant.components import pilight
from homeassistant.setup import setup_component
from homeassistant.util import dt as dt_util
from tests.async_mock import patch
from tests.common import assert_setup_component, get_test_home_assistant
_LOGGER = logging.getLogger(__name__)
class PilightDaemonSim:
"""Class to fake the interface of the pilight python package.
    It is used in an asyncio loop, so the mock cannot be inspected
    directly to determine whether methods were called.
    This is worked around in a hackish way by logging errors
    that can then be checked using logging.error mocks.
"""
callback = None
called = None
test_message = {
"protocol": "kaku_switch",
"uuid": "1-2-3-4",
"message": {"id": 0, "unit": 0, "off": 1},
}
def __init__(self, host, port):
"""Init pilight client, ignore parameters."""
def send_code(self, call): # pylint: disable=no-self-use
"""Handle pilight.send service callback."""
_LOGGER.error("PilightDaemonSim payload: %s", call)
def start(self):
"""Handle homeassistant.start callback.
Also sends one test message after start up
"""
_LOGGER.error("PilightDaemonSim start")
# Fake one code receive after daemon started
if not self.called:
self.callback(self.test_message)
self.called = True
def stop(self): # pylint: disable=no-self-use
"""Handle homeassistant.stop callback."""
_LOGGER.error("PilightDaemonSim stop")
def set_callback(self, function):
"""Handle pilight.pilight_received event callback."""
self.callback = function
_LOGGER.error("PilightDaemonSim callback: %s", function)
@pytest.mark.skip("Flaky")
class TestPilight(unittest.TestCase):
"""Test the Pilight component."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.skip_teardown_stop = False
self.addCleanup(self.tear_down_cleanup)
def tear_down_cleanup(self):
"""Stop everything that was started."""
if not self.skip_teardown_stop:
self.hass.stop()
@patch("homeassistant.components.pilight._LOGGER.error")
def test_connection_failed_error(self, mock_error):
"""Try to connect at 127.0.0.1:5001 with socket error."""
with assert_setup_component(4):
with patch(
"pilight.pilight.Client", side_effect=socket.error
) as mock_client:
assert not setup_component(
self.hass, pilight.DOMAIN, {pilight.DOMAIN: {}}
)
mock_client.assert_called_once_with(
host=pilight.DEFAULT_HOST, port=pilight.DEFAULT_PORT
)
assert mock_error.call_count == 1
@patch("homeassistant.components.pilight._LOGGER.error")
def test_connection_timeout_error(self, mock_error):
"""Try to connect at 127.0.0.1:5001 with socket timeout."""
with assert_setup_component(4):
with patch(
"pilight.pilight.Client", side_effect=socket.timeout
) as mock_client:
assert not setup_component(
self.hass, pilight.DOMAIN, {pilight.DOMAIN: {}}
)
mock_client.assert_called_once_with(
host=pilight.DEFAULT_HOST, port=pilight.DEFAULT_PORT
)
assert mock_error.call_count == 1
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.core._LOGGER.error")
@patch("homeassistant.components.pilight._LOGGER.error")
def test_send_code_no_protocol(self, mock_pilight_error, mock_error):
"""Try to send data without protocol information, should give error."""
with assert_setup_component(4):
assert setup_component(self.hass, pilight.DOMAIN, {pilight.DOMAIN: {}})
# Call without protocol info, should be ignored with error
self.hass.services.call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data={"noprotocol": "test", "value": 42},
blocking=True,
)
self.hass.block_till_done()
error_log_call = mock_error.call_args_list[-1]
assert "required key not provided @ data['protocol']" in str(error_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.components.pilight._LOGGER.error")
def test_send_code(self, mock_pilight_error):
"""Try to send proper data."""
with assert_setup_component(4):
assert setup_component(self.hass, pilight.DOMAIN, {pilight.DOMAIN: {}})
# Call with protocol info, should not give error
service_data = {"protocol": "test", "value": 42}
self.hass.services.call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data=service_data,
blocking=True,
)
self.hass.block_till_done()
error_log_call = mock_pilight_error.call_args_list[-1]
service_data["protocol"] = [service_data["protocol"]]
assert str(service_data) in str(error_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.components.pilight._LOGGER.error")
def test_send_code_fail(self, mock_pilight_error):
"""Check IOError exception error message."""
with assert_setup_component(4):
with patch("pilight.pilight.Client.send_code", side_effect=IOError):
assert setup_component(self.hass, pilight.DOMAIN, {pilight.DOMAIN: {}})
# Call with protocol info, should not give error
service_data = {"protocol": "test", "value": 42}
self.hass.services.call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data=service_data,
blocking=True,
)
self.hass.block_till_done()
error_log_call = mock_pilight_error.call_args_list[-1]
assert "Pilight send failed" in str(error_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.components.pilight._LOGGER.error")
def test_send_code_delay(self, mock_pilight_error):
"""Try to send proper data with delay afterwards."""
with assert_setup_component(4):
assert setup_component(
self.hass,
pilight.DOMAIN,
{pilight.DOMAIN: {pilight.CONF_SEND_DELAY: 5.0}},
)
# Call with protocol info, should not give error
service_data1 = {"protocol": "test11", "value": 42}
service_data2 = {"protocol": "test22", "value": 42}
self.hass.services.call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data=service_data1,
blocking=True,
)
self.hass.services.call(
pilight.DOMAIN,
pilight.SERVICE_NAME,
service_data=service_data2,
blocking=True,
)
service_data1["protocol"] = [service_data1["protocol"]]
service_data2["protocol"] = [service_data2["protocol"]]
self.hass.bus.fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: dt_util.utcnow()})
self.hass.block_till_done()
error_log_call = mock_pilight_error.call_args_list[-1]
assert str(service_data1) in str(error_log_call)
new_time = dt_util.utcnow() + timedelta(seconds=5)
self.hass.bus.fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: new_time})
self.hass.block_till_done()
error_log_call = mock_pilight_error.call_args_list[-1]
assert str(service_data2) in str(error_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.components.pilight._LOGGER.error")
def test_start_stop(self, mock_pilight_error):
"""Check correct startup and stop of pilight daemon."""
with assert_setup_component(4):
assert setup_component(self.hass, pilight.DOMAIN, {pilight.DOMAIN: {}})
# Test startup
self.hass.start()
self.hass.block_till_done()
error_log_call = mock_pilight_error.call_args_list[-2]
assert "PilightDaemonSim callback" in str(error_log_call)
error_log_call = mock_pilight_error.call_args_list[-1]
assert "PilightDaemonSim start" in str(error_log_call)
# Test stop
self.skip_teardown_stop = True
self.hass.stop()
error_log_call = mock_pilight_error.call_args_list[-1]
assert "PilightDaemonSim stop" in str(error_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.core._LOGGER.info")
def test_receive_code(self, mock_info):
"""Check if code receiving via pilight daemon works."""
with assert_setup_component(4):
assert setup_component(self.hass, pilight.DOMAIN, {pilight.DOMAIN: {}})
# Test startup
self.hass.start()
self.hass.block_till_done()
expected_message = dict(
{
"protocol": PilightDaemonSim.test_message["protocol"],
"uuid": PilightDaemonSim.test_message["uuid"],
},
**PilightDaemonSim.test_message["message"],
)
error_log_call = mock_info.call_args_list[-1]
# Check if all message parts are put on event bus
for key, value in expected_message.items():
assert str(key) in str(error_log_call)
assert str(value) in str(error_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.core._LOGGER.info")
def test_whitelist_exact_match(self, mock_info):
"""Check whitelist filter with matched data."""
with assert_setup_component(4):
whitelist = {
"protocol": [PilightDaemonSim.test_message["protocol"]],
"uuid": [PilightDaemonSim.test_message["uuid"]],
"id": [PilightDaemonSim.test_message["message"]["id"]],
"unit": [PilightDaemonSim.test_message["message"]["unit"]],
}
assert setup_component(
self.hass, pilight.DOMAIN, {pilight.DOMAIN: {"whitelist": whitelist}}
)
self.hass.start()
self.hass.block_till_done()
expected_message = dict(
{
"protocol": PilightDaemonSim.test_message["protocol"],
"uuid": PilightDaemonSim.test_message["uuid"],
},
**PilightDaemonSim.test_message["message"],
)
info_log_call = mock_info.call_args_list[-1]
# Check if all message parts are put on event bus
for key, value in expected_message.items():
assert str(key) in str(info_log_call)
assert str(value) in str(info_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.core._LOGGER.info")
def test_whitelist_partial_match(self, mock_info):
"""Check whitelist filter with partially matched data, should work."""
with assert_setup_component(4):
whitelist = {
"protocol": [PilightDaemonSim.test_message["protocol"]],
"id": [PilightDaemonSim.test_message["message"]["id"]],
}
assert setup_component(
self.hass, pilight.DOMAIN, {pilight.DOMAIN: {"whitelist": whitelist}}
)
self.hass.start()
self.hass.block_till_done()
expected_message = dict(
{
"protocol": PilightDaemonSim.test_message["protocol"],
"uuid": PilightDaemonSim.test_message["uuid"],
},
**PilightDaemonSim.test_message["message"],
)
info_log_call = mock_info.call_args_list[-1]
# Check if all message parts are put on event bus
for key, value in expected_message.items():
assert str(key) in str(info_log_call)
assert str(value) in str(info_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.core._LOGGER.info")
def test_whitelist_or_match(self, mock_info):
"""Check whitelist filter with several subsection, should work."""
with assert_setup_component(4):
whitelist = {
"protocol": [
PilightDaemonSim.test_message["protocol"],
"other_protocol",
],
"id": [PilightDaemonSim.test_message["message"]["id"]],
}
assert setup_component(
self.hass, pilight.DOMAIN, {pilight.DOMAIN: {"whitelist": whitelist}}
)
self.hass.start()
self.hass.block_till_done()
expected_message = dict(
{
"protocol": PilightDaemonSim.test_message["protocol"],
"uuid": PilightDaemonSim.test_message["uuid"],
},
**PilightDaemonSim.test_message["message"],
)
info_log_call = mock_info.call_args_list[-1]
# Check if all message parts are put on event bus
for key, value in expected_message.items():
assert str(key) in str(info_log_call)
assert str(value) in str(info_log_call)
@patch("pilight.pilight.Client", PilightDaemonSim)
@patch("homeassistant.core._LOGGER.info")
def test_whitelist_no_match(self, mock_info):
"""Check whitelist filter with unmatched data, should not work."""
with assert_setup_component(4):
whitelist = {
"protocol": ["wrong_protocol"],
"id": [PilightDaemonSim.test_message["message"]["id"]],
}
assert setup_component(
self.hass, pilight.DOMAIN, {pilight.DOMAIN: {"whitelist": whitelist}}
)
self.hass.start()
self.hass.block_till_done()
info_log_call = mock_info.call_args_list[-1]
assert not ("Event pilight_received" in info_log_call)
class TestPilightCallrateThrottler(unittest.TestCase):
"""Test the Throttler used to throttle calls to send_code."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.addCleanup(self.hass.stop)
def test_call_rate_delay_throttle_disabled(self):
"""Test that the limiter is a noop if no delay set."""
runs = []
limit = pilight.CallRateDelayThrottle(self.hass, 0.0)
action = limit.limited(lambda x: runs.append(x))
for i in range(3):
action(i)
assert runs == [0, 1, 2]
def test_call_rate_delay_throttle_enabled(self):
"""Test that throttling actually work."""
runs = []
delay = 5.0
limit = pilight.CallRateDelayThrottle(self.hass, delay)
action = limit.limited(lambda x: runs.append(x))
for i in range(3):
action(i)
assert runs == []
exp = []
now = dt_util.utcnow()
for i in range(3):
exp.append(i)
shifted_time = now + (timedelta(seconds=delay + 0.1) * i)
self.hass.bus.fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: shifted_time})
self.hass.block_till_done()
assert runs == exp
|
|
# encoding: utf-8
"""The shape tree, the structure that holds a slide's shapes."""
import os
from pptx.compat import BytesIO
from pptx.enum.shapes import PP_PLACEHOLDER, PROG_ID
from pptx.media import SPEAKER_IMAGE_BYTES, Video
from pptx.opc.constants import CONTENT_TYPE as CT
from pptx.oxml.ns import qn
from pptx.oxml.shapes.graphfrm import CT_GraphicalObjectFrame
from pptx.oxml.shapes.picture import CT_Picture
from pptx.oxml.simpletypes import ST_Direction
from pptx.shapes.autoshape import AutoShapeType, Shape
from pptx.shapes.base import BaseShape
from pptx.shapes.connector import Connector
from pptx.shapes.freeform import FreeformBuilder
from pptx.shapes.graphfrm import GraphicFrame
from pptx.shapes.group import GroupShape
from pptx.shapes.picture import Movie, Picture
from pptx.shapes.placeholder import (
ChartPlaceholder,
LayoutPlaceholder,
MasterPlaceholder,
NotesSlidePlaceholder,
PicturePlaceholder,
PlaceholderGraphicFrame,
PlaceholderPicture,
SlidePlaceholder,
TablePlaceholder,
)
from pptx.shared import ParentedElementProxy
from pptx.util import Emu, lazyproperty
# +-- _BaseShapes
# | |
# | +-- _BaseGroupShapes
# | | |
# | | +-- GroupShapes
# | | |
# | | +-- SlideShapes
# | |
# | +-- LayoutShapes
# | |
# | +-- MasterShapes
# | |
# | +-- NotesSlideShapes
# | |
# | +-- BasePlaceholders
# | |
# | +-- LayoutPlaceholders
# | |
# | +-- MasterPlaceholders
# | |
# | +-- NotesSlidePlaceholders
# |
# +-- SlidePlaceholders
class _BaseShapes(ParentedElementProxy):
"""
Base class for a shape collection appearing in a slide-type object,
    including Slide, SlideLayout, and SlideMaster, providing common methods.
"""
def __init__(self, spTree, parent):
super(_BaseShapes, self).__init__(spTree, parent)
self._spTree = spTree
self._cached_max_shape_id = None
def __getitem__(self, idx):
"""
Return shape at *idx* in sequence, e.g. ``shapes[2]``.
"""
shape_elms = list(self._iter_member_elms())
try:
shape_elm = shape_elms[idx]
except IndexError:
raise IndexError("shape index out of range")
return self._shape_factory(shape_elm)
def __iter__(self):
"""
Generate a reference to each shape in the collection, in sequence.
"""
for shape_elm in self._iter_member_elms():
yield self._shape_factory(shape_elm)
def __len__(self):
"""
Return count of shapes in this shape tree. A group shape contributes
1 to the total, without regard to the number of shapes contained in
the group.
"""
shape_elms = list(self._iter_member_elms())
return len(shape_elms)
def clone_placeholder(self, placeholder):
"""Add a new placeholder shape based on *placeholder*."""
sp = placeholder.element
ph_type, orient, sz, idx = (sp.ph_type, sp.ph_orient, sp.ph_sz, sp.ph_idx)
id_ = self._next_shape_id
name = self._next_ph_name(ph_type, id_, orient)
self._spTree.add_placeholder(id_, name, ph_type, orient, sz, idx)
def ph_basename(self, ph_type):
"""
Return the base name for a placeholder of *ph_type* in this shape
collection. There is some variance between slide types, for example
a notes slide uses a different name for the body placeholder, so this
        method can be overridden by subclasses.
"""
return {
PP_PLACEHOLDER.BITMAP: "ClipArt Placeholder",
PP_PLACEHOLDER.BODY: "Text Placeholder",
PP_PLACEHOLDER.CENTER_TITLE: "Title",
PP_PLACEHOLDER.CHART: "Chart Placeholder",
PP_PLACEHOLDER.DATE: "Date Placeholder",
PP_PLACEHOLDER.FOOTER: "Footer Placeholder",
PP_PLACEHOLDER.HEADER: "Header Placeholder",
PP_PLACEHOLDER.MEDIA_CLIP: "Media Placeholder",
PP_PLACEHOLDER.OBJECT: "Content Placeholder",
PP_PLACEHOLDER.ORG_CHART: "SmartArt Placeholder",
PP_PLACEHOLDER.PICTURE: "Picture Placeholder",
PP_PLACEHOLDER.SLIDE_NUMBER: "Slide Number Placeholder",
PP_PLACEHOLDER.SUBTITLE: "Subtitle",
PP_PLACEHOLDER.TABLE: "Table Placeholder",
PP_PLACEHOLDER.TITLE: "Title",
}[ph_type]
@property
def turbo_add_enabled(self):
"""True if "turbo-add" mode is enabled. Read/Write.
EXPERIMENTAL: This feature can radically improve performance when
        adding large numbers of shapes (hundreds or more) to a slide. It works by
caching the last shape ID used and incrementing that value to assign
the next shape id. This avoids repeatedly searching all shape ids in
the slide each time a new ID is required.
Performance is not noticeably improved for a slide with a relatively
small number of shapes, but because the search time rises with the
square of the shape count, this option can be useful for optimizing
generation of a slide composed of many shapes.
Shape-id collisions can occur (causing a repair error on load) if
more than one |Slide| object is used to interact with the same slide
in the presentation. Note that the |Slides| collection creates a new
|Slide| object each time a slide is accessed
        (e.g. `slide = prs.slides[0]`), so you must be careful to limit use to
a single |Slide| object.
"""
return self._cached_max_shape_id is not None
@turbo_add_enabled.setter
def turbo_add_enabled(self, value):
enable = bool(value)
self._cached_max_shape_id = self._spTree.max_shape_id if enable else None
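    # Editor's sketch (not part of the original module): enabling turbo-add before a
    # bulk insert. `prs`, `boxes`, and MSO_SHAPE are assumed to be defined/imported by
    # the caller; add_shape() is provided by the slide shape tree elsewhere in this package.
    #
    #     shapes = prs.slides[0].shapes
    #     shapes.turbo_add_enabled = True
    #     for left, top, width, height in boxes:
    #         shapes.add_shape(MSO_SHAPE.RECTANGLE, left, top, width, height)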
@staticmethod
def _is_member_elm(shape_elm):
"""
Return true if *shape_elm* represents a member of this collection,
False otherwise.
"""
return True
def _iter_member_elms(self):
"""
Generate each child of the ``<p:spTree>`` element that corresponds to
a shape, in the sequence they appear in the XML.
"""
for shape_elm in self._spTree.iter_shape_elms():
if self._is_member_elm(shape_elm):
yield shape_elm
def _next_ph_name(self, ph_type, id, orient):
"""
Next unique placeholder name for placeholder shape of type *ph_type*,
with id number *id* and orientation *orient*. Usually will be standard
placeholder root name suffixed with id-1, e.g.
_next_ph_name(ST_PlaceholderType.TBL, 4, 'horz') ==>
'Table Placeholder 3'. The number is incremented as necessary to make
the name unique within the collection. If *orient* is ``'vert'``, the
placeholder name is prefixed with ``'Vertical '``.
"""
basename = self.ph_basename(ph_type)
# prefix rootname with 'Vertical ' if orient is 'vert'
if orient == ST_Direction.VERT:
basename = "Vertical %s" % basename
# increment numpart as necessary to make name unique
numpart = id - 1
names = self._spTree.xpath("//p:cNvPr/@name")
while True:
name = "%s %d" % (basename, numpart)
if name not in names:
break
numpart += 1
return name
@property
def _next_shape_id(self):
"""Return a unique shape id suitable for use with a new shape.
The returned id is 1 greater than the maximum shape id used so far.
In practice, the minimum id is 2 because the spTree element is always
assigned id="1".
"""
# ---presence of cached-max-shape-id indicates turbo mode is on---
if self._cached_max_shape_id is not None:
self._cached_max_shape_id += 1
return self._cached_max_shape_id
return self._spTree.max_shape_id + 1
def _shape_factory(self, shape_elm):
"""
Return an instance of the appropriate shape proxy class for
*shape_elm*.
"""
return BaseShapeFactory(shape_elm, self)
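# Illustrative sketch (not part of the library): the turbo-add workflow
# described by `turbo_add_enabled` above. Enabling it before bulk shape
# creation makes `_next_shape_id` increment a cached value instead of
# re-scanning the shape tree for each new shape. The `slide` argument and
# the 300-shape count are hypothetical.
def _example_bulk_add_textboxes(slide, count=300):
    """Add *count* textboxes using turbo-add mode, then switch it back off."""
    shapes = slide.shapes
    shapes.turbo_add_enabled = True
    try:
        for i in range(count):
            # 914400 EMU == 1 inch; stack the boxes vertically
            shapes.add_textbox(0, i * 914400, 914400, 914400)
    finally:
        shapes.turbo_add_enabled = False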
class _BaseGroupShapes(_BaseShapes):
"""Base class for shape-trees that can add shapes."""
def __init__(self, grpSp, parent):
super(_BaseGroupShapes, self).__init__(grpSp, parent)
self._grpSp = grpSp
def add_chart(self, chart_type, x, y, cx, cy, chart_data):
"""Add a new chart of *chart_type* to the slide.
The chart is positioned at (*x*, *y*), has size (*cx*, *cy*), and
depicts *chart_data*. *chart_type* is one of the :ref:`XlChartType`
enumeration values. *chart_data* is a |ChartData| object populated
with the categories and series values for the chart.
Note that a |GraphicFrame| shape object is returned, not the |Chart|
object contained in that graphic frame shape. The chart object may be
accessed using the :attr:`chart` property of the returned
|GraphicFrame| object.
"""
rId = self.part.add_chart_part(chart_type, chart_data)
graphicFrame = self._add_chart_graphicFrame(rId, x, y, cx, cy)
self._recalculate_extents()
return self._shape_factory(graphicFrame)
def add_connector(self, connector_type, begin_x, begin_y, end_x, end_y):
"""Add a newly created connector shape to the end of this shape tree.
*connector_type* is a member of the :ref:`MsoConnectorType`
enumeration and the end-point values are specified as EMU values. The
returned connector is of type *connector_type* and has begin and end
points as specified.
"""
cxnSp = self._add_cxnSp(connector_type, begin_x, begin_y, end_x, end_y)
self._recalculate_extents()
return self._shape_factory(cxnSp)
def add_group_shape(self, shapes=[]):
"""Return a |GroupShape| object newly appended to this shape tree.
The group shape is empty and must be populated with shapes using
methods on its shape tree, available on its `.shapes` property. The
position and extents of the group shape are determined by the shapes
it contains; its position and extents are recalculated each time
a shape is added to it.
"""
grpSp = self._element.add_grpSp()
for shape in shapes:
grpSp.insert_element_before(shape._element, "p:extLst")
if shapes:
grpSp.recalculate_extents()
return self._shape_factory(grpSp)
def add_ole_object(
self, object_file, prog_id, left, top, width=None, height=None, icon_file=None
):
"""Return newly-created GraphicFrame shape embedding `object_file`.
The returned graphic-frame shape contains `object_file` as an embedded OLE
object. It is displayed as an icon at `left`, `top` with size `width`, `height`.
`width` and `height` may be omitted when `prog_id` is a member of `PROG_ID`, in
which case the default icon size is used. Omitting them is advised where
applicable because it avoids an icon with a "stretched" appearance.
`object_file` may either be a str path to the file or a file-like
object (such as `io.BytesIO`) containing the bytes of the object file.
`prog_id` can be either a member of `pptx.enum.shapes.PROG_ID` or a str value
like `"Adobe.Exchange.7"` determined by inspecting the XML generated by
PowerPoint for an object of the desired type.
`icon_file` may either be a str path to an image file or a file-like object
containing the image. The image provided will be displayed in lieu of the OLE
object; double-clicking on the image opens the object (subject to
operating-system limitations). The image file can be any supported image file.
Those produced by PowerPoint itself are generally EMF and can be harvested from
a PPTX package that embeds such an object. PNG and JPG also work fine.
"""
graphicFrame = _OleObjectElementCreator.graphicFrame(
self,
self._next_shape_id,
object_file,
prog_id,
left,
top,
width,
height,
icon_file,
)
self._spTree.append(graphicFrame)
self._recalculate_extents()
return self._shape_factory(graphicFrame)
def add_picture(self, image_file, left, top, width=None, height=None):
"""Add picture shape displaying image in *image_file*.
*image_file* can be either a path to a file (a string) or a file-like
object. The picture is positioned with its top-left corner at (*top*,
*left*). If *width* and *height* are both |None|, the native size of
the image is used. If only one of *width* or *height* is used, the
unspecified dimension is calculated to preserve the aspect ratio of
the image. If both are specified, the picture is stretched to fit,
without regard to its native aspect ratio.
"""
image_part, rId = self.part.get_or_add_image_part(image_file)
pic = self._add_pic_from_image_part(image_part, rId, left, top, width, height)
self._recalculate_extents()
return self._shape_factory(pic)
def add_shape(self, autoshape_type_id, left, top, width, height):
"""Return new |Shape| object appended to this shape tree.
*autoshape_type_id* is a member of :ref:`MsoAutoShapeType` e.g.
``MSO_SHAPE.RECTANGLE`` specifying the type of shape to be added. The
remaining arguments specify the new shape's position and size.
"""
autoshape_type = AutoShapeType(autoshape_type_id)
sp = self._add_sp(autoshape_type, left, top, width, height)
self._recalculate_extents()
return self._shape_factory(sp)
def add_textbox(self, left, top, width, height):
"""Return newly added text box shape appended to this shape tree.
The text box is of the specified size, located at the specified
position on the slide.
"""
sp = self._add_textbox_sp(left, top, width, height)
self._recalculate_extents()
return self._shape_factory(sp)
def build_freeform(self, start_x=0, start_y=0, scale=1.0):
"""Return |FreeformBuilder| object to specify a freeform shape.
The optional *start_x* and *start_y* arguments specify the starting
pen position in local coordinates. They will be rounded to the
nearest integer before use and each default to zero.
The optional *scale* argument specifies the size of local coordinates
proportional to slide coordinates (EMU). If the vertical scale is
different than the horizontal scale (local coordinate units are
"rectangular"), a pair of numeric values can be provided as the
*scale* argument, e.g. `scale=(1.0, 2.0)`. In this case the first
number is interpreted as the horizontal (X) scale and the second as
the vertical (Y) scale.
A convenient method for calculating scale is to divide a |Length|
object by an equivalent count of local coordinate units, e.g.
`scale = Inches(1)/1000` for 1000 local units per inch.
"""
try:
x_scale, y_scale = scale
except TypeError:
x_scale = y_scale = scale
return FreeformBuilder.new(self, start_x, start_y, x_scale, y_scale)
def index(self, shape):
"""Return the index of *shape* in this sequence.
Raises |ValueError| if *shape* is not in the collection.
"""
shape_elms = list(self._element.iter_shape_elms())
return shape_elms.index(shape.element)
def _add_chart_graphicFrame(self, rId, x, y, cx, cy):
"""Return new `p:graphicFrame` element appended to this shape tree.
The `p:graphicFrame` element has the specified position and size and
refers to the chart part identified by *rId*.
"""
shape_id = self._next_shape_id
name = "Chart %d" % (shape_id - 1)
graphicFrame = CT_GraphicalObjectFrame.new_chart_graphicFrame(
shape_id, name, rId, x, y, cx, cy
)
self._spTree.append(graphicFrame)
return graphicFrame
def _add_cxnSp(self, connector_type, begin_x, begin_y, end_x, end_y):
"""Return a newly-added `p:cxnSp` element as specified.
The `p:cxnSp` element is for a connector of *connector_type*
beginning at (*begin_x*, *begin_y*) and extending to
(*end_x*, *end_y*).
"""
id_ = self._next_shape_id
name = "Connector %d" % (id_ - 1)
flipH, flipV = begin_x > end_x, begin_y > end_y
x, y = min(begin_x, end_x), min(begin_y, end_y)
cx, cy = abs(end_x - begin_x), abs(end_y - begin_y)
return self._element.add_cxnSp(
id_, name, connector_type, x, y, cx, cy, flipH, flipV
)
def _add_pic_from_image_part(self, image_part, rId, x, y, cx, cy):
"""Return a newly appended `p:pic` element as specified.
The `p:pic` element displays the image in *image_part* with size and
position specified by *x*, *y*, *cx*, and *cy*. The element is
appended to the shape tree, causing it to be displayed first in
z-order on the slide.
"""
id_ = self._next_shape_id
scaled_cx, scaled_cy = image_part.scale(cx, cy)
name = "Picture %d" % (id_ - 1)
desc = image_part.desc
pic = self._grpSp.add_pic(id_, name, desc, rId, x, y, scaled_cx, scaled_cy)
return pic
def _add_sp(self, autoshape_type, x, y, cx, cy):
"""Return newly-added `p:sp` element as specified.
`p:sp` element is of *autoshape_type* at position (*x*, *y*) and of
size (*cx*, *cy*).
"""
id_ = self._next_shape_id
name = "%s %d" % (autoshape_type.basename, id_ - 1)
sp = self._grpSp.add_autoshape(id_, name, autoshape_type.prst, x, y, cx, cy)
return sp
def _add_textbox_sp(self, x, y, cx, cy):
"""Return newly-appended textbox `p:sp` element.
Element has position (*x*, *y*) and size (*cx*, *cy*).
"""
id_ = self._next_shape_id
name = "TextBox %d" % (id_ - 1)
sp = self._spTree.add_textbox(id_, name, x, y, cx, cy)
return sp
def _recalculate_extents(self):
"""Adjust position and size to incorporate all contained shapes.
This would typically be called when a contained shape is added,
removed, or its position or size updated.
"""
# ---default behavior is to do nothing, GroupShapes overrides to
# produce the distinctive behavior of groups and subgroups.---
pass
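# Illustrative sketch (not part of the library): the local-coordinate scaling
# described in `build_freeform()` above, using 1000 local units per inch as in
# its docstring. The vertex values and the `shapes` argument are hypothetical.
def _example_freeform_triangle(shapes):
    """Draw a small right triangle whose pen positions are given in
    thousandths of an inch."""
    from pptx.util import Inches
    builder = shapes.build_freeform(start_x=0, start_y=0, scale=Inches(1) / 1000)
    builder.add_line_segments([(1000, 0), (0, 1000), (0, 0)], close=True)
    return builder.convert_to_shape(origin_x=Inches(1), origin_y=Inches(1))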
class GroupShapes(_BaseGroupShapes):
"""The sequence of child shapes belonging to a group shape.
Note that this collection can itself contain a group shape, making this
part of a recursive, tree data structure (acyclic graph).
"""
def _recalculate_extents(self):
"""Adjust position and size to incorporate all contained shapes.
This would typically be called when a contained shape is added,
removed, or its position or size updated.
"""
self._grpSp.recalculate_extents()
class SlideShapes(_BaseGroupShapes):
"""Sequence of shapes appearing on a slide.
The first shape in the sequence is the backmost in z-order and the last
shape is topmost. Supports indexed access, len(), index(), and iteration.
"""
def add_movie(
self,
movie_file,
left,
top,
width,
height,
poster_frame_image=None,
mime_type=CT.VIDEO,
):
"""Return newly added movie shape displaying video in *movie_file*.
**EXPERIMENTAL.** This method has important limitations:
* The size must be specified; no auto-scaling such as that provided
by :meth:`add_picture` is performed.
* The MIME type of the video file should be specified, e.g.
'video/mp4'. The provided video file is not interrogated for its
type. The MIME type `video/unknown` is used by default (and works
fine in tests as of this writing).
* A poster frame image cannot be automatically extracted from the
video file. If no poster frame image is provided, the default
"media loudspeaker" image will be used.
Return a newly added movie shape to the slide, positioned at (*left*,
*top*), having size (*width*, *height*), and containing *movie_file*.
Before the video is started, *poster_frame_image* is displayed as
a placeholder for the video.
"""
movie_pic = _MoviePicElementCreator.new_movie_pic(
self,
self._next_shape_id,
movie_file,
left,
top,
width,
height,
poster_frame_image,
mime_type,
)
self._spTree.append(movie_pic)
self._add_video_timing(movie_pic)
return self._shape_factory(movie_pic)
def add_table(self, rows, cols, left, top, width, height):
"""
Add a |GraphicFrame| object containing a table with the specified
number of *rows* and *cols* and the specified position and size.
*width* is evenly distributed between the columns of the new table.
Likewise, *height* is evenly distributed between the rows. Note that
the ``.table`` property on the returned |GraphicFrame| shape must be
used to access the enclosed |Table| object.
"""
graphicFrame = self._add_graphicFrame_containing_table(
rows, cols, left, top, width, height
)
graphic_frame = self._shape_factory(graphicFrame)
return graphic_frame
def clone_layout_placeholders(self, slide_layout):
"""
Add placeholder shapes based on those in *slide_layout*. Z-order of
placeholders is preserved. Latent placeholders (date, slide number,
and footer) are not cloned.
"""
for placeholder in slide_layout.iter_cloneable_placeholders():
self.clone_placeholder(placeholder)
@property
def placeholders(self):
"""
Instance of |SlidePlaceholders| containing sequence of placeholder
shapes in this slide.
"""
return self.parent.placeholders
@property
def title(self):
"""
The title placeholder shape on the slide or |None| if the slide has
no title placeholder.
"""
for elm in self._spTree.iter_ph_elms():
if elm.ph_idx == 0:
return self._shape_factory(elm)
return None
def _add_graphicFrame_containing_table(self, rows, cols, x, y, cx, cy):
"""
Return a newly added ``<p:graphicFrame>`` element containing a table
as specified by the parameters.
"""
_id = self._next_shape_id
name = "Table %d" % (_id - 1)
graphicFrame = self._spTree.add_table(_id, name, rows, cols, x, y, cx, cy)
return graphicFrame
def _add_video_timing(self, pic):
"""Add a `p:video` element under `p:sld/p:timing`.
The element will refer to the specified *pic* element by its shape
id, and cause the video play controls to appear for that video.
"""
sld = self._spTree.xpath("/p:sld")[0]
childTnLst = sld.get_or_add_childTnLst()
childTnLst.add_video(pic.shape_id)
def _shape_factory(self, shape_elm):
"""
Return an instance of the appropriate shape proxy class for
*shape_elm*.
"""
return SlideShapeFactory(shape_elm, self)
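# Illustrative sketch (not part of the library): calling `add_movie()` above
# with an explicit MIME type and poster frame, since neither is derived from
# the video file automatically. The file names are hypothetical.
def _example_add_movie(slide):
    """Add a 4x3-inch movie shape with an explicit poster frame image."""
    from pptx.util import Inches
    return slide.shapes.add_movie(
        "clip.mp4",
        Inches(1), Inches(1), Inches(4), Inches(3),
        poster_frame_image="poster.png",
        mime_type="video/mp4",
    )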
class LayoutShapes(_BaseShapes):
"""
Sequence of shapes appearing on a slide layout. The first shape in the
sequence is the backmost in z-order and the last shape is topmost.
Supports indexed access, len(), index(), and iteration.
"""
def _shape_factory(self, shape_elm):
"""
Return an instance of the appropriate shape proxy class for
*shape_elm*.
"""
return _LayoutShapeFactory(shape_elm, self)
class MasterShapes(_BaseShapes):
"""
Sequence of shapes appearing on a slide master. The first shape in the
sequence is the backmost in z-order and the last shape is topmost.
Supports indexed access, len(), and iteration.
"""
def _shape_factory(self, shape_elm):
"""
Return an instance of the appropriate shape proxy class for
*shape_elm*.
"""
return _MasterShapeFactory(shape_elm, self)
class NotesSlideShapes(_BaseShapes):
"""
Sequence of shapes appearing on a notes slide. The first shape in the
sequence is the backmost in z-order and the last shape is topmost.
Supports indexed access, len(), index(), and iteration.
"""
def ph_basename(self, ph_type):
"""
Return the base name for a placeholder of *ph_type* in this shape
collection. A notes slide uses a different name for the body
placeholder and has some unique placeholder types, so this
method overrides the default in the base class.
"""
return {
PP_PLACEHOLDER.BODY: "Notes Placeholder",
PP_PLACEHOLDER.DATE: "Date Placeholder",
PP_PLACEHOLDER.FOOTER: "Footer Placeholder",
PP_PLACEHOLDER.HEADER: "Header Placeholder",
PP_PLACEHOLDER.SLIDE_IMAGE: "Slide Image Placeholder",
PP_PLACEHOLDER.SLIDE_NUMBER: "Slide Number Placeholder",
}[ph_type]
def _shape_factory(self, shape_elm):
"""
Return an instance of the appropriate shape proxy class for
*shape_elm* appearing on a notes slide.
"""
return _NotesSlideShapeFactory(shape_elm, self)
class BasePlaceholders(_BaseShapes):
"""
Base class for placeholder collections that differentiate behaviors for
a master, layout, and slide. By default, placeholder shapes are
constructed using |BaseShapeFactory|. Subclasses should override
:meth:`_shape_factory` to use custom placeholder classes.
"""
@staticmethod
def _is_member_elm(shape_elm):
"""
True if *shape_elm* is a placeholder shape, False otherwise.
"""
return shape_elm.has_ph_elm
class LayoutPlaceholders(BasePlaceholders):
"""
Sequence of |LayoutPlaceholder| instances representing the placeholder
shapes on a slide layout.
"""
def get(self, idx, default=None):
"""
Return the first placeholder shape with matching *idx* value, or
*default* if not found.
"""
for placeholder in self:
if placeholder.element.ph_idx == idx:
return placeholder
return default
def _shape_factory(self, shape_elm):
"""
Return an instance of the appropriate shape proxy class for
*shape_elm*.
"""
return _LayoutShapeFactory(shape_elm, self)
class MasterPlaceholders(BasePlaceholders):
"""
Sequence of _MasterPlaceholder instances representing the placeholder
shapes on a slide master.
"""
def get(self, ph_type, default=None):
"""
Return the first placeholder shape with type *ph_type* (e.g. 'body'),
or *default* if no such placeholder shape is present in the
collection.
"""
for placeholder in self:
if placeholder.ph_type == ph_type:
return placeholder
return default
def _shape_factory(self, shape_elm):
"""
Return an instance of the appropriate shape proxy class for
*shape_elm*.
"""
return _MasterShapeFactory(shape_elm, self)
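# Illustrative sketch (not part of the library): `MasterPlaceholders.get()`
# above looks a placeholder up by its type rather than by idx. The
# `slide_master` argument is hypothetical.
def _example_master_body_placeholder(slide_master):
    """Return the master's body placeholder, falling back to its title
    placeholder when no body placeholder is present."""
    from pptx.enum.shapes import PP_PLACEHOLDER
    placeholders = slide_master.placeholders
    body = placeholders.get(PP_PLACEHOLDER.BODY)
    return body if body is not None else placeholders.get(PP_PLACEHOLDER.TITLE)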
class NotesSlidePlaceholders(MasterPlaceholders):
"""
Sequence of placeholder shapes on a notes slide.
"""
def _shape_factory(self, placeholder_elm):
"""
Return an instance of the appropriate placeholder proxy class for
*placeholder_elm*.
"""
return _NotesSlideShapeFactory(placeholder_elm, self)
class SlidePlaceholders(ParentedElementProxy):
"""
Collection of placeholder shapes on a slide. Supports iteration,
:func:`len`, and dictionary-style lookup on the `idx` value of the
placeholders it contains.
"""
def __getitem__(self, idx):
"""
Access placeholder shape having *idx*. Note that while this looks
like list access, idx is actually a dictionary key and will raise
|KeyError| if no placeholder with that idx value is in the
collection.
"""
for e in self._element.iter_ph_elms():
if e.ph_idx == idx:
return SlideShapeFactory(e, self)
raise KeyError("no placeholder on this slide with idx == %d" % idx)
def __iter__(self):
"""
Generate placeholder shapes in `idx` order.
"""
ph_elms = sorted(
[e for e in self._element.iter_ph_elms()], key=lambda e: e.ph_idx
)
return (SlideShapeFactory(e, self) for e in ph_elms)
def __len__(self):
"""
Return count of placeholder shapes.
"""
return len(list(self._element.iter_ph_elms()))
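# Illustrative sketch (not part of the library): `SlidePlaceholders` above is
# keyed by placeholder idx, not by list position, so a lookup that misses
# raises KeyError. The `slide` argument is hypothetical.
def _example_title_placeholder(slide):
    """Return the title placeholder (idx 0) or None, plus all idx values."""
    try:
        title = slide.placeholders[0]
    except KeyError:
        title = None
    return title, [ph.placeholder_format.idx for ph in slide.placeholders]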
def BaseShapeFactory(shape_elm, parent):
"""
Return an instance of the appropriate shape proxy class for *shape_elm*.
"""
tag = shape_elm.tag
if tag == qn("p:pic"):
videoFiles = shape_elm.xpath("./p:nvPicPr/p:nvPr/a:videoFile")
if videoFiles:
return Movie(shape_elm, parent)
return Picture(shape_elm, parent)
shape_cls = {
qn("p:cxnSp"): Connector,
qn("p:grpSp"): GroupShape,
qn("p:sp"): Shape,
qn("p:graphicFrame"): GraphicFrame,
}.get(tag, BaseShape)
return shape_cls(shape_elm, parent)
def _LayoutShapeFactory(shape_elm, parent):
"""
Return an instance of the appropriate shape proxy class for *shape_elm*
on a slide layout.
"""
tag_name = shape_elm.tag
if tag_name == qn("p:sp") and shape_elm.has_ph_elm:
return LayoutPlaceholder(shape_elm, parent)
return BaseShapeFactory(shape_elm, parent)
def _MasterShapeFactory(shape_elm, parent):
"""
Return an instance of the appropriate shape proxy class for *shape_elm*
on a slide master.
"""
tag_name = shape_elm.tag
if tag_name == qn("p:sp") and shape_elm.has_ph_elm:
return MasterPlaceholder(shape_elm, parent)
return BaseShapeFactory(shape_elm, parent)
def _NotesSlideShapeFactory(shape_elm, parent):
"""
Return an instance of the appropriate shape proxy class for *shape_elm*
on a notes slide.
"""
tag_name = shape_elm.tag
if tag_name == qn("p:sp") and shape_elm.has_ph_elm:
return NotesSlidePlaceholder(shape_elm, parent)
return BaseShapeFactory(shape_elm, parent)
def _SlidePlaceholderFactory(shape_elm, parent):
"""
Return a placeholder shape of the appropriate type for *shape_elm*.
"""
tag = shape_elm.tag
if tag == qn("p:sp"):
Constructor = {
PP_PLACEHOLDER.BITMAP: PicturePlaceholder,
PP_PLACEHOLDER.CHART: ChartPlaceholder,
PP_PLACEHOLDER.PICTURE: PicturePlaceholder,
PP_PLACEHOLDER.TABLE: TablePlaceholder,
}.get(shape_elm.ph_type, SlidePlaceholder)
elif tag == qn("p:graphicFrame"):
Constructor = PlaceholderGraphicFrame
elif tag == qn("p:pic"):
Constructor = PlaceholderPicture
else:
Constructor = BaseShapeFactory
return Constructor(shape_elm, parent)
def SlideShapeFactory(shape_elm, parent):
"""
Return an instance of the appropriate shape proxy class for *shape_elm*
on a slide.
"""
if shape_elm.has_ph_elm:
return _SlidePlaceholderFactory(shape_elm, parent)
return BaseShapeFactory(shape_elm, parent)
class _MoviePicElementCreator(object):
"""Functional service object for creating a new movie p:pic element.
Its entire external interface is its :meth:`new_movie_pic` class method
that returns a new `p:pic` element containing the specified video. This
class is not intended to be constructed or an instance of it retained by
the caller; it is a "one-shot" object, really a function wrapped in
an object such that its helper methods can be organized here.
"""
def __init__(
self, shapes, shape_id, movie_file, x, y, cx, cy, poster_frame_file, mime_type
):
super(_MoviePicElementCreator, self).__init__()
self._shapes = shapes
self._shape_id = shape_id
self._movie_file = movie_file
self._x, self._y, self._cx, self._cy = x, y, cx, cy
self._poster_frame_file = poster_frame_file
self._mime_type = mime_type
@classmethod
def new_movie_pic(
cls, shapes, shape_id, movie_file, x, y, cx, cy, poster_frame_image, mime_type
):
"""Return a new `p:pic` element containing video in *movie_file*.
If *mime_type* is None, 'video/unknown' is used. If
*poster_frame_image* is None, the default "media loudspeaker" image is
used.
"""
return cls(
shapes, shape_id, movie_file, x, y, cx, cy, poster_frame_image, mime_type
)._pic
@property
def _media_rId(self):
"""Return the rId of RT.MEDIA relationship to video part.
For historical reasons, there are two relationships to the same part;
one is the video rId and the other is the media rId.
"""
return self._video_part_rIds[0]
@lazyproperty
def _pic(self):
"""Return the new `p:pic` element referencing the video."""
return CT_Picture.new_video_pic(
self._shape_id,
self._shape_name,
self._video_rId,
self._media_rId,
self._poster_frame_rId,
self._x,
self._y,
self._cx,
self._cy,
)
@lazyproperty
def _poster_frame_image_file(self):
"""Return the image file for video placeholder image.
If no poster frame file is provided, the default "media loudspeaker"
image is used.
"""
poster_frame_file = self._poster_frame_file
if poster_frame_file is None:
return BytesIO(SPEAKER_IMAGE_BYTES)
return poster_frame_file
@lazyproperty
def _poster_frame_rId(self):
"""Return the rId of relationship to poster frame image.
The poster frame is the image used to represent the video before it's
played.
"""
_, poster_frame_rId = self._slide_part.get_or_add_image_part(
self._poster_frame_image_file
)
return poster_frame_rId
@property
def _shape_name(self):
"""Return the appropriate shape name for the p:pic shape.
A movie shape is named with the base filename of the video.
"""
return self._video.filename
@property
def _slide_part(self):
"""Return SlidePart object for slide containing this movie."""
return self._shapes.part
@lazyproperty
def _video(self):
"""Return a |Video| object containing the movie file."""
return Video.from_path_or_file_like(self._movie_file, self._mime_type)
@lazyproperty
def _video_part_rIds(self):
"""Return the rIds for relationships to media part for video.
This is where the media part and its relationships to the slide are
actually created.
"""
media_rId, video_rId = self._slide_part.get_or_add_video_media_part(self._video)
return media_rId, video_rId
@property
def _video_rId(self):
"""Return the rId of RT.VIDEO relationship to video part.
For historical reasons, there are two relationships to the same part;
one is the video rId and the other is the media rId.
"""
return self._video_part_rIds[1]
class _OleObjectElementCreator(object):
"""Functional service object for creating a new OLE-object p:graphicFrame element.
Its entire external interface is its :meth:`graphicFrame` class method that returns
a new `p:graphicFrame` element containing the specified embedded OLE-object shape.
This class is not intended to be constructed or an instance of it retained by the
caller; it is a "one-shot" object, really a function wrapped in an object such that
its helper methods can be organized here.
"""
def __init__(
self, shapes, shape_id, ole_object_file, prog_id, x, y, cx, cy, icon_file
):
self._shapes = shapes
self._shape_id = shape_id
self._ole_object_file = ole_object_file
self._prog_id_arg = prog_id
self._x = x
self._y = y
self._cx_arg = cx
self._cy_arg = cy
self._icon_file_arg = icon_file
@classmethod
def graphicFrame(
cls, shapes, shape_id, ole_object_file, prog_id, x, y, cx, cy, icon_file
):
"""Return new `p:graphicFrame` element containing embedded `ole_object_file`."""
return cls(
shapes, shape_id, ole_object_file, prog_id, x, y, cx, cy, icon_file
)._graphicFrame
@lazyproperty
def _graphicFrame(self):
"""Newly-created `p:graphicFrame` element referencing embedded OLE-object."""
return CT_GraphicalObjectFrame.new_ole_object_graphicFrame(
self._shape_id,
self._shape_name,
self._ole_object_rId,
self._progId,
self._icon_rId,
self._x,
self._y,
self._cx,
self._cy,
)
@lazyproperty
def _cx(self):
"""Emu object specifying width of "show-as-icon" image for OLE shape."""
# --- a user-specified width overrides any default ---
if self._cx_arg is not None:
return self._cx_arg
# --- the default width is specified by the PROG_ID member if prog_id is one,
# --- otherwise it gets the default icon width.
return (
Emu(self._prog_id_arg.width)
if self._prog_id_arg in PROG_ID
else Emu(965200)
)
@lazyproperty
def _cy(self):
"""Emu object specifying height of "show-as-icon" image for OLE shape."""
# --- a user-specified height overrides any default ---
if self._cy_arg is not None:
return self._cy_arg
# --- the default height is specified by the PROG_ID member if prog_id is one,
# --- otherwise it gets the default icon height.
return (
Emu(self._prog_id_arg.height)
if self._prog_id_arg in PROG_ID
else Emu(609600)
)
@lazyproperty
def _icon_image_file(self):
"""Reference to image file containing icon to show in lieu of this object.
This can be either a str path or a file-like object (io.BytesIO typically).
"""
# --- a user-specified icon overrides any default ---
if self._icon_file_arg is not None:
return self._icon_file_arg
# --- A prog_id belonging to PROG_ID gets its icon filename from there. A
# --- user-specified (str) prog_id gets the default icon.
icon_filename = (
self._prog_id_arg.icon_filename
if self._prog_id_arg in PROG_ID
else "generic-icon.emf"
)
_thisdir = os.path.split(__file__)[0]
return os.path.abspath(os.path.join(_thisdir, "..", "templates", icon_filename))
@lazyproperty
def _icon_rId(self):
"""str rId like "rId7" of rel to icon (image) representing OLE-object part."""
_, rId = self._slide_part.get_or_add_image_part(self._icon_image_file)
return rId
@lazyproperty
def _ole_object_rId(self):
"""str rId like "rId6" of relationship to embedded ole_object part.
This is where the ole_object part and its relationship to the slide are actually
created.
"""
return self._slide_part.add_embedded_ole_object_part(
self._prog_id_arg, self._ole_object_file
)
@lazyproperty
def _progId(self):
"""str like "Excel.Sheet.12" identifying program used to open object.
This value appears in the `progId` attribute of the `p:oleObj` element for the
object.
"""
prog_id_arg = self._prog_id_arg
# --- member of PROG_ID enumeration knows its progId keyphrase, otherwise caller
# --- has specified it explicitly (as str)
return prog_id_arg.progId if prog_id_arg in PROG_ID else prog_id_arg
@lazyproperty
def _shape_name(self):
"""str name like "Object 1" for the embedded ole_object shape.
The name is formed from the prefix "Object " and the shape-id decremented by 1.
"""
return "Object %d" % (self._shape_id - 1)
@lazyproperty
def _slide_part(self):
"""SlidePart object for this slide."""
return self._shapes.part
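# Illustrative sketch (not part of the library): embedding a workbook with
# `add_ole_object()` using a PROG_ID member, so the default icon, icon size,
# and progId are taken from the enumeration as described above. The file name
# is hypothetical.
def _example_embed_workbook(slide):
    """Embed book.xlsx as an icon-only OLE object at the default icon size."""
    from pptx.enum.shapes import PROG_ID
    from pptx.util import Inches
    return slide.shapes.add_ole_object(
        "book.xlsx", PROG_ID.XLSX, left=Inches(1), top=Inches(1)
    )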
|
|
# -*- coding: utf-8 -*-
"""tests for app.utils"""
from mock import MagicMock
from tests.unit import UnitTestCase
from app import utils
class UtilsTestCase(UnitTestCase):
def test_instance_folder_path(self):
self.assertIsNotNone(utils.INSTANCE_FOLDER_PATH,
'utils.INSTANCE_FOLDER_PATH must not be None')
self.assertEqual(utils.INSTANCE_FOLDER_PATH, '/tmp/instance',
'utils.INSTANCE_FOLDER_PATH must be {}'.format('/tmp/instance'))
def test_merge_dict(self):
result = utils.merge_dict({'a': 1}, {'b': 2})
self.assertEqual(result, {'a': 1, 'b': 2})
result = utils.merge_dict({'c': 3}, None)
self.assertEqual(result, {'c': 3})
self.assertRaises(TypeError, utils.merge_dict, {'d': 4}, ['a', 'b'])
def test_extract_dict_invalid(self):
with self.assertRaises(ValueError) as ve:
utils.extract_dict(None)
self.assertEqual(ve.exception.message, 'origin_dict must be a dict')
with self.assertRaises(ValueError) as ve:
utils.extract_dict({}, extracted_keys=1)
self.assertEqual(ve.exception.message, 'extracted_keys must be a sequence')
with self.assertRaises(ValueError) as ve:
utils.extract_dict({}, ignored_keys=2)
self.assertEqual(ve.exception.message, 'ignored_keys must be a sequence')
with self.assertRaises(ValueError) as ve:
utils.extract_dict({}, ignored_values=3)
self.assertEqual(ve.exception.message, 'ignored_values must be a sequence')
with self.assertRaises(ValueError) as ve:
utils.extract_dict({}, func=4)
self.assertEqual(ve.exception.message, 'func must be a function')
def test_extract_dict_origin_dict(self):
self.assertEqual(utils.extract_dict({}), {})
origin_dict = {
'a': 1,
'b': 2,
'c': 3,
'd': 4,
'e': 5
}
expected_dict = origin_dict
self.assertEqual(utils.extract_dict(origin_dict), expected_dict)
def test_extract_dict_extracted_keys(self):
origin_dict = {
'a': 1,
'b': 2,
'c': 3,
'd': 4,
'e': 5
}
expected_dict = {
'a': 1,
'b': 2,
'c': 3
}
extracted_keys = ('a', 'b', 'c')
self.assertEqual(utils.extract_dict({}, extracted_keys=extracted_keys), {})
self.assertEqual(utils.extract_dict(origin_dict, extracted_keys=extracted_keys),
expected_dict)
def test_extract_dict_ignored_keys(self):
origin_dict = {
'a': 1,
'b': 2,
'c': 3,
'd': 4,
'e': 5
}
expected_dict = {
'd': 4,
'e': 5
}
ignored_keys = ('a', 'b', 'c')
self.assertEqual(utils.extract_dict({}, ignored_keys=ignored_keys), {})
self.assertEqual(utils.extract_dict(origin_dict, ignored_keys=ignored_keys), expected_dict)
def test_extract_dict_ignored_values(self):
origin_dict = {
'a': 1,
'b': 2,
'c': 3,
'd': 4,
'e': 5
}
expected_dict = {
'b': 2,
'd': 4
}
ignored_values = (1, 3, 5)
self.assertEqual(utils.extract_dict({}, ignored_values=ignored_values), {})
self.assertEqual(utils.extract_dict(origin_dict, ignored_values=ignored_values),
expected_dict)
def test_extract_dict_func(self):
origin_dict = {
'a': 1,
'b': 2,
'c': 3,
'd': 4,
'e': 5
}
expected_dict = {
'a': 1,
'e': 5
}
def filter_func(key, value):
if key in ('b', 'c') or value in (4,):
return False
return True
self.assertEqual(utils.extract_dict({}, func=filter_func), {})
self.assertEqual(utils.extract_dict(origin_dict, func=filter_func), expected_dict)
def test_extract_dict_full(self):
origin_dict = {
'a': 1,
'b': 2,
'c': 3,
'd': 4,
'e': 5
}
expected_dict = {
'a': 1,
'e': 5
}
extracted_keys = ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h')
ignored_keys = ('b', 'j', 'k', 'l', 'm')
ignored_values = (3, 10, 20, 30)
func = lambda key, value: False if key == 'd' else True
self.assertEqual(
utils.extract_dict({}, extracted_keys, ignored_keys, ignored_values, func),
{}
)
self.assertEqual(
utils.extract_dict(origin_dict, extracted_keys, ignored_keys, ignored_values, func),
expected_dict
)
def test_add_filters_invalid(self):
with self.assertRaises(ValueError) as ve:
utils.add_filters(None, None, None)
self.assertEqual(ve.exception.message, 'query must not be None')
with self.assertRaises(ValueError) as ve:
utils.add_filters(MagicMock(), None, None)
self.assertEqual(ve.exception.message, 'op_list must be a sequence')
with self.assertRaises(ValueError) as ve:
utils.add_filters(MagicMock(), 2, None)
self.assertEqual(ve.exception.message, 'op_list must be a sequence')
def test_add_filters_accepted_keys(self):
mock_query = MagicMock()
op_list = [
{
'key': 'age',
'op': 'ge',
'value': 20
}
]
self.assertEqual(utils.add_filters(mock_query, op_list, None), mock_query)
self.assertEqual(utils.add_filters(mock_query, op_list, []), mock_query)
self.assertEqual(utils.add_filters(mock_query, op_list, ['name']), mock_query)
self.assertFalse(mock_query.filter.called)
def test_add_filters_query(self):
mock_query = MagicMock()
mock_model_class = MagicMock()
mock_query.column_descriptions = [
{
'type': mock_model_class
}
]
mock_query_return = 'returned'
mock_query.filter.return_value = mock_query_return
op_sequence = [
{
'key': 'name',
'op': 'eq',
'value': 'test'
}
]
mock_model_class.name = None
with self.assertRaises(ValueError) as ve:
utils.add_filters(mock_query, op_sequence, ('name',))
self.assertEqual(ve.exception.message, 'Invalid filter column: name')
column_mock = MagicMock()
del column_mock.eq
del column_mock.eq_
del column_mock.__eq__
mock_model_class.name = column_mock
with self.assertRaises(ValueError) as ve:
utils.add_filters(mock_query, op_sequence, ['name'])
self.assertEqual(ve.exception.message, 'Invalid filter operator: eq')
mock_eq = MagicMock()
column_mock.__eq__ = mock_eq
query = utils.add_filters(mock_query, op_sequence, ['name'])
mock_eq.assert_called_once_with('test')
mock_query.filter.assert_called_once_with(mock_eq('test'))
self.assertEqual(query, mock_query_return)
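# Hedged reference sketch (not the actual app.utils implementation): a minimal
# extract_dict consistent with the behaviour the tests above assert, shown only
# to make the expected filtering rules explicit. All names are assumptions.
def _extract_dict_sketch(origin_dict, extracted_keys=None, ignored_keys=None,
                         ignored_values=None, func=None):
    if not isinstance(origin_dict, dict):
        raise ValueError('origin_dict must be a dict')
    result = {}
    for key, value in origin_dict.items():
        if extracted_keys is not None and key not in extracted_keys:
            continue  # keep only explicitly extracted keys
        if ignored_keys is not None and key in ignored_keys:
            continue  # drop ignored keys
        if ignored_values is not None and value in ignored_values:
            continue  # drop ignored values
        if func is not None and not func(key, value):
            continue  # drop entries rejected by the filter function
        result[key] = value
    return result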
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import inspect
import pickle
import pytest
from ..decorators import (deprecated_attribute, deprecated, wraps,
sharedmethod, classproperty,
format_doc, deprecated_renamed_argument)
from ..exceptions import AstropyDeprecationWarning, AstropyUserWarning
from ...tests.helper import catch_warnings
def test_wraps():
"""
Tests the compatibility replacement for functools.wraps which supports
argument preservation across all supported Python versions.
"""
def foo(a, b, c=1, d=2, e=3, **kwargs):
"""A test function."""
return a, b, c, d, e, kwargs
@wraps(foo)
def bar(*args, **kwargs):
return ('test',) + foo(*args, **kwargs)
expected = ('test', 1, 2, 3, 4, 5, {'f': 6, 'g': 7})
assert bar(1, 2, 3, 4, 5, f=6, g=7) == expected
assert bar.__name__ == 'foo'
if foo.__doc__ is not None:
# foo.__doc__ may be None when docstrings are stripped (e.g. python -OO)
assert bar.__doc__ == "A test function."
if hasattr(foo, '__qualname__'):
assert bar.__qualname__ == foo.__qualname__
argspec = inspect.getfullargspec(bar)
assert argspec.varkw == 'kwargs'
assert argspec.args == ['a', 'b', 'c', 'd', 'e']
assert argspec.defaults == (1, 2, 3)
def test_wraps_exclude_names():
"""
Test the optional ``exclude_names`` argument to the wraps decorator.
"""
# This particular test demonstrates wrapping an instance method
# as a function and excluding the "self" argument:
class TestClass:
def method(self, a, b, c=1, d=2, **kwargs):
return (a, b, c, d, kwargs)
test = TestClass()
@wraps(test.method, exclude_args=('self',))
def func(*args, **kwargs):
return test.method(*args, **kwargs)
argspec = inspect.getfullargspec(func)
assert argspec.args == ['a', 'b', 'c', 'd']
assert func('a', 'b', e=3) == ('a', 'b', 1, 2, {'e': 3})
def test_wraps_keep_orig_name():
"""
Test that when __name__ is excluded from the ``assigned`` argument
to ``wraps``, the function being wrapped keeps its original name.
Regression test for https://github.com/astropy/astropy/pull/4016
"""
def foo():
pass
assigned = list(functools.WRAPPER_ASSIGNMENTS)
assigned.remove('__name__')
def bar():
pass
orig_bar = bar
bar = wraps(foo, assigned=assigned)(bar)
assert bar is not orig_bar
assert bar.__name__ == 'bar'
def test_deprecated_attribute():
class DummyClass:
def __init__(self):
self._foo = 42
def set_private(self):
self._foo = 100
foo = deprecated_attribute('foo', '0.2')
dummy = DummyClass()
with catch_warnings(AstropyDeprecationWarning) as w:
x = dummy.foo
assert len(w) == 1
assert str(w[0].message) == ("The foo attribute is deprecated and may be "
"removed in a future version.")
with catch_warnings() as w:
dummy.set_private()
assert len(w) == 0
# This needs to be defined outside of the test function, because we
# want to try to pickle it.
@deprecated('100.0')
class TA:
"""
This is the class docstring.
"""
def __init__(self):
"""
This is the __init__ docstring
"""
pass
class TMeta(type):
metaclass_attr = 1
@deprecated('100.0')
class TB(metaclass=TMeta):
pass
def test_deprecated_class():
orig_A = TA.__bases__[0]
# The only thing that should be different about the new class
# is __doc__, __init__, __bases__ and __subclasshook__.
# and __init_subclass__ for Python 3.6+.
for x in dir(orig_A):
if x not in ('__doc__', '__init__', '__bases__', '__dict__',
'__subclasshook__', '__init_subclass__'):
assert getattr(TA, x) == getattr(orig_A, x)
with catch_warnings(AstropyDeprecationWarning) as w:
TA()
assert len(w) == 1
if TA.__doc__ is not None:
assert 'function' not in TA.__doc__
assert 'deprecated' in TA.__doc__
assert 'function' not in TA.__init__.__doc__
assert 'deprecated' in TA.__init__.__doc__
# Make sure the object is picklable
pickle.dumps(TA)
def test_deprecated_class_with_new_method():
"""
Test that a class with __new__ method still works even if it accepts
additional arguments.
This previously failed because the deprecated decorator would wrap
``object.__init__``, which takes no additional arguments.
"""
@deprecated('1.0')
class A:
def __new__(cls, a):
return super().__new__(cls)
# Creating an instance should work but raise a DeprecationWarning
with catch_warnings(AstropyDeprecationWarning) as w:
A(1)
assert len(w) == 1
@deprecated('1.0')
class B:
def __new__(cls, a):
return super().__new__(cls)
def __init__(self, a):
pass
# Creating an instance should work but raise a DeprecationWarning
with catch_warnings(AstropyDeprecationWarning) as w:
B(1)
assert len(w) == 1
def test_deprecated_class_with_super():
"""
Regression test for an issue where classes that used `super()` in their
``__init__`` did not actually call the correct class's ``__init__`` in the
MRO.
"""
@deprecated('100.0')
class TB:
def __init__(self, a, b):
super().__init__()
with catch_warnings(AstropyDeprecationWarning) as w:
TB(1, 2)
assert len(w) == 1
if TB.__doc__ is not None:
assert 'function' not in TB.__doc__
assert 'deprecated' in TB.__doc__
assert 'function' not in TB.__init__.__doc__
assert 'deprecated' in TB.__init__.__doc__
def test_deprecated_class_with_custom_metaclass():
"""
Regression test for an issue where deprecating a class with a metaclass
other than type did not restore the metaclass properly.
"""
with catch_warnings(AstropyDeprecationWarning) as w:
TB()
assert len(w) == 1
assert type(TB) is TMeta
assert TB.metaclass_attr == 1
def test_deprecated_static_and_classmethod():
"""
Regression test for issue introduced by
https://github.com/astropy/astropy/pull/2811 and mentioned also here:
https://github.com/astropy/astropy/pull/2580#issuecomment-51049969
where it appears that deprecated staticmethods didn't work on Python 2.6.
"""
class A:
"""Docstring"""
@deprecated('1.0')
@staticmethod
def B():
pass
@deprecated('1.0')
@classmethod
def C(cls):
pass
with catch_warnings(AstropyDeprecationWarning) as w:
A.B()
assert len(w) == 1
if A.__doc__ is not None:
assert 'deprecated' in A.B.__doc__
with catch_warnings(AstropyDeprecationWarning) as w:
A.C()
assert len(w) == 1
if A.__doc__ is not None:
assert 'deprecated' in A.C.__doc__
def test_deprecated_argument():
# Tests the decorator with function, method, staticmethod and classmethod.
class Test:
@classmethod
@deprecated_renamed_argument('clobber', 'overwrite', '1.3')
def test1(cls, overwrite):
return overwrite
@staticmethod
@deprecated_renamed_argument('clobber', 'overwrite', '1.3')
def test2(overwrite):
return overwrite
@deprecated_renamed_argument('clobber', 'overwrite', '1.3')
def test3(self, overwrite):
return overwrite
@deprecated_renamed_argument('clobber', 'overwrite', '1.3', relax=False)
def test1(overwrite):
return overwrite
for method in [Test().test1, Test().test2, Test().test3, test1]:
# As positional argument only
assert method(1) == 1
# As new keyword argument
assert method(overwrite=1) == 1
# Using the deprecated name
with catch_warnings(AstropyDeprecationWarning) as w:
assert method(clobber=1) == 1
assert len(w) == 1
assert '1.3' in str(w[0].message)
assert 'test_decorators.py' in str(w[0].filename)
# Using both. Both keyword
with pytest.raises(TypeError):
method(clobber=2, overwrite=1)
# One positional, one keyword
with pytest.raises(TypeError):
method(1, clobber=2)
def test_deprecated_argument_in_kwargs():
# To rename an argument that is consumed by "kwargs" the "arg_in_kwargs"
# parameter is used.
@deprecated_renamed_argument('clobber', 'overwrite', '1.3',
arg_in_kwargs=True)
def test(**kwargs):
return kwargs['overwrite']
# As positional argument only
with pytest.raises(TypeError):
test(1)
# As new keyword argument
assert test(overwrite=1) == 1
# Using the deprecated name
with catch_warnings(AstropyDeprecationWarning) as w:
assert test(clobber=1) == 1
assert len(w) == 1
assert '1.3' in str(w[0].message)
assert 'test_decorators.py' in str(w[0].filename)
# Using both. Both keyword
with pytest.raises(TypeError):
test(clobber=2, overwrite=1)
# One positional, one keyword
with pytest.raises(TypeError):
test(1, clobber=2)
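# Illustrative sketch (assumed names, not part of astropy): how a library
# function would typically apply the decorator so existing callers that pass
# the old `clobber` keyword keep working while seeing a deprecation warning.
@deprecated_renamed_argument('clobber', 'overwrite', '1.3')
def _example_write_table(filename, overwrite=False):
    """Hypothetical writer; `clobber=True` is remapped to `overwrite=True`."""
    return filename, overwrite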
def test_deprecated_argument_relaxed():
# Relax turns the TypeError if both old and new keyword are used into
# a warning.
@deprecated_renamed_argument('clobber', 'overwrite', '1.3', relax=True)
def test(overwrite):
return overwrite
# As positional argument only
assert test(1) == 1
# As new keyword argument
assert test(overwrite=1) == 1
# Using the deprecated name
with catch_warnings(AstropyDeprecationWarning) as w:
assert test(clobber=1) == 1
assert len(w) == 1
assert '1.3' in str(w[0].message)
# Using both. Both keyword
with catch_warnings(AstropyUserWarning) as w:
assert test(clobber=2, overwrite=1) == 1
assert len(w) == 1
# One positional, one keyword
with catch_warnings(AstropyUserWarning) as w:
assert test(1, clobber=2) == 1
assert len(w) == 1
def test_deprecated_argument_pending():
# A pending deprecation issues no warnings at all, even when the old
# keyword (or both old and new) is used.
@deprecated_renamed_argument('clobber', 'overwrite', '1.3', pending=True)
def test(overwrite):
return overwrite
# As positional argument only
assert test(1) == 1
# As new keyword argument
assert test(overwrite=1) == 1
# Using the deprecated name
with catch_warnings(AstropyUserWarning, AstropyDeprecationWarning) as w:
assert test(clobber=1) == 1
assert len(w) == 0
# Using both. Both keyword
with catch_warnings(AstropyUserWarning, AstropyDeprecationWarning) as w:
assert test(clobber=2, overwrite=1) == 1
assert len(w) == 0
# One positional, one keyword
with catch_warnings(AstropyUserWarning, AstropyDeprecationWarning) as w:
assert test(1, clobber=2) == 1
assert len(w) == 0
def test_deprecated_argument_multi_deprecation():
@deprecated_renamed_argument(['x', 'y', 'z'], ['a', 'b', 'c'],
[1.3, 1.2, 1.3], relax=True)
def test(a, b, c):
return a, b, c
with catch_warnings(AstropyDeprecationWarning) as w:
assert test(x=1, y=2, z=3) == (1, 2, 3)
assert len(w) == 3
# Make sure relax is valid for all arguments
with catch_warnings(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, b=3) == (1, 3, 3)
assert len(w) == 1
with catch_warnings(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, a=3) == (3, 2, 3)
assert len(w) == 1
with catch_warnings(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, c=5) == (1, 2, 5)
assert len(w) == 1
def test_deprecated_argument_multi_deprecation_2():
@deprecated_renamed_argument(['x', 'y', 'z'], ['a', 'b', 'c'],
[1.3, 1.2, 1.3], relax=[True, True, False])
def test(a, b, c):
return a, b, c
with catch_warnings(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, b=3) == (1, 3, 3)
assert len(w) == 1
with catch_warnings(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, a=3) == (3, 2, 3)
assert len(w) == 1
with pytest.raises(TypeError):
assert test(x=1, y=2, z=3, c=5) == (1, 2, 5)
def test_deprecated_argument_not_allowed_use():
# If the argument is supposed to be inside the kwargs one needs to set the
# arg_in_kwargs parameter. Without it it raises a TypeError.
with pytest.raises(TypeError):
@deprecated_renamed_argument('clobber', 'overwrite', '1.3')
def test1(**kwargs):
return kwargs['overwrite']
# Cannot replace "*args".
with pytest.raises(TypeError):
@deprecated_renamed_argument('overwrite', 'args', '1.3')
def test2(*args):
return args
# Cannot replace "**kwargs".
with pytest.raises(TypeError):
@deprecated_renamed_argument('overwrite', 'kwargs', '1.3')
def test3(**kwargs):
return kwargs
def test_sharedmethod_reuse_on_subclasses():
"""
Regression test for an issue where sharedmethod would bind to one class
for all time, causing the same method not to work properly on other
subclasses of that class.
It has the same problem when the same sharedmethod is called on different
instances of some class as well.
"""
class AMeta(type):
def foo(cls):
return cls.x
class A:
x = 3
def __init__(self, x):
self.x = x
@sharedmethod
def foo(self):
return self.x
a1 = A(1)
a2 = A(2)
assert a1.foo() == 1
assert a2.foo() == 2
# Similar test now, but for multiple subclasses using the same sharedmethod
# as a classmethod
assert A.foo() == 3
class B(A):
x = 5
assert B.foo() == 5
def test_classproperty_docstring():
"""
Tests that the docstring is set correctly on classproperties.
This failed previously due to a bug in Python that didn't always
set __doc__ properly on instances of property subclasses.
"""
class A:
# Inherits docstring from getter
@classproperty
def foo(cls):
"""The foo."""
return 1
assert A.__dict__['foo'].__doc__ == "The foo."
class B:
# Use doc passed to classproperty constructor
def _get_foo(cls): return 1
foo = classproperty(_get_foo, doc="The foo.")
assert B.__dict__['foo'].__doc__ == "The foo."
def test_format_doc_stringInput_simple():
# Simple tests with string input
docstring_fail = ''
# Raises a ValueError if the input string is empty
with pytest.raises(ValueError):
@format_doc(docstring_fail)
def testfunc_fail():
pass
docstring = 'test'
# A first test that replaces an empty docstring
@format_doc(docstring)
def testfunc_1():
pass
assert inspect.getdoc(testfunc_1) == docstring
# Test that it replaces an existing docstring
@format_doc(docstring)
def testfunc_2():
'''not test'''
pass
assert inspect.getdoc(testfunc_2) == docstring
def test_format_doc_stringInput_format():
# Tests with string input and formatting
docstring = 'yes {0} no {opt}'
# Raises an IndexError if the format args and kwargs are not given
with pytest.raises(IndexError):
@format_doc(docstring)
def testfunc1():
pass
# Test that the formatting is done right
@format_doc(docstring, '/', opt='= life')
def testfunc2():
pass
assert inspect.getdoc(testfunc2) == 'yes / no = life'
# Test that we can include the original docstring
docstring2 = 'yes {0} no {__doc__}'
@format_doc(docstring2, '/')
def testfunc3():
'''= 2 / 2 * life'''
pass
assert inspect.getdoc(testfunc3) == 'yes / no = 2 / 2 * life'
def test_format_doc_objectInput_simple():
# Simple tests with object input
def docstring_fail():
pass
# Self input while the function has no docstring raises an error
with pytest.raises(ValueError):
@format_doc(docstring_fail)
def testfunc_fail():
pass
def docstring0():
'''test'''
pass
# A first test that replaces an empty docstring
@format_doc(docstring0)
def testfunc_1():
pass
assert inspect.getdoc(testfunc_1) == inspect.getdoc(docstring0)
# Test that it replaces an existing docstring
@format_doc(docstring0)
def testfunc_2():
'''not test'''
pass
assert inspect.getdoc(testfunc_2) == inspect.getdoc(docstring0)
def test_format_doc_objectInput_format():
# Tests with object input and formatting
def docstring():
'''test {0} test {opt}'''
pass
# Raises an IndexError if the format args and kwargs are not given
with pytest.raises(IndexError):
@format_doc(docstring)
def testfunc_fail():
pass
# Test that the formatting is done right
@format_doc(docstring, '+', opt='= 2 * test')
def testfunc2():
pass
assert inspect.getdoc(testfunc2) == 'test + test = 2 * test'
# Test that we can include the original docstring
def docstring2():
'''test {0} test {__doc__}'''
pass
@format_doc(docstring2, '+')
def testfunc3():
'''= 4 / 2 * test'''
pass
assert inspect.getdoc(testfunc3) == 'test + test = 4 / 2 * test'
def test_format_doc_selfInput_simple():
# Simple tests with self input
# Self input while the function has no docstring raises an error
with pytest.raises(ValueError):
@format_doc(None)
def testfunc_fail():
pass
# Test that it keeps an existing docstring
@format_doc(None)
def testfunc_1():
'''not test'''
pass
assert inspect.getdoc(testfunc_1) == 'not test'
def test_format_doc_selfInput_format():
# Tests with string input which is '__doc__' (special case) and formatting
# Raises an IndexError if the format args and kwargs are not given
with pytest.raises(IndexError):
@format_doc(None)
def testfunc_fail():
'''dum {0} dum {opt}'''
pass
# Test that the formatting is done right
@format_doc(None, 'di', opt='da dum')
def testfunc1():
'''dum {0} dum {opt}'''
pass
assert inspect.getdoc(testfunc1) == 'dum di dum da dum'
# Test that we cannot recursively insert the original documentation
@format_doc(None, 'di')
def testfunc2():
'''dum {0} dum {__doc__}'''
pass
assert inspect.getdoc(testfunc2) == 'dum di dum '
def test_format_doc_onMethod():
# Check if the decorator works on methods too, to spice it up we try double
# decorator
docstring = 'what we do {__doc__}'
class TestClass:
@format_doc(docstring)
@format_doc(None, 'strange.')
def test_method(self):
'''is {0}'''
pass
assert inspect.getdoc(TestClass.test_method) == 'what we do is strange.'
def test_format_doc_onClass():
# Check if the decorator works on classes too
docstring = 'what we do {__doc__} {0}{opt}'
@format_doc(docstring, 'strange', opt='.')
class TestClass:
'''is'''
pass
assert inspect.getdoc(TestClass) == 'what we do is strange.'
|
|
# See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html for
# high-level documentation on how this system works.
from typing import cast, AbstractSet, Any, Callable, Dict, List, \
Mapping, MutableMapping, Optional, Iterable, Sequence, Set, Union
from typing_extensions import Deque, TypedDict
from django.utils.translation import ugettext as _
from django.conf import settings
from collections import deque
import os
import time
import logging
import ujson
import requests
import atexit
import sys
import signal
import tornado.ioloop
import random
from zerver.models import UserProfile, Client, Realm
from zerver.decorator import cachify
from zerver.tornado.handlers import clear_handler_by_id, get_handler_by_id, \
finish_handler, handler_stats_string
from zerver.lib.utils import statsd
from zerver.middleware import async_request_timer_restart
from zerver.lib.message import MessageDict
from zerver.lib.narrow import build_narrow_filter
from zerver.lib.queue import queue_json_publish
from zerver.lib.request import JsonableError
from zerver.tornado.descriptors import clear_descriptor_by_handler_id, set_descriptor_by_handler_id
from zerver.tornado.exceptions import BadEventQueueIdError
from zerver.tornado.sharding import get_tornado_uri, get_tornado_port, \
notify_tornado_queue_name
from zerver.tornado.autoreload import add_reload_hook
import copy
requests_client = requests.Session()
for host in ['127.0.0.1', 'localhost']:
if settings.TORNADO_SERVER and host in settings.TORNADO_SERVER:
# This seems like the only working solution to ignore proxy in
# requests library.
requests_client.trust_env = False
# The idle timeout used to be a week, but we found that in that
# situation, queues from dead browser sessions would grow quite large
# due to the accumulation of message data in those queues.
DEFAULT_EVENT_QUEUE_TIMEOUT_SECS = 60 * 10
# We garbage-collect every minute; this is totally fine given that the
# GC scan takes ~2ms with 1000 event queues.
EVENT_QUEUE_GC_FREQ_MSECS = 1000 * 60 * 1
# Capped limit for how long a client can request an event queue
# to live
MAX_QUEUE_TIMEOUT_SECS = 7 * 24 * 60 * 60
# The heartbeats effectively act as a server-side timeout for
# get_events(). The actual timeout value is randomized for each
# client connection based on the below value. We ensure that the
# maximum timeout value is 55 seconds, to deal with crappy home
# wireless routers that kill "inactive" http connections.
HEARTBEAT_MIN_FREQ_SECS = 45
class ClientDescriptor:
def __init__(self,
user_profile_id: int,
user_profile_email: str,
realm_id: int, event_queue: 'EventQueue',
event_types: Optional[Sequence[str]],
client_type_name: str,
apply_markdown: bool=True,
client_gravatar: bool=True,
all_public_streams: bool=False,
lifespan_secs: int=0,
narrow: Iterable[Sequence[str]]=[]) -> None:
# These objects are serialized on shutdown and restored on restart.
# If fields are added or semantics are changed, temporary code must be
# added to load_event_queues() to update the restored objects.
# Additionally, the to_dict and from_dict methods must be updated
self.user_profile_id = user_profile_id
self.user_profile_email = user_profile_email
self.realm_id = realm_id
self.current_handler_id = None # type: Optional[int]
self.current_client_name = None # type: Optional[str]
self.event_queue = event_queue
self.event_types = event_types
self.last_connection_time = time.time()
self.apply_markdown = apply_markdown
self.client_gravatar = client_gravatar
self.all_public_streams = all_public_streams
self.client_type_name = client_type_name
self._timeout_handle = None # type: Any # TODO: should be return type of ioloop.call_later
self.narrow = narrow
self.narrow_filter = build_narrow_filter(narrow)
# Default for lifespan_secs is DEFAULT_EVENT_QUEUE_TIMEOUT_SECS;
# but users can set it as high as MAX_QUEUE_TIMEOUT_SECS.
if lifespan_secs == 0:
lifespan_secs = DEFAULT_EVENT_QUEUE_TIMEOUT_SECS
self.queue_timeout = min(lifespan_secs, MAX_QUEUE_TIMEOUT_SECS)
def to_dict(self) -> Dict[str, Any]:
# If you add a new key to this dict, make sure you add appropriate
# migration code in from_dict or load_event_queues to account for
# loading event queues that lack that key.
return dict(user_profile_id=self.user_profile_id,
user_profile_email=self.user_profile_email,
realm_id=self.realm_id,
event_queue=self.event_queue.to_dict(),
queue_timeout=self.queue_timeout,
event_types=self.event_types,
last_connection_time=self.last_connection_time,
apply_markdown=self.apply_markdown,
client_gravatar=self.client_gravatar,
all_public_streams=self.all_public_streams,
narrow=self.narrow,
client_type_name=self.client_type_name)
def __repr__(self) -> str:
return "ClientDescriptor<%s>" % (self.event_queue.id,)
@classmethod
def from_dict(cls, d: MutableMapping[str, Any]) -> 'ClientDescriptor':
if 'user_profile_email' not in d:
# Temporary migration for the addition of the new user_profile_email field
from zerver.models import get_user_profile_by_id
d['user_profile_email'] = get_user_profile_by_id(d['user_profile_id']).email
if 'client_type' in d:
# Temporary migration for the rename of client_type to client_type_name
d['client_type_name'] = d['client_type']
if 'client_gravatar' not in d:
# Temporary migration for the addition of the client_gravatar field
d['client_gravatar'] = False
ret = cls(
d['user_profile_id'],
d['user_profile_email'],
d['realm_id'],
EventQueue.from_dict(d['event_queue']),
d['event_types'],
d['client_type_name'],
d['apply_markdown'],
d['client_gravatar'],
d['all_public_streams'],
d['queue_timeout'],
d.get('narrow', [])
)
ret.last_connection_time = d['last_connection_time']
return ret
def prepare_for_pickling(self) -> None:
self.current_handler_id = None
self._timeout_handle = None
def add_event(self, event: Dict[str, Any]) -> None:
# Any dictionary passed into this function must be a unique
# dictionary (potentially a shallow copy of a shared data
# structure), since the event_queue data structures will
# mutate it to add the queue-specific unique `id` of that
# event to the outer event dictionary.
if self.current_handler_id is not None:
handler = get_handler_by_id(self.current_handler_id)
async_request_timer_restart(handler._request)
self.event_queue.push(event)
self.finish_current_handler()
def finish_current_handler(self) -> bool:
if self.current_handler_id is not None:
err_msg = "Got error finishing handler for queue %s" % (self.event_queue.id,)
try:
finish_handler(self.current_handler_id, self.event_queue.id,
self.event_queue.contents(), self.apply_markdown)
except Exception:
logging.exception(err_msg)
finally:
self.disconnect_handler()
return True
return False
def accepts_event(self, event: Mapping[str, Any]) -> bool:
if self.event_types is not None and event["type"] not in self.event_types:
return False
if event["type"] == "message":
return self.narrow_filter(event)
return True
# TODO: Refactor so we don't need this function
def accepts_messages(self) -> bool:
return self.event_types is None or "message" in self.event_types
def expired(self, now: float) -> bool:
return (self.current_handler_id is None and
now - self.last_connection_time >= self.queue_timeout)
def connect_handler(self, handler_id: int, client_name: str) -> None:
self.current_handler_id = handler_id
self.current_client_name = client_name
set_descriptor_by_handler_id(handler_id, self)
self.last_connection_time = time.time()
def timeout_callback() -> None:
self._timeout_handle = None
# All clients get heartbeat events
self.add_event(dict(type='heartbeat'))
ioloop = tornado.ioloop.IOLoop.instance()
interval = HEARTBEAT_MIN_FREQ_SECS + random.randint(0, 10)
if self.client_type_name != 'API: heartbeat test':
self._timeout_handle = ioloop.call_later(interval, timeout_callback)
def disconnect_handler(self, client_closed: bool=False) -> None:
if self.current_handler_id:
clear_descriptor_by_handler_id(self.current_handler_id, None)
clear_handler_by_id(self.current_handler_id)
if client_closed:
logging.info("Client disconnected for queue %s (%s via %s)" %
(self.event_queue.id, self.user_profile_email,
self.current_client_name))
self.current_handler_id = None
self.current_client_name = None
if self._timeout_handle is not None:
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.remove_timeout(self._timeout_handle)
self._timeout_handle = None
def cleanup(self) -> None:
# Before we can GC the event queue, we need to disconnect the
# handler and notify the client (or connection server) so that
# they can cleanup their own state related to the GC'd event
# queue. Finishing the handler before we GC ensures the
# invariant that event queues are idle when passed to
# `do_gc_event_queues` is preserved.
self.finish_current_handler()
do_gc_event_queues({self.event_queue.id}, {self.user_profile_id},
{self.realm_id})
def compute_full_event_type(event: Mapping[str, Any]) -> str:
if event["type"] == "update_message_flags":
if event["all"]:
# Put the "all" case in its own category
return "all_flags/%s/%s" % (event["flag"], event["operation"])
return "flags/%s/%s" % (event["operation"], event["flag"])
return event["type"]
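# For illustration (a hedged sketch; the event shapes are inferred from the
# branches above, not from any other documentation):
#   compute_full_event_type({'type': 'update_message_flags', 'all': False,
#                            'operation': 'add', 'flag': 'read'})
#     -> 'flags/add/read'
#   compute_full_event_type({'type': 'update_message_flags', 'all': True,
#                            'operation': 'add', 'flag': 'read'})
#     -> 'all_flags/read/add'
#   compute_full_event_type({'type': 'message'}) -> 'message'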
class EventQueue:
def __init__(self, id: str) -> None:
# When extending this list of properties, one must be sure to
# update to_dict and from_dict.
self.queue = deque() # type: Deque[Dict[str, Any]]
self.next_event_id = 0 # type: int
self.newest_pruned_id = -1 # type: Optional[int] # will only be None for migration from old versions
self.id = id # type: str
self.virtual_events = {} # type: Dict[str, Dict[str, Any]]
def to_dict(self) -> Dict[str, Any]:
# If you add a new key to this dict, make sure you add appropriate
# migration code in from_dict or load_event_queues to account for
# loading event queues that lack that key.
d = dict(
id=self.id,
next_event_id=self.next_event_id,
queue=list(self.queue),
virtual_events=self.virtual_events,
)
if self.newest_pruned_id is not None:
d['newest_pruned_id'] = self.newest_pruned_id
return d
@classmethod
def from_dict(cls, d: Dict[str, Any]) -> 'EventQueue':
ret = cls(d['id'])
ret.next_event_id = d['next_event_id']
ret.newest_pruned_id = d.get('newest_pruned_id', None)
ret.queue = deque(d['queue'])
ret.virtual_events = d.get("virtual_events", {})
return ret
def push(self, event: Dict[str, Any]) -> None:
event['id'] = self.next_event_id
self.next_event_id += 1
full_event_type = compute_full_event_type(event)
if (full_event_type in ["pointer", "restart"] or
full_event_type.startswith("flags/")):
if full_event_type not in self.virtual_events:
self.virtual_events[full_event_type] = copy.deepcopy(event)
return
# Update the virtual event with the values from the event
virtual_event = self.virtual_events[full_event_type]
virtual_event["id"] = event["id"]
if "timestamp" in event:
virtual_event["timestamp"] = event["timestamp"]
if full_event_type == "pointer":
virtual_event["pointer"] = event["pointer"]
elif full_event_type == "restart":
virtual_event["server_generation"] = event["server_generation"]
elif full_event_type.startswith("flags/"):
virtual_event["messages"] += event["messages"]
else:
self.queue.append(event)
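    # Illustrative sketch of the virtual-event collapsing above (ids and
    # payloads are made up): pushing two "add read flag" events
    #   {'type': 'update_message_flags', 'all': False, 'operation': 'add',
    #    'flag': 'read', 'messages': [1]}
    #   {'type': 'update_message_flags', 'all': False, 'operation': 'add',
    #    'flag': 'read', 'messages': [2]}
    # leaves self.queue untouched and a single virtual event under the key
    # 'flags/add/read' whose 'messages' list is [1, 2] and whose 'id' is the
    # id assigned to the later event.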
# Note that pop ignores virtual events. This is fine in our
# current usage since virtual events should always be resolved to
# a real event before being given to users.
def pop(self) -> Dict[str, Any]:
return self.queue.popleft()
def empty(self) -> bool:
return len(self.queue) == 0 and len(self.virtual_events) == 0
# See the comment on pop; that applies here as well
def prune(self, through_id: int) -> None:
while len(self.queue) != 0 and self.queue[0]['id'] <= through_id:
self.newest_pruned_id = self.queue[0]['id']
self.pop()
def contents(self) -> List[Dict[str, Any]]:
contents = [] # type: List[Dict[str, Any]]
virtual_id_map = {} # type: Dict[str, Dict[str, Any]]
for event_type in self.virtual_events:
virtual_id_map[self.virtual_events[event_type]["id"]] = self.virtual_events[event_type]
virtual_ids = sorted(list(virtual_id_map.keys()))
# Merge the virtual events into their final place in the queue
index = 0
length = len(virtual_ids)
for event in self.queue:
while index < length and virtual_ids[index] < event["id"]:
contents.append(virtual_id_map[virtual_ids[index]])
index += 1
contents.append(event)
while index < length:
contents.append(virtual_id_map[virtual_ids[index]])
index += 1
self.virtual_events = {}
self.queue = deque(contents)
return contents
# maps queue ids to client descriptors
clients = {} # type: Dict[str, ClientDescriptor]
# maps user id to list of client descriptors
user_clients = {} # type: Dict[int, List[ClientDescriptor]]
# maps realm id to list of client descriptors with all_public_streams=True
realm_clients_all_streams = {} # type: Dict[int, List[ClientDescriptor]]
# list of registered gc hooks.
# each one will be called with a user profile id, queue, and bool
# last_for_client that is true if this is the last queue pertaining
# to this user_profile_id
# that is about to be deleted
gc_hooks = [] # type: List[Callable[[int, ClientDescriptor, bool], None]]
next_queue_id = 0
def clear_client_event_queues_for_testing() -> None:
assert(settings.TEST_SUITE)
clients.clear()
user_clients.clear()
realm_clients_all_streams.clear()
gc_hooks.clear()
global next_queue_id
next_queue_id = 0
def add_client_gc_hook(hook: Callable[[int, ClientDescriptor, bool], None]) -> None:
gc_hooks.append(hook)
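# Hedged usage sketch: during server startup (elsewhere in the codebase) a
# hook with the signature Callable[[int, ClientDescriptor, bool], None] is
# registered, e.g.:
#   add_client_gc_hook(missedmessage_hook)
# where missedmessage_hook is defined further below in this module.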
def get_client_descriptor(queue_id: str) -> ClientDescriptor:
return clients.get(queue_id)
def get_client_descriptors_for_user(user_profile_id: int) -> List[ClientDescriptor]:
return user_clients.get(user_profile_id, [])
def get_client_descriptors_for_realm_all_streams(realm_id: int) -> List[ClientDescriptor]:
return realm_clients_all_streams.get(realm_id, [])
def add_to_client_dicts(client: ClientDescriptor) -> None:
user_clients.setdefault(client.user_profile_id, []).append(client)
if client.all_public_streams or client.narrow != []:
realm_clients_all_streams.setdefault(client.realm_id, []).append(client)
def allocate_client_descriptor(new_queue_data: MutableMapping[str, Any]) -> ClientDescriptor:
global next_queue_id
queue_id = str(settings.SERVER_GENERATION) + ':' + str(next_queue_id)
next_queue_id += 1
new_queue_data["event_queue"] = EventQueue(queue_id).to_dict()
client = ClientDescriptor.from_dict(new_queue_data)
clients[queue_id] = client
add_to_client_dicts(client)
return client
def do_gc_event_queues(to_remove: AbstractSet[str], affected_users: AbstractSet[int],
affected_realms: AbstractSet[int]) -> None:
def filter_client_dict(client_dict: MutableMapping[int, List[ClientDescriptor]], key: int) -> None:
if key not in client_dict:
return
new_client_list = [c for c in client_dict[key] if c.event_queue.id not in to_remove]
if len(new_client_list) == 0:
del client_dict[key]
else:
client_dict[key] = new_client_list
for user_id in affected_users:
filter_client_dict(user_clients, user_id)
for realm_id in affected_realms:
filter_client_dict(realm_clients_all_streams, realm_id)
for id in to_remove:
for cb in gc_hooks:
cb(clients[id].user_profile_id, clients[id], clients[id].user_profile_id not in user_clients)
del clients[id]
def gc_event_queues(port: int) -> None:
start = time.time()
to_remove = set() # type: Set[str]
affected_users = set() # type: Set[int]
affected_realms = set() # type: Set[int]
for (id, client) in clients.items():
if client.expired(start):
to_remove.add(id)
affected_users.add(client.user_profile_id)
affected_realms.add(client.realm_id)
# We don't need to call e.g. finish_current_handler on the clients
# being removed because they are guaranteed to be idle (because
# they are expired) and thus not have a current handler.
do_gc_event_queues(to_remove, affected_users, affected_realms)
if settings.PRODUCTION:
logging.info(('Tornado %d removed %d expired event queues owned by %d users in %.3fs.' +
' Now %d active queues, %s')
% (port, len(to_remove), len(affected_users), time.time() - start,
len(clients), handler_stats_string()))
statsd.gauge('tornado.active_queues', len(clients))
statsd.gauge('tornado.active_users', len(user_clients))
def persistent_queue_filename(port: int, last: bool=False) -> str:
if settings.TORNADO_PROCESSES == 1:
# Use non-port-aware, legacy version.
if last:
return settings.JSON_PERSISTENT_QUEUE_FILENAME_PATTERN % ('',) + '.last'
return settings.JSON_PERSISTENT_QUEUE_FILENAME_PATTERN % ('',)
if last:
return settings.JSON_PERSISTENT_QUEUE_FILENAME_PATTERN % ('.' + str(port) + '.last',)
return settings.JSON_PERSISTENT_QUEUE_FILENAME_PATTERN % ('.' + str(port),)
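# Hedged example (the real pattern string comes from settings and is not shown
# here): if JSON_PERSISTENT_QUEUE_FILENAME_PATTERN were the hypothetical value
# '/var/tmp/event_queues%s.json', a multi-process setup on port 9800 would use
# '/var/tmp/event_queues.9800.json', and '/var/tmp/event_queues.9800.last.json'
# for last=True; a single-process setup would use '/var/tmp/event_queues.json'
# and '/var/tmp/event_queues.json.last'.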
def dump_event_queues(port: int) -> None:
start = time.time()
with open(persistent_queue_filename(port), "w") as stored_queues:
ujson.dump([(qid, client.to_dict()) for (qid, client) in clients.items()],
stored_queues)
logging.info('Tornado %d dumped %d event queues in %.3fs'
% (port, len(clients), time.time() - start))
def load_event_queues(port: int) -> None:
global clients
start = time.time()
# ujson chokes on bad input pretty easily. We separate out the actual
# file reading from the loading so that we don't silently fail if we get
# bad input.
try:
with open(persistent_queue_filename(port), "r") as stored_queues:
json_data = stored_queues.read()
try:
clients = dict((qid, ClientDescriptor.from_dict(client))
for (qid, client) in ujson.loads(json_data))
except Exception:
logging.exception("Tornado %d could not deserialize event queues" % (port,))
except (IOError, EOFError):
pass
for client in clients.values():
# Put code for migrations due to event queue data format changes here
add_to_client_dicts(client)
logging.info('Tornado %d loaded %d event queues in %.3fs'
% (port, len(clients), time.time() - start))
def send_restart_events(immediate: bool=False) -> None:
event = dict(type='restart', server_generation=settings.SERVER_GENERATION) # type: Dict[str, Any]
if immediate:
event['immediate'] = True
for client in clients.values():
if client.accepts_event(event):
client.add_event(event.copy())
def setup_event_queue(port: int) -> None:
if not settings.TEST_SUITE:
load_event_queues(port)
atexit.register(dump_event_queues, port)
# Make sure we dump event queues even if we exit via signal
signal.signal(signal.SIGTERM, lambda signum, stack: sys.exit(1))
add_reload_hook(lambda: dump_event_queues(port))
try:
os.rename(persistent_queue_filename(port), persistent_queue_filename(port, last=True))
except OSError:
pass
# Set up event queue garbage collection
ioloop = tornado.ioloop.IOLoop.instance()
pc = tornado.ioloop.PeriodicCallback(lambda: gc_event_queues(port),
EVENT_QUEUE_GC_FREQ_MSECS, ioloop)
pc.start()
send_restart_events(immediate=settings.DEVELOPMENT)
def fetch_events(query: Mapping[str, Any]) -> Dict[str, Any]:
queue_id = query["queue_id"] # type: str
dont_block = query["dont_block"] # type: bool
last_event_id = query["last_event_id"] # type: int
user_profile_id = query["user_profile_id"] # type: int
new_queue_data = query.get("new_queue_data") # type: Optional[MutableMapping[str, Any]]
user_profile_email = query["user_profile_email"] # type: str
client_type_name = query["client_type_name"] # type: str
handler_id = query["handler_id"] # type: int
try:
was_connected = False
orig_queue_id = queue_id
extra_log_data = ""
if queue_id is None:
if dont_block:
client = allocate_client_descriptor(new_queue_data)
queue_id = client.event_queue.id
else:
raise JsonableError(_("Missing 'queue_id' argument"))
else:
if last_event_id is None:
raise JsonableError(_("Missing 'last_event_id' argument"))
client = get_client_descriptor(queue_id)
if client is None:
raise BadEventQueueIdError(queue_id)
if user_profile_id != client.user_profile_id:
raise JsonableError(_("You are not authorized to get events from this queue"))
if (
client.event_queue.newest_pruned_id is not None
and last_event_id < client.event_queue.newest_pruned_id
):
raise JsonableError(_("An event newer than %s has already been pruned!") % (last_event_id,))
client.event_queue.prune(last_event_id)
if (
client.event_queue.newest_pruned_id is not None
and last_event_id != client.event_queue.newest_pruned_id
):
raise JsonableError(_("Event %s was not in this queue") % (last_event_id,))
was_connected = client.finish_current_handler()
if not client.event_queue.empty() or dont_block:
response = dict(events=client.event_queue.contents(),
handler_id=handler_id) # type: Dict[str, Any]
if orig_queue_id is None:
response['queue_id'] = queue_id
if len(response["events"]) == 1:
extra_log_data = "[%s/%s/%s]" % (queue_id, len(response["events"]),
response["events"][0]["type"])
else:
extra_log_data = "[%s/%s]" % (queue_id, len(response["events"]))
if was_connected:
extra_log_data += " [was connected]"
return dict(type="response", response=response, extra_log_data=extra_log_data)
# After this point, dont_block=False, the queue is empty, and we
# have a pre-existing queue, so we wait for new events.
if was_connected:
logging.info("Disconnected handler for queue %s (%s/%s)" % (queue_id, user_profile_email,
client_type_name))
except JsonableError as e:
return dict(type="error", exception=e)
client.connect_handler(handler_id, client_type_name)
return dict(type="async")
# The following functions are called from Django
def request_event_queue(user_profile: UserProfile, user_client: Client, apply_markdown: bool,
client_gravatar: bool, queue_lifespan_secs: int,
event_types: Optional[Iterable[str]]=None,
all_public_streams: bool=False,
narrow: Iterable[Sequence[str]]=[]) -> Optional[str]:
if settings.TORNADO_SERVER:
tornado_uri = get_tornado_uri(user_profile.realm)
req = {'dont_block': 'true',
'apply_markdown': ujson.dumps(apply_markdown),
'client_gravatar': ujson.dumps(client_gravatar),
'all_public_streams': ujson.dumps(all_public_streams),
'client': 'internal',
'user_profile_id': user_profile.id,
'user_client': user_client.name,
'narrow': ujson.dumps(narrow),
'secret': settings.SHARED_SECRET,
'lifespan_secs': queue_lifespan_secs}
if event_types is not None:
req['event_types'] = ujson.dumps(event_types)
try:
resp = requests_client.post(tornado_uri + '/api/v1/events/internal',
data=req)
except requests.adapters.ConnectionError:
logging.error('Tornado server does not seem to be running, check %s '
'and %s for more information.' %
(settings.ERROR_FILE_LOG_PATH, "tornado.log"))
raise requests.adapters.ConnectionError(
"Django cannot connect to Tornado server (%s); try restarting" %
(tornado_uri,))
resp.raise_for_status()
return resp.json()['queue_id']
return None
def get_user_events(user_profile: UserProfile, queue_id: str, last_event_id: int) -> List[Dict[str, Any]]:
if settings.TORNADO_SERVER:
tornado_uri = get_tornado_uri(user_profile.realm)
post_data = {
'queue_id': queue_id,
'last_event_id': last_event_id,
'dont_block': 'true',
'user_profile_id': user_profile.id,
'secret': settings.SHARED_SECRET,
'client': 'internal'
} # type: Dict[str, Any]
resp = requests_client.post(tornado_uri + '/api/v1/events/internal',
data=post_data)
resp.raise_for_status()
return resp.json()['events']
return []
# Send email notifications to idle users
# after they are idle for 1 hour
NOTIFY_AFTER_IDLE_HOURS = 1
def build_offline_notification(user_profile_id: int, message_id: int) -> Dict[str, Any]:
return {"user_profile_id": user_profile_id,
"message_id": message_id,
"type": "add",
"timestamp": time.time()}
def missedmessage_hook(user_profile_id: int, client: ClientDescriptor, last_for_client: bool) -> None:
"""The receiver_is_off_zulip logic used to determine whether a user
has no active client suffers from a somewhat fundamental race
condition. If the client is no longer on the Internet,
receiver_is_off_zulip will still return true for
DEFAULT_EVENT_QUEUE_TIMEOUT_SECS, until the queue is
garbage-collected. This would cause us to reliably miss
push/email notifying users for messages arriving during the
DEFAULT_EVENT_QUEUE_TIMEOUT_SECS after they suspend their laptop (for
example). We address this by, when the queue is garbage-collected
at the end of those 10 minutes, checking to see if it's the last
one, and if so, potentially triggering notifications to the user
at that time, resulting in at most a DEFAULT_EVENT_QUEUE_TIMEOUT_SECS
delay in the arrival of their notifications.
As Zulip's APIs get more popular and the mobile apps start using
long-lived event queues for perf optimization, future versions of
this will likely need to replace checking `last_for_client` with
something more complicated, so that we only consider clients like
web browsers, not the mobile apps or random API scripts.
"""
# Only process missedmessage hook when the last queue for a
# client has been garbage collected
if not last_for_client:
return
for event in client.event_queue.contents():
if event['type'] != 'message':
continue
assert 'flags' in event
flags = event.get('flags')
mentioned = 'mentioned' in flags and 'read' not in flags
private_message = event['message']['type'] == 'private'
# stream_push_notify is set in process_message_event.
stream_push_notify = event.get('stream_push_notify', False)
stream_email_notify = event.get('stream_email_notify', False)
stream_name = None
if not private_message:
stream_name = event['message']['display_recipient']
        # Since the user is by definition idle here, we don't need to check always_push_notify
always_push_notify = False
# Since we just GC'd the last event queue, the user is definitely idle.
idle = True
message_id = event['message']['id']
# Pass on the information on whether a push or email notification was already sent.
already_notified = dict(
push_notified = event.get("push_notified", False),
email_notified = event.get("email_notified", False),
)
maybe_enqueue_notifications(user_profile_id, message_id, private_message, mentioned,
stream_push_notify, stream_email_notify, stream_name,
always_push_notify, idle, already_notified)
def receiver_is_off_zulip(user_profile_id: int) -> bool:
    # If a user has no message-receiving event queues, they have no open Zulip
    # session, so we notify them.
all_client_descriptors = get_client_descriptors_for_user(user_profile_id)
message_event_queues = [client for client in all_client_descriptors if client.accepts_messages()]
off_zulip = len(message_event_queues) == 0
return off_zulip
def maybe_enqueue_notifications(user_profile_id: int, message_id: int, private_message: bool,
mentioned: bool, stream_push_notify: bool,
stream_email_notify: bool, stream_name: Optional[str],
always_push_notify: bool, idle: bool,
already_notified: Dict[str, bool]) -> Dict[str, bool]:
"""This function has a complete unit test suite in
`test_enqueue_notifications` that should be expanded as we add
more features here."""
notified = dict() # type: Dict[str, bool]
if (idle or always_push_notify) and (private_message or mentioned or stream_push_notify):
notice = build_offline_notification(user_profile_id, message_id)
if private_message:
notice['trigger'] = 'private_message'
elif mentioned:
notice['trigger'] = 'mentioned'
elif stream_push_notify:
notice['trigger'] = 'stream_push_notify'
else:
raise AssertionError("Unknown notification trigger!")
notice['stream_name'] = stream_name
if not already_notified.get("push_notified"):
queue_json_publish("missedmessage_mobile_notifications", notice)
notified['push_notified'] = True
# Send missed_message emails if a private message or a
# mention. Eventually, we'll add settings to allow email
# notifications to match the model of push notifications
# above.
if idle and (private_message or mentioned or stream_email_notify):
notice = build_offline_notification(user_profile_id, message_id)
if private_message:
notice['trigger'] = 'private_message'
elif mentioned:
notice['trigger'] = 'mentioned'
elif stream_email_notify:
notice['trigger'] = 'stream_email_notify'
else:
raise AssertionError("Unknown notification trigger!")
notice['stream_name'] = stream_name
if not already_notified.get("email_notified"):
queue_json_publish("missedmessage_emails", notice, lambda notice: None)
notified['email_notified'] = True
return notified
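# Illustrative sketch (all argument values are made up): an idle user who
# received a not-yet-notified private message gets both notification queues hit:
#   maybe_enqueue_notifications(user_profile_id=1, message_id=42,
#                               private_message=True, mentioned=False,
#                               stream_push_notify=False,
#                               stream_email_notify=False, stream_name=None,
#                               always_push_notify=False, idle=True,
#                               already_notified={})
#   -> {'push_notified': True, 'email_notified': True}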
ClientInfo = TypedDict('ClientInfo', {
'client': ClientDescriptor,
'flags': Optional[Iterable[str]],
'is_sender': bool,
})
def get_client_info_for_message_event(event_template: Mapping[str, Any],
users: Iterable[Mapping[str, Any]]) -> Dict[str, ClientInfo]:
'''
Return client info for all the clients interested in a message.
This basically includes clients for users who are recipients
of the message, with some nuances for bots that auto-subscribe
to all streams, plus users who may be mentioned, etc.
'''
send_to_clients = {} # type: Dict[str, ClientInfo]
sender_queue_id = event_template.get('sender_queue_id', None) # type: Optional[str]
def is_sender_client(client: ClientDescriptor) -> bool:
return (sender_queue_id is not None) and client.event_queue.id == sender_queue_id
# If we're on a public stream, look for clients (typically belonging to
# bots) that are registered to get events for ALL streams.
if 'stream_name' in event_template and not event_template.get("invite_only"):
realm_id = event_template['realm_id']
for client in get_client_descriptors_for_realm_all_streams(realm_id):
send_to_clients[client.event_queue.id] = dict(
client=client,
flags=[],
is_sender=is_sender_client(client)
)
for user_data in users:
user_profile_id = user_data['id'] # type: int
flags = user_data.get('flags', []) # type: Iterable[str]
for client in get_client_descriptors_for_user(user_profile_id):
send_to_clients[client.event_queue.id] = dict(
client=client,
flags=flags,
is_sender=is_sender_client(client)
)
return send_to_clients
def process_message_event(event_template: Mapping[str, Any], users: Iterable[Mapping[str, Any]]) -> None:
"""See
https://zulip.readthedocs.io/en/latest/subsystems/sending-messages.html
for high-level documentation on this subsystem.
"""
send_to_clients = get_client_info_for_message_event(event_template, users)
presence_idle_user_ids = set(event_template.get('presence_idle_user_ids', []))
wide_dict = event_template['message_dict'] # type: Dict[str, Any]
sender_id = wide_dict['sender_id'] # type: int
message_id = wide_dict['id'] # type: int
message_type = wide_dict['type'] # type: str
sending_client = wide_dict['client'] # type: str
@cachify
def get_client_payload(apply_markdown: bool, client_gravatar: bool) -> Dict[str, Any]:
dct = copy.deepcopy(wide_dict)
MessageDict.finalize_payload(dct, apply_markdown, client_gravatar)
return dct
# Extra user-specific data to include
extra_user_data = {} # type: Dict[int, Any]
for user_data in users:
user_profile_id = user_data['id'] # type: int
flags = user_data.get('flags', []) # type: Iterable[str]
        # If the recipient was offline and the message was a 1:1 or group private
        # message to them, or they were @-mentioned, we may notify them more immediately.
private_message = message_type == "private" and user_profile_id != sender_id
mentioned = 'mentioned' in flags and 'read' not in flags
stream_push_notify = user_data.get('stream_push_notify', False)
stream_email_notify = user_data.get('stream_email_notify', False)
# We first check if a message is potentially mentionable,
# since receiver_is_off_zulip is somewhat expensive.
if private_message or mentioned or stream_push_notify or stream_email_notify:
idle = receiver_is_off_zulip(user_profile_id) or (user_profile_id in presence_idle_user_ids)
always_push_notify = user_data.get('always_push_notify', False)
stream_name = event_template.get('stream_name')
result = maybe_enqueue_notifications(user_profile_id, message_id, private_message,
mentioned, stream_push_notify, stream_email_notify,
stream_name, always_push_notify, idle, {})
result['stream_push_notify'] = stream_push_notify
result['stream_email_notify'] = stream_email_notify
extra_user_data[user_profile_id] = result
for client_data in send_to_clients.values():
client = client_data['client']
flags = client_data['flags']
is_sender = client_data.get('is_sender', False) # type: bool
extra_data = extra_user_data.get(client.user_profile_id, None) # type: Optional[Mapping[str, bool]]
if not client.accepts_messages():
# The actual check is the accepts_event() check below;
# this line is just an optimization to avoid copying
# message data unnecessarily
continue
message_dict = get_client_payload(client.apply_markdown, client.client_gravatar)
# Make sure Zephyr mirroring bots know whether stream is invite-only
if "mirror" in client.client_type_name and event_template.get("invite_only"):
message_dict = message_dict.copy()
message_dict["invite_only_stream"] = True
user_event = dict(type='message', message=message_dict, flags=flags) # type: Dict[str, Any]
if extra_data is not None:
user_event.update(extra_data)
if is_sender:
local_message_id = event_template.get('local_id', None)
if local_message_id is not None:
user_event["local_message_id"] = local_message_id
if not client.accepts_event(user_event):
continue
# The below prevents (Zephyr) mirroring loops.
if ('mirror' in sending_client and
sending_client.lower() == client.client_type_name.lower()):
continue
# We don't need to create a new dict here, since the
# `user_event` was already constructed from scratch above.
client.add_event(user_event)
def process_event(event: Mapping[str, Any], users: Iterable[int]) -> None:
for user_profile_id in users:
for client in get_client_descriptors_for_user(user_profile_id):
if client.accepts_event(event):
client.add_event(dict(event))
def process_userdata_event(event_template: Mapping[str, Any], users: Iterable[Mapping[str, Any]]) -> None:
for user_data in users:
user_profile_id = user_data['id']
user_event = dict(event_template) # shallow copy, but deep enough for our needs
for key in user_data.keys():
if key != "id":
user_event[key] = user_data[key]
for client in get_client_descriptors_for_user(user_profile_id):
if client.accepts_event(user_event):
# We need to do another shallow copy, or we risk
# sending the same event to multiple clients.
client.add_event(dict(user_event))
def process_message_update_event(event_template: Mapping[str, Any],
users: Iterable[Mapping[str, Any]]) -> None:
prior_mention_user_ids = set(event_template.get('prior_mention_user_ids', []))
mention_user_ids = set(event_template.get('mention_user_ids', []))
presence_idle_user_ids = set(event_template.get('presence_idle_user_ids', []))
stream_push_user_ids = set(event_template.get('stream_push_user_ids', []))
stream_email_user_ids = set(event_template.get('stream_email_user_ids', []))
push_notify_user_ids = set(event_template.get('push_notify_user_ids', []))
stream_name = event_template.get('stream_name')
message_id = event_template['message_id']
for user_data in users:
user_profile_id = user_data['id']
user_event = dict(event_template) # shallow copy, but deep enough for our needs
for key in user_data.keys():
if key != "id":
user_event[key] = user_data[key]
maybe_enqueue_notifications_for_message_update(
user_profile_id=user_profile_id,
message_id=message_id,
stream_name=stream_name,
prior_mention_user_ids=prior_mention_user_ids,
mention_user_ids=mention_user_ids,
presence_idle_user_ids=presence_idle_user_ids,
stream_push_user_ids=stream_push_user_ids,
stream_email_user_ids=stream_email_user_ids,
push_notify_user_ids=push_notify_user_ids,
)
for client in get_client_descriptors_for_user(user_profile_id):
if client.accepts_event(user_event):
client.add_event(user_event)
def maybe_enqueue_notifications_for_message_update(user_profile_id: UserProfile,
message_id: int,
stream_name: str,
prior_mention_user_ids: Set[int],
mention_user_ids: Set[int],
presence_idle_user_ids: Set[int],
stream_push_user_ids: Set[int],
stream_email_user_ids: Set[int],
push_notify_user_ids: Set[int]) -> None:
private_message = (stream_name is None)
if private_message:
# We don't do offline notifications for PMs, because
# we already notified the user of the original message
return
if (user_profile_id in prior_mention_user_ids):
# Don't spam people with duplicate mentions. This is
# especially important considering that most message
# edits are simple typo corrections.
return
stream_push_notify = (user_profile_id in stream_push_user_ids)
stream_email_notify = (user_profile_id in stream_email_user_ids)
if stream_push_notify or stream_email_notify:
# Currently we assume that if this flag is set to True, then
# the user already was notified about the earlier message,
# so we short circuit. We may handle this more rigorously
# in the future by looking at something like an AlreadyNotified
# model.
return
# We can have newly mentioned people in an updated message.
mentioned = (user_profile_id in mention_user_ids)
always_push_notify = user_profile_id in push_notify_user_ids
idle = (user_profile_id in presence_idle_user_ids) or \
receiver_is_off_zulip(user_profile_id)
maybe_enqueue_notifications(
user_profile_id=user_profile_id,
message_id=message_id,
private_message=private_message,
mentioned=mentioned,
stream_push_notify=stream_push_notify,
stream_email_notify=stream_email_notify,
stream_name=stream_name,
always_push_notify=always_push_notify,
idle=idle,
already_notified={},
)
def process_notification(notice: Mapping[str, Any]) -> None:
event = notice['event'] # type: Mapping[str, Any]
users = notice['users'] # type: Union[List[int], List[Mapping[str, Any]]]
start_time = time.time()
if event['type'] == "message":
process_message_event(event, cast(Iterable[Mapping[str, Any]], users))
elif event['type'] == "update_message":
process_message_update_event(event, cast(Iterable[Mapping[str, Any]], users))
elif event['type'] == "delete_message":
process_userdata_event(event, cast(Iterable[Mapping[str, Any]], users))
else:
process_event(event, cast(Iterable[int], users))
logging.debug("Tornado: Event %s for %s users took %sms" % (
event['type'], len(users), int(1000 * (time.time() - start_time))))
# Runs in the Django process to send a notification to Tornado.
#
# We use JSON rather than bare form parameters, so that we can represent
# different types and for compatibility with non-HTTP transports.
def send_notification_http(realm: Realm, data: Mapping[str, Any]) -> None:
if settings.TORNADO_SERVER and not settings.RUNNING_INSIDE_TORNADO:
tornado_uri = get_tornado_uri(realm)
requests_client.post(tornado_uri + '/notify_tornado', data=dict(
data = ujson.dumps(data),
secret = settings.SHARED_SECRET))
else:
process_notification(data)
def send_event(realm: Realm, event: Mapping[str, Any],
users: Union[Iterable[int], Iterable[Mapping[str, Any]]]) -> None:
"""`users` is a list of user IDs, or in the case of `message` type
events, a list of dicts describing the users and metadata about
the user/message pair."""
port = get_tornado_port(realm)
queue_json_publish(notify_tornado_queue_name(port),
dict(event=event, users=users),
lambda *args, **kwargs: send_notification_http(realm, *args, **kwargs))
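# Hedged usage sketch (the event payload is illustrative only):
#   send_event(realm, dict(type='heartbeat'), [user_profile.id])
# For 'message' type events, each entry of `users` is instead a dict such as
# {'id': user_profile.id, 'flags': ['mentioned']}, matching what
# process_message_event above consumes.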
|
|
from tkinter import *
from tkinter import messagebox
import sys
import os.path
import math
import random
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from search import *
import utils
import numpy as np
distances = {}
class TSP_problem(Problem):
""" subclass of Problem to define various functions """
def two_opt(self, state):
""" Neighbour generating function for Traveling Salesman Problem """
neighbour_state = state[:]
left = random.randint(0, len(neighbour_state) - 1)
right = random.randint(0, len(neighbour_state) - 1)
if left > right:
left, right = right, left
neighbour_state[left: right + 1] = reversed(neighbour_state[left: right + 1])
return neighbour_state
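    # Hedged example of two_opt (the indices are random in practice): with
    # state = ['A', 'B', 'C', 'D', 'E'] and (left, right) = (1, 3), the slice
    # ['B', 'C', 'D'] is reversed, giving ['A', 'D', 'C', 'B', 'E'].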
def actions(self, state):
""" action that can be excuted in given state """
return [self.two_opt]
def result(self, state, action):
""" result after applying the given action on the given state """
return action(state)
def path_cost(self, c, state1, action, state2):
""" total distance for the Traveling Salesman to be covered if in state2 """
cost = 0
for i in range(len(state2) - 1):
cost += distances[state2[i]][state2[i + 1]]
cost += distances[state2[0]][state2[-1]]
return cost
def value(self, state):
""" value of path cost given negative for the given state """
return -1 * self.path_cost(None, None, None, state)
class TSP_Gui():
""" Class to create gui of Traveling Salesman using simulated annealing where one can
select cities, change speed and temperature. Distances between cities are euclidean
distances between them.
"""
def __init__(self, root, all_cities):
self.root = root
self.vars = []
self.frame_locations = {}
self.calculate_canvas_size()
self.button_text = StringVar()
self.button_text.set("Start")
self.algo_var = StringVar()
self.all_cities = all_cities
self.frame_select_cities = Frame(self.root)
self.frame_select_cities.grid(row=1)
self.frame_canvas = Frame(self.root)
self.frame_canvas.grid(row=2)
Label(self.root, text="Map of Romania", font="Times 13 bold").grid(row=0, columnspan=10)
def create_checkboxes(self, side=LEFT, anchor=W):
""" To select cities which are to be a part of Traveling Salesman Problem """
row_number = 0
column_number = 0
for city in self.all_cities:
var = IntVar()
var.set(1)
Checkbutton(self.frame_select_cities, text=city, variable=var).grid(
row=row_number, column=column_number, sticky=W)
self.vars.append(var)
column_number += 1
if column_number == 10:
column_number = 0
row_number += 1
def create_buttons(self):
""" Create start and quit button """
Button(self.frame_select_cities, textvariable=self.button_text,
command=self.run_traveling_salesman).grid(row=5, column=4, sticky=E + W)
Button(self.frame_select_cities, text='Quit', command=self.on_closing).grid(
row=5, column=5, sticky=E + W)
def create_dropdown_menu(self):
""" Create dropdown menu for algorithm selection """
choices = {'Simulated Annealing', 'Genetic Algorithm', 'Hill Climbing'}
self.algo_var.set('Simulated Annealing')
dropdown_menu = OptionMenu(self.frame_select_cities, self.algo_var, *choices)
dropdown_menu.grid(row=4, column=4, columnspan=2, sticky=E + W)
dropdown_menu.config(width=19)
def run_traveling_salesman(self):
""" Choose selected citites """
cities = []
for i in range(len(self.vars)):
if self.vars[i].get() == 1:
cities.append(self.all_cities[i])
tsp_problem = TSP_problem(cities)
self.button_text.set("Reset")
self.create_canvas(tsp_problem)
def calculate_canvas_size(self):
""" Width and height for canvas """
minx, maxx = sys.maxsize, -1 * sys.maxsize
miny, maxy = sys.maxsize, -1 * sys.maxsize
for value in romania_map.locations.values():
minx = min(minx, value[0])
maxx = max(maxx, value[0])
miny = min(miny, value[1])
maxy = max(maxy, value[1])
        # New locations squeezed to fit inside the map of Romania
for name, coordinates in romania_map.locations.items():
self.frame_locations[name] = (coordinates[0] / 1.2 - minx +
150, coordinates[1] / 1.2 - miny + 165)
canvas_width = maxx - minx + 200
canvas_height = maxy - miny + 200
self.canvas_width = canvas_width
self.canvas_height = canvas_height
def create_canvas(self, problem):
""" creating map with cities """
map_canvas = Canvas(self.frame_canvas, width=self.canvas_width, height=self.canvas_height)
map_canvas.grid(row=3, columnspan=10)
current = Node(problem.initial)
map_canvas.delete("all")
self.romania_image = PhotoImage(file="../images/romania_map.png")
map_canvas.create_image(self.canvas_width / 2, self.canvas_height / 2,
image=self.romania_image)
cities = current.state
for city in cities:
x = self.frame_locations[city][0]
y = self.frame_locations[city][1]
map_canvas.create_oval(x - 3, y - 3, x + 3, y + 3,
fill="red", outline="red")
map_canvas.create_text(x - 15, y - 10, text=city)
self.cost = StringVar()
Label(self.frame_canvas, textvariable=self.cost, relief="sunken").grid(
row=2, columnspan=10)
self.speed = IntVar()
speed_scale = Scale(self.frame_canvas, from_=500, to=1, orient=HORIZONTAL,
variable=self.speed, label="Speed ----> ", showvalue=0, font="Times 11",
relief="sunken", cursor="gumby")
speed_scale.grid(row=1, columnspan=5, sticky=N + S + E + W)
if self.algo_var.get() == 'Simulated Annealing':
self.temperature = IntVar()
temperature_scale = Scale(self.frame_canvas, from_=100, to=0, orient=HORIZONTAL,
length=200, variable=self.temperature, label="Temperature ---->",
font="Times 11", relief="sunken", showvalue=0, cursor="gumby")
temperature_scale.grid(row=1, column=5, columnspan=5, sticky=N + S + E + W)
self.simulated_annealing_with_tunable_T(problem, map_canvas)
elif self.algo_var.get() == 'Genetic Algorithm':
self.mutation_rate = DoubleVar()
self.mutation_rate.set(0.05)
mutation_rate_scale = Scale(self.frame_canvas, from_=0, to=1, orient=HORIZONTAL,
length=200, variable=self.mutation_rate, label='Mutation Rate ---->',
font='Times 11', relief='sunken', showvalue=0, cursor='gumby', resolution=0.001)
mutation_rate_scale.grid(row=1, column=5, columnspan=5, sticky='nsew')
self.genetic_algorithm(problem, map_canvas)
elif self.algo_var.get() == 'Hill Climbing':
self.no_of_neighbors = IntVar()
self.no_of_neighbors.set(100)
no_of_neighbors_scale = Scale(self.frame_canvas, from_=10, to=1000, orient=HORIZONTAL,
length=200, variable=self.no_of_neighbors, label='Number of neighbors ---->',
font='Times 11',relief='sunken', showvalue=0, cursor='gumby')
no_of_neighbors_scale.grid(row=1, column=5, columnspan=5, sticky='nsew')
self.hill_climbing(problem, map_canvas)
def exp_schedule(k=100, lam=0.03, limit=1000):
""" One possible schedule function for simulated annealing """
return lambda t: (k * math.exp(-lam * t) if t < limit else 0)
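    # With the defaults above, the schedule is T(t) = 100 * exp(-0.03 * t) for
    # t < 1000 and 0 otherwise; e.g. T(0) = 100 and T(100) ~= 4.98. In
    # simulated_annealing_with_tunable_T below, t is the current value of the
    # temperature slider.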
def simulated_annealing_with_tunable_T(self, problem, map_canvas, schedule=exp_schedule()):
""" Simulated annealing where temperature is taken as user input """
current = Node(problem.initial)
        while True:
T = schedule(self.temperature.get())
if T == 0:
return current.state
neighbors = current.expand(problem)
if not neighbors:
return current.state
            next_node = random.choice(neighbors)
            delta_e = problem.value(next_node.state) - problem.value(current.state)
            if delta_e > 0 or probability(math.exp(delta_e / T)):
                map_canvas.delete("poly")
                current = next_node
self.cost.set("Cost = " + str('%0.3f' % (-1 * problem.value(current.state))))
points = []
for city in current.state:
points.append(self.frame_locations[city][0])
points.append(self.frame_locations[city][1])
map_canvas.create_polygon(points, outline='red', width=3, fill='', tag="poly")
map_canvas.update()
map_canvas.after(self.speed.get())
def genetic_algorithm(self, problem, map_canvas):
""" Genetic Algorithm modified for the given problem """
def init_population(pop_number, gene_pool, state_length):
""" initialize population """
population = []
for i in range(pop_number):
population.append(utils.shuffled(gene_pool))
return population
def recombine(state_a, state_b):
""" recombine two problem states """
start = random.randint(0, len(state_a) - 1)
end = random.randint(start + 1, len(state_a))
new_state = state_a[start:end]
for city in state_b:
if city not in new_state:
new_state.append(city)
return new_state
def mutate(state, mutation_rate):
""" mutate problem states """
if random.uniform(0, 1) < mutation_rate:
sample = random.sample(range(len(state)), 2)
state[sample[0]], state[sample[1]] = state[sample[1]], state[sample[0]]
return state
def fitness_fn(state):
""" calculate fitness of a particular state """
fitness = problem.value(state)
return int((5600 + fitness) ** 2)
current = Node(problem.initial)
population = init_population(100, current.state, len(current.state))
all_time_best = current.state
        while True:
population = [mutate(recombine(*select(2, population, fitness_fn)), self.mutation_rate.get()) for i in range(len(population))]
current_best = utils.argmax(population, key=fitness_fn)
if fitness_fn(current_best) > fitness_fn(all_time_best):
all_time_best = current_best
self.cost.set("Cost = " + str('%0.3f' % (-1 * problem.value(all_time_best))))
map_canvas.delete('poly')
points = []
for city in current_best:
points.append(self.frame_locations[city][0])
points.append(self.frame_locations[city][1])
map_canvas.create_polygon(points, outline='red', width=1, fill='', tag='poly')
best_points = []
for city in all_time_best:
best_points.append(self.frame_locations[city][0])
best_points.append(self.frame_locations[city][1])
map_canvas.create_polygon(best_points, outline='red', width=3, fill='', tag='poly')
map_canvas.update()
map_canvas.after(self.speed.get())
def hill_climbing(self, problem, map_canvas):
""" hill climbing where number of neighbors is taken as user input """
def find_neighbors(state, number_of_neighbors=100):
""" finds neighbors using two_opt method """
neighbors = []
for i in range(number_of_neighbors):
new_state = problem.two_opt(state)
neighbors.append(Node(new_state))
state = new_state
return neighbors
current = Node(problem.initial)
        while True:
neighbors = find_neighbors(current.state, self.no_of_neighbors.get())
neighbor = utils.argmax_random_tie(neighbors, key=lambda node: problem.value(node.state))
map_canvas.delete('poly')
points = []
for city in current.state:
points.append(self.frame_locations[city][0])
points.append(self.frame_locations[city][1])
map_canvas.create_polygon(points, outline='red', width=3, fill='', tag='poly')
neighbor_points = []
for city in neighbor.state:
neighbor_points.append(self.frame_locations[city][0])
neighbor_points.append(self.frame_locations[city][1])
map_canvas.create_polygon(neighbor_points, outline='red', width=1, fill='', tag='poly')
map_canvas.update()
map_canvas.after(self.speed.get())
if problem.value(neighbor.state) > problem.value(current.state):
current.state = neighbor.state
self.cost.set("Cost = " + str('%0.3f' % (-1 * problem.value(current.state))))
def on_closing(self):
if messagebox.askokcancel('Quit', 'Do you want to quit?'):
self.root.destroy()
def main():
all_cities = []
for city in romania_map.locations.keys():
distances[city] = {}
all_cities.append(city)
all_cities.sort()
# distances['city1']['city2'] contains euclidean distance between their coordinates
for name_1, coordinates_1 in romania_map.locations.items():
for name_2, coordinates_2 in romania_map.locations.items():
distances[name_1][name_2] = np.linalg.norm(
[coordinates_1[0] - coordinates_2[0], coordinates_1[1] - coordinates_2[1]])
distances[name_2][name_1] = np.linalg.norm(
[coordinates_1[0] - coordinates_2[0], coordinates_1[1] - coordinates_2[1]])
root = Tk()
root.title("Traveling Salesman Problem")
cities_selection_panel = TSP_Gui(root, all_cities)
cities_selection_panel.create_checkboxes()
cities_selection_panel.create_buttons()
cities_selection_panel.create_dropdown_menu()
root.protocol('WM_DELETE_WINDOW', cities_selection_panel.on_closing)
root.mainloop()
if __name__ == '__main__':
main()
|
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import IECore
import Gaffer
import GafferUI
def appendDefinitions( menuDefinition, prefix="" ) :
menuDefinition.append( prefix + "/Undo", { "command" : undo, "shortCut" : "Ctrl+Z", "active" : __undoAvailable } )
menuDefinition.append( prefix + "/Redo", { "command" : redo, "shortCut" : "Shift+Ctrl+Z", "active" : __redoAvailable } )
menuDefinition.append( prefix + "/UndoDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Cut", { "command" : cut, "shortCut" : "Ctrl+X", "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Copy", { "command" : copy, "shortCut" : "Ctrl+C", "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Paste", { "command" : paste, "shortCut" : "Ctrl+V", "active" : __pasteAvailable } )
menuDefinition.append( prefix + "/Delete", { "command" : delete, "shortCut" : "Backspace, Delete", "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/CutCopyPasteDeleteDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Find...", { "command" : find, "shortCut" : "Ctrl+F" } )
menuDefinition.append( prefix + "/FindDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Arrange", { "command" : arrange, "shortCut" : "Ctrl+L" } )
menuDefinition.append( prefix + "/ArrangeDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Select All", { "command" : selectAll, "shortCut" : "Ctrl+A" } )
menuDefinition.append( prefix + "/Select None", { "command" : selectNone, "shortCut" : "Shift+Ctrl+A", "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Select Connected/Inputs", { "command" : selectInputs, "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Select Connected/Add Inputs", { "command" : selectAddInputs, "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Select Connected/InputsDivider", { "divider" : True } )
menuDefinition.append( prefix + "/Select Connected/Outputs", { "command" : selectOutputs, "active" : __selectionAvailable } )
menuDefinition.append( prefix + "/Select Connected/Add Outputs", { "command" : selectAddOutputs, "active" : __selectionAvailable } )
## A function suitable as the command for an Edit/Undo menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def undo( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
script.undo()
## A function suitable as the command for an Edit/Redo menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def redo( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
script.redo()
## A function suitable as the command for an Edit/Cut menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def cut( menu ) :
script, parent = __scriptAndParent( menu )
with Gaffer.UndoContext( script ) :
script.cut( parent, script.selection() )
## A function suitable as the command for an Edit/Copy menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def copy( menu ) :
script, parent = __scriptAndParent( menu )
script.copy( parent, script.selection() )
## A function suitable as the command for an Edit/Paste menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def paste( menu ) :
script, parent = __scriptAndParent( menu )
originalSelection = Gaffer.StandardSet( iter( script.selection() ) )
with Gaffer.UndoContext( script ) :
script.paste( parent )
# try to get the new nodes connected to the original selection
nodeGraph = __nodeGraph( menu, focussedOnly=False )
if nodeGraph is None :
return
nodeGraph.graphGadget().getLayout().connectNodes( nodeGraph.graphGadget(), script.selection(), originalSelection )
# position the new nodes sensibly
bound = nodeGraph.bound()
mousePosition = GafferUI.Widget.mousePosition()
if bound.intersects( mousePosition ) :
fallbackPosition = mousePosition - bound.min
else :
fallbackPosition = bound.center() - bound.min
fallbackPosition = nodeGraph.graphGadgetWidget().getViewportGadget().rasterToGadgetSpace(
IECore.V2f( fallbackPosition.x, fallbackPosition.y ),
gadget = nodeGraph.graphGadget()
).p0
fallbackPosition = IECore.V2f( fallbackPosition.x, fallbackPosition.y )
nodeGraph.graphGadget().getLayout().positionNodes( nodeGraph.graphGadget(), script.selection(), fallbackPosition )
nodeGraph.frame( script.selection(), extend = True )
## A function suitable as the command for an Edit/Delete menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def delete( menu ) :
script, parent = __scriptAndParent( menu )
with Gaffer.UndoContext( script ) :
script.deleteNodes( parent, script.selection() )
## A function suitable as the command for an Edit/Find menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def find( menu ) :
script, parent = __scriptAndParent( menu )
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
try :
findDialogue = scriptWindow.__findDialogue
except AttributeError :
findDialogue = GafferUI.NodeFinderDialogue( parent )
scriptWindow.addChildWindow( findDialogue )
scriptWindow.__findDialogue = findDialogue
findDialogue.setScope( parent )
findDialogue.setVisible( True )
## A function suitable as the command for an Edit/Arrange menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def arrange( menu ) :
script, parent = __scriptAndParent( menu )
nodeGraph = __nodeGraph( menu, focussedOnly=False )
if not nodeGraph :
return
graph = nodeGraph.graphGadget()
nodes = script.selection()
if not nodes :
nodes = Gaffer.StandardSet( graph.getRoot().children( Gaffer.Node.staticTypeId() ) )
with Gaffer.UndoContext( script ) :
graph.getLayout().layoutNodes( graph, nodes )
## A function suitable as the command for an Edit/Select All menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def selectAll( menu ) :
script, parent = __scriptAndParent( menu )
for c in parent.children( Gaffer.Node.staticTypeId() ) :
script.selection().add( c )
## A function suitable as the command for an Edit/Select None menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def selectNone( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
script.selection().clear()
## The command function for the default "Edit/Select Connected/Inputs" menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def selectInputs( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
inputs = Gaffer.StandardSet()
for node in script.selection() :
__inputNodes( node, inputs )
selection = script.selection()
selection.clear()
for node in inputs :
selection.add( node )
## The command function for the default "Edit/Select Connected/Add Inputs" menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def selectAddInputs( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
inputs = Gaffer.StandardSet()
for node in script.selection() :
__inputNodes( node, inputs )
selection = script.selection()
for node in inputs :
selection.add( node )
## The command function for the default "Edit/Select Connected/Outputs" menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def selectOutputs( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
outputs = Gaffer.StandardSet()
for node in script.selection() :
__outputNodes( node, outputs )
selection = script.selection()
selection.clear()
for node in outputs :
selection.add( node )
## The command function for the default "Edit/Select Connected/Add Outputs" menu item. It must
# be invoked from a menu that has a ScriptWindow in its ancestry.
def selectAddOutputs( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
outputs = Gaffer.StandardSet()
for node in script.selection() :
__outputNodes( node, outputs )
selection = script.selection()
for node in outputs :
selection.add( node )
def __selectionAvailable( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
return True if scriptWindow.scriptNode().selection().size() else False
def __pasteAvailable( menu ) :
scriptNode = menu.ancestor( GafferUI.ScriptWindow ).scriptNode()
root = scriptNode.ancestor( Gaffer.ApplicationRoot.staticTypeId() )
return isinstance( root.getClipboardContents(), IECore.StringData )
def __nodeGraph( menu, focussedOnly=True ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
nodeGraph = None
## \todo Does this belong as a Window.focussedChild() method?
focusWidget = GafferUI.Widget._owner( scriptWindow._qtWidget().focusWidget() )
if focusWidget is not None :
nodeGraph = focusWidget.ancestor( GafferUI.NodeGraph )
if nodeGraph is not None or focussedOnly :
return nodeGraph
nodeGraphs = scriptWindow.getLayout().editors( GafferUI.NodeGraph )
return nodeGraphs[0] if nodeGraphs else None
def __scriptAndParent( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
script = scriptWindow.scriptNode()
nodeGraph = __nodeGraph( menu )
if nodeGraph is not None :
parent = nodeGraph.graphGadget().getRoot()
else :
parent = script
return script, parent
def __undoAvailable( menu ) :
scriptNode = menu.ancestor( GafferUI.ScriptWindow ).scriptNode()
return scriptNode.undoAvailable()
def __redoAvailable( menu ) :
scriptNode = menu.ancestor( GafferUI.ScriptWindow ).scriptNode()
return scriptNode.redoAvailable()
def __inputNodes( node, inputNodes ) :
def __walkPlugs( parent ) :
for plug in parent :
if isinstance( plug, Gaffer.Plug ) :
inputPlug = plug.getInput()
if inputPlug is not None :
inputNode = inputPlug.node()
if inputNode is not None and not inputNode.isSame( node ) :
inputNodes.add( inputNode )
else :
__walkPlugs( plug )
__walkPlugs( node )
def __outputNodes( node, outputNodes ) :
def __walkPlugs( parent ) :
for plug in parent :
if isinstance( plug, Gaffer.Plug ) :
outputPlugs = plug.outputs()
if outputPlugs :
for outputPlug in outputPlugs :
outputNode = outputPlug.node()
if outputNode is not None and not outputNode.isSame( node ) :
outputNodes.add( outputNode )
else :
__walkPlugs( plug )
__walkPlugs( node )
|
|
from collections import defaultdict
import hashlib
import json
import logging
from pprint import pformat
import random
import sys
from threading import Thread
import traceback
import xmlrpclib
import gnupg
import obelisk
from bitcoin.main import privkey_to_pubkey, random_key
from pysqlcipher.dbapi2 import OperationalError, DatabaseError
import zmq
from zmq.eventloop import ioloop
from zmq.eventloop.ioloop import PeriodicCallback
import connection
from crypto_util import Cryptor
from dht import DHT
import network_util
from protocol import proto_response_pubkey
class TransportLayer(object):
"""TransportLayer manages a list of peers."""
def __init__(self, ob_ctx, guid, nickname=None):
self.peers = {}
self.callbacks = defaultdict(list)
self.timeouts = []
self.port = ob_ctx.server_port
self.ip = ob_ctx.server_ip
self.guid = guid
self.market_id = ob_ctx.market_id
self.nickname = nickname
self.handler = None
self.uri = network_util.get_peer_url(self.ip, self.port)
self.listener = None
# Create one ZeroMQ context to be reused and reduce overhead
self.ctx = zmq.Context.instance()
self.log = logging.getLogger(
'[%s] %s' % (ob_ctx.market_id, self.__class__.__name__)
)
def add_callbacks(self, callbacks):
for section, callback in callbacks:
self.callbacks[section] = []
self.add_callback(section, callback)
def set_websocket_handler(self, handler):
self.handler = handler
def add_callback(self, section, callback):
if callback not in self.callbacks[section]:
self.callbacks[section].append(callback)
def trigger_callbacks(self, section, *data):
"""Run all callbacks in specified section."""
for cb in self.callbacks[section]:
if cb['validator_cb'](*data):
cb['cb'](*data)
# Run all callbacks registered under the 'all' section. Don't duplicate
# calls if the specified section was 'all'.
if not section == 'all':
for cb in self.callbacks['all']:
if cb['validator_cb'](*data):
cb['cb'](*data)
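# A minimal registration sketch (hypothetical handler names): each entry
# passed to add_callback() is a dict holding a 'cb' handler and a
# 'validator_cb' predicate, mirroring what start_listener() builds below:
#
#   transport.add_callback('hello', {
#       'cb': on_hello_handler,                        # hypothetical handler
#       'validator_cb': lambda msg: isinstance(msg, dict),
#   })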
class CryptoTransportLayer(TransportLayer):
def __init__(self, ob_ctx, db):
self.ob_ctx = ob_ctx
self.log = logging.getLogger(
'[%s] %s' % (ob_ctx.market_id, self.__class__.__name__)
)
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
self.db = db
self.bitmessage_api = None
if (ob_ctx.bm_user, ob_ctx.bm_pass, ob_ctx.bm_port) != (None, None, -1):
if not self._connect_to_bitmessage():
self.log.info('Bitmessage not installed or started')
self.market_id = ob_ctx.market_id
self.nick_mapping = {}
self.uri = network_util.get_peer_url(ob_ctx.server_ip, ob_ctx.server_port)
self.ip = ob_ctx.server_ip
self.nickname = ""
self.dev_mode = ob_ctx.dev_mode
self.all_messages = (
'hello',
'findNode',
'findNodeResponse',
'store'
)
self._setup_settings()
ob_ctx.market_id = self.market_id
self.dht = DHT(self, self.market_id, self.settings, self.db)
TransportLayer.__init__(self, ob_ctx, self.guid, self.nickname)
self.start_listener()
if ob_ctx.enable_ip_checker and not ob_ctx.seed_mode and not ob_ctx.dev_mode:
self.start_ip_address_checker()
def start_listener(self):
self.add_callbacks([
(
msg,
{
'cb': getattr(self, 'on_%s' % msg),
'validator_cb': getattr(self, 'validate_on_%s' % msg)
}
)
for msg in self.all_messages
])
self.listener = connection.CryptoPeerListener(
self.ip, self.port, self.pubkey, self.secret, self.ctx,
self.guid,
self._on_message
)
self.listener.set_ok_msg({
'type': 'ok',
'senderGUID': self.guid,
'pubkey': self.pubkey,
'senderNick': self.nickname
})
self.listener.listen()
def start_ip_address_checker(self):
'''Checks for possible public IP change'''
if self.ob_ctx.enable_ip_checker:
self.caller = PeriodicCallback(self._ip_updater_periodic_callback, 5000, ioloop.IOLoop.instance())
self.caller.start()
self.log.info("IP_CHECKER_ENABLED: Periodic IP Address Checker started.")
def _ip_updater_periodic_callback(self):
if self.ob_ctx.enable_ip_checker:
new_ip = network_util.get_my_ip()
if not new_ip or new_ip == self.ip:
return
self.ob_ctx.server_ip = new_ip
self.ip = new_ip
if self.listener is not None:
self.listener.set_ip_address(new_ip)
self.dht._iterativeFind(self.guid, [], 'findNode')
def save_peer_to_db(self, peer_tuple):
uri = peer_tuple[0]
pubkey = peer_tuple[1]
guid = peer_tuple[2]
nickname = peer_tuple[3]
# Update query
self.db.deleteEntries("peers", {"uri": uri, "guid": guid}, "OR")
if guid is not None:
self.db.insertEntry("peers", {
"uri": uri,
"pubkey": pubkey,
"guid": guid,
"nickname": nickname,
"market_id": self.market_id
})
def _connect_to_bitmessage(self):
# Get bitmessage going
# First, try to find a local instance
result = False
bm_user = self.ob_ctx.bm_user
bm_pass = self.ob_ctx.bm_pass
bm_port = self.ob_ctx.bm_port
try:
self.log.info(
'[_connect_to_bitmessage] Connecting to Bitmessage on port %s',
bm_port
)
self.bitmessage_api = xmlrpclib.ServerProxy(
"http://{}:{}@localhost:{}/".format(bm_user, bm_pass, bm_port),
verbose=0
)
# Simple liveness check: ask the Bitmessage XML-RPC API to evaluate add(2, 3).
result = self.bitmessage_api.add(2, 3)
self.log.info(
"[_connect_to_bitmessage] Bitmessage API is live: %s",
result
)
# If we failed, fall back to starting our own
except Exception as e:
self.log.info("Failed to connect to bitmessage instance: %s", e)
self.bitmessage_api = None
return result
def validate_on_hello(self, msg):
self.log.debug('Validating ping message.')
return True
def on_hello(self, msg):
self.log.info('Pinged %s', json.dumps(msg, ensure_ascii=False))
def validate_on_store(self, msg):
self.log.debug('Validating store value message.')
return True
def on_store(self, msg):
self.dht._on_storeValue(msg)
def validate_on_findNode(self, msg):
self.log.debug('Validating find node message.')
return True
def on_findNode(self, msg):
self.dht.on_find_node(msg)
def validate_on_findNodeResponse(self, msg):
self.log.debug('Validating find node response message.')
return True
def on_findNodeResponse(self, msg):
self.dht.on_findNodeResponse(self, msg)
def _setup_settings(self):
try:
self.settings = self.db.selectEntries("settings", {"market_id": self.market_id})
except (OperationalError, DatabaseError) as e:
print e
raise SystemExit("database file %s corrupt or empty - cannot continue" % self.db.db_path)
if len(self.settings) == 0:
self.settings = {"market_id": self.market_id, "welcome": "enable"}
self.db.insertEntry("settings", self.settings)
else:
self.settings = self.settings[0]
# Generate PGP key during initial setup or if previous PGP gen failed
if not self.settings.get('PGPPubKey'):
try:
self.log.info('Generating PGP keypair. This may take several minutes...')
print 'Generating PGP keypair. This may take several minutes...'
gpg = gnupg.GPG()
input_data = gpg.gen_key_input(key_type="RSA",
key_length=2048,
name_email='[email protected]',
name_comment="Autogenerated by Open Bazaar",
passphrase="P@ssw0rd")
assert input_data is not None
key = gpg.gen_key(input_data)
assert key is not None
pubkey_text = gpg.export_keys(key.fingerprint)
newsettings = {"PGPPubKey": pubkey_text, "PGPPubkeyFingerprint": key.fingerprint}
self.db.updateEntries("settings", newsettings, {"market_id": self.market_id})
self.settings.update(newsettings)
self.log.info('PGP keypair generated.')
except Exception as e:
sys.exit("Encountered a problem with GPG: %s" % e)
if not self.settings.get('pubkey'):
# Generate Bitcoin keypair
self._generate_new_keypair()
if not self.settings.get('nickname'):
newsettings = {'nickname': 'Default'}
self.db.updateEntries('settings', newsettings, {"market_id": self.market_id})
self.settings.update(newsettings)
self.nickname = self.settings.get('nickname', '')
self.secret = self.settings.get('secret', '')
self.pubkey = self.settings.get('pubkey', '')
self.privkey = self.settings.get('privkey')
self.btc_pubkey = privkey_to_pubkey(self.privkey)
self.guid = self.settings.get('guid', '')
self.sin = self.settings.get('sin', '')
self.bitmessage = self.settings.get('bitmessage', '')
if not self.settings.get('bitmessage'):
# Generate Bitmessage address
if self.bitmessage_api is not None:
self._generate_new_bitmessage_address()
self.cryptor = Cryptor(pubkey_hex=self.pubkey, privkey_hex=self.secret)
# In case user wants to override with command line passed bitmessage values
if self.ob_ctx.bm_user is not None and \
self.ob_ctx.bm_pass is not None and \
self.ob_ctx.bm_port is not None:
self._connect_to_bitmessage()
def _generate_new_keypair(self):
# NOTE: random.randrange() is not a cryptographically secure source;
# key material would normally come from os.urandom()/SystemRandom.
secret = str(random.randrange(2 ** 256))
self.secret = hashlib.sha256(secret).hexdigest()
self.pubkey = privkey_to_pubkey(self.secret)
self.privkey = random_key()
self.btc_pubkey = privkey_to_pubkey(self.privkey)
print 'PUBLIC KEY: ', self.btc_pubkey
# Generate SIN
sha_hash = hashlib.sha256()
sha_hash.update(self.pubkey)
ripe_hash = hashlib.new('ripemd160')
ripe_hash.update(sha_hash.digest())
self.guid = ripe_hash.hexdigest()
self.sin = obelisk.EncodeBase58Check('\x0F\x02%s' % ripe_hash.digest())
newsettings = {
"secret": self.secret,
"pubkey": self.pubkey,
"privkey": self.privkey,
"guid": self.guid,
"sin": self.sin
}
self.db.updateEntries("settings", newsettings, {"market_id": self.market_id})
self.settings.update(newsettings)
def _generate_new_bitmessage_address(self):
# Use the guid generated previously as the key
self.bitmessage = self.bitmessage_api.createRandomAddress(
self.guid.encode('base64'),
False,
1.05,
1.1111
)
newsettings = {"bitmessage": self.bitmessage}
self.db.updateEntries("settings", newsettings, {"market_id": self.market_id})
self.settings.update(newsettings)
def join_network(self, seeds=None, callback=None):
if seeds is None:
seeds = []
self.log.info('Joining network')
# Connect up through seed servers
for idx, seed in enumerate(seeds):
seeds[idx] = network_util.get_peer_url(seed, "12345")
# Connect to persisted peers
db_peers = self.get_past_peers()
known_peers = list(set(seeds).union(db_peers))
for known_peer in known_peers:
self.dht.add_peer(self, known_peer)
# Populate routing table by searching for self
if known_peers:
# Check every one second if we are connected
# We could use a PeriodicCallback but I think this is simpler
# since this will be repeated in most cases less than 10 times
def join_callback():
# If we are not connected to any node, reschedule a check
if not self.dht.activePeers:
ioloop.IOLoop.instance().call_later(1, join_callback)
else:
self.search_for_my_node()
join_callback()
if callback is not None:
callback('Joined')
def get_past_peers(self):
result = self.db.selectEntries("peers", {"market_id": self.market_id})
return [peer['uri'] for peer in result]
def search_for_my_node(self):
self.log.info('Searching for myself')
self.dht._iterativeFind(self.guid, self.dht.knownNodes, 'findNode')
def get_crypto_peer(self, guid=None, uri=None, pubkey=None, nickname=None):
if guid == self.guid:
self.log.error('Cannot get CryptoPeerConnection for your own node')
return
self.log.debug(
'Getting CryptoPeerConnection'
'\nGUID: %s'
'\nURI: %s'
'\nPubkey:%s'
'\nNickname:%s',
guid, uri, pubkey, nickname
)
return connection.CryptoPeerConnection(
self, uri, pubkey, guid=guid, nickname=nickname
)
def respond_pubkey_if_mine(self, nickname, ident_pubkey):
if ident_pubkey != self.pubkey:
self.log.info("Public key does not match your identity")
return
# Return signed pubkey
pubkey = self.cryptor.pubkey # XXX: A Cryptor does not have such a field.
ec_key = obelisk.EllipticCurveKey()
ec_key.set_secret(self.secret)
digest = obelisk.Hash(pubkey)
signature = ec_key.sign(digest)
# Send array of nickname, pubkey, signature to transport layer
self.send(proto_response_pubkey(nickname, pubkey, signature))
def send(self, data, send_to=None, callback=None):
self.log.debug("Outgoing Data: %s %s", data, send_to)
# Directed message
if send_to is not None:
peer = self.dht.routingTable.getContact(send_to)
if peer is None:
for activePeer in self.dht.activePeers:
if activePeer.guid == send_to:
peer = activePeer
break
if peer:
self.log.debug('Directed Data (%s): %s', send_to, data)
try:
peer.send(data, callback=callback)
except Exception as e:
self.log.error('Not sending message directly to peer %s', e)
else:
self.log.error('No peer found')
else:
# FindKey and then send
for peer in self.dht.activePeers:
try:
routing_peer = self.dht.routingTable.getContact(peer.guid)
if routing_peer is None:
self.dht.routingTable.addContact(peer)
routing_peer = peer
data['senderGUID'] = self.guid
data['pubkey'] = self.pubkey
def cb(msg):
self.log.debug('Message Back: \n%s', pformat(msg))
routing_peer.send(data, cb)
except Exception:
self.log.info("Error sending over peer!")
traceback.print_exc()
def _on_message(self, msg):
# Dispatch application-level callbacks.
# `msg` arrives here as an already-decoded dict from the listener.
pubkey = msg.get('pubkey')
uri = msg.get('uri')
guid = msg.get('senderGUID')
nickname = msg.get('senderNick')[:120]
self.log.info('On Message: %s', json.dumps(msg, ensure_ascii=False))
self.dht.add_peer(self, uri, pubkey, guid, nickname)
t = Thread(target=self.trigger_callbacks, args=(msg['type'], msg,))
t.start()
def store(self, *args, **kwargs):
"""
Store or republish data.
Refer to the dht module (iterativeStore()) for further details.
"""
self.dht.iterativeStore(*args, **kwargs)
def shutdown(self):
print "CryptoTransportLayer.shutdown()!"
print "Notice: explicit DHT Shutdown not implemented."
try:
if self.bitmessage_api is not None:
self.bitmessage_api.close()
except Exception as e:
# It might not even be open; we can't do much more on our
# way out if exception is thrown here.
self.log.error(
"Could not shutdown bitmessage_api's ServerProxy: %s", e.message
)
|
|
#!/usr/bin/env pypy
import math
perm = [151,160,137,91,90,15,
131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23,
190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33,
88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166,
77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,
102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196,
135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123,
5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42,
223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9,
129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228,
251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107,
49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254,
138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180,
151,160,137,91,90,15,
131,13,201,95,96,53,194,233,7,225,140,36,103,30,69,142,8,99,37,240,21,10,23,
190, 6,148,247,120,234,75,0,26,197,62,94,252,219,203,117,35,11,32,57,177,33,
88,237,149,56,87,174,20,125,136,171,168, 68,175,74,165,71,134,139,48,27,166,
77,146,158,231,83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,
102,143,54, 65,25,63,161, 1,216,80,73,209,76,132,187,208, 89,18,169,200,196,
135,130,116,188,159,86,164,100,109,198,173,186, 3,64,52,217,226,250,124,123,
5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,58,17,182,189,28,42,
223,183,170,213,119,248,152, 2,44,154,163, 70,221,153,101,155,167, 43,172,9,
129,22,39,253, 19,98,108,110,79,113,224,232,178,185, 112,104,218,246,97,228,
251,34,242,193,238,210,144,12,191,179,162,241, 81,51,145,235,249,14,239,107,
49,192,214, 31,181,199,106,157,184, 84,204,176,115,121,50,45,127, 4,150,254,
138,236,205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180]
grad3 = [[0,1,1],[0,1,-1],[0,-1,1],[0,-1,-1],
[1,0,1],[1,0,-1],[-1,0,1],[-1,0,-1],
[1,1,0],[1,-1,0],[-1,1,0],[-1,-1,0], # 12 cube edges
[1,0,-1],[-1,0,-1],[0,-1,1],[0,1,1]] # 4 more to make 16
simplex4 = [[0,64,128,192],[0,64,192,128],[0,0,0,0],
[0,128,192,64],[0,0,0,0],[0,0,0,0],[0,0,0,0],[64,128,192,0],
[0,128,64,192],[0,0,0,0],[0,192,64,128],[0,192,128,64],
[0,0,0,0],[0,0,0,0],[0,0,0,0],[64,192,128,0],
[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],
[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],
[64,128,0,192],[0,0,0,0],[64,192,0,128],[0,0,0,0],
[0,0,0,0],[0,0,0,0],[128,192,0,64],[128,192,64,0],
[64,0,128,192],[64,0,192,128],[0,0,0,0],[0,0,0,0],
[0,0,0,0],[128,0,192,64],[0,0,0,0],[128,64,192,0],
[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],
[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0],
[128,0,64,192],[0,0,0,0],[0,0,0,0],[0,0,0,0],
[192,0,64,128],[192,0,128,64],[0,0,0,0],[192,64,128,0],
[128,64,0,192],[0,0,0,0],[0,0,0,0],[0,0,0,0],
[192,64,0,128],[0,0,0,0],[192,128,0,64],[192,128,64,0]]
def step(a, b):
return 0 if (a > b) else 1;
def dot(g, x, y, z):
return g[0]*x + g[1]*y + g[2]*z;
def perm3(x, y, z):
i = perm[(x + perm[y]) & 0xFF]
return perm[(i + perm[z]) & 0xFF]
def simplex(x, y, z):
# Simple skewing factors for the 3D case
F3 = 0.333333333;
G3 = 0.166666667;
# n0..n3: noise contributions from the four simplex corners
# Skew the input space to determine which simplex cell we're in
s = (x+y+z)*F3; # Very nice and simple skew factor for 3D
i = int(math.floor(x + s));
j = int(math.floor(y + s));
k = int(math.floor(z + s));
t = (i+j+k)*G3;
X0 = i-t; # Unskew the cell origin back to (x,y,z) space
Y0 = j-t;
Z0 = k-t;
x0 = x-X0; # The x,y,z distances from the cell origin
y0 = y-Y0;
z0 = z-Z0;
# For the 3D case, the simplex shape is a slightly irregular tetrahedron.
# Determine which simplex we are in.
# i1,j1,k1: offsets for the second corner of the simplex in (i,j,k) coords
# i2,j2,k2: offsets for the third corner of the simplex in (i,j,k) coords
# This code would benefit from a backport from the GLSL version, which
# replaces the branching below with a lookup into simplex4[] keyed on the
# x>y, x>z and y>z comparisons.
if x0>=y0:
if y0>=z0:
i1=1; j1=0; k1=0; i2=1; j2=1; k2=0; # X Y Z order
elif x0>=z0:
i1=1; j1=0; k1=0; i2=1; j2=0; k2=1; # X Z Y order
else:
i1=0; j1=0; k1=1; i2=1; j2=0; k2=1; # // Z X Y order
else:
if y0<z0:
i1=0; j1=0; k1=1; i2=0; j2=1; k2=1; # Z Y X order
elif x0<z0:
i1=0; j1=1; k1=0; i2=0; j2=1; k2=1; # Y Z X order
else:
i1=0; j1=1; k1=0; i2=1; j2=1; k2=0; # Y X Z order
# A step of (1,0,0) in (i,j,k) means a step of (1-c,-c,-c) in (x,y,z),
# a step of (0,1,0) in (i,j,k) means a step of (-c,1-c,-c) in (x,y,z), and
# a step of (0,0,1) in (i,j,k) means a step of (-c,-c,1-c) in (x,y,z), where
# c = 1/6.
x1 = x0 - i1 + G3; # Offsets for second corner in (x,y,z) coords
y1 = y0 - j1 + G3;
z1 = z0 - k1 + G3;
x2 = x0 - i2 + 2.0*G3; # Offsets for third corner in (x,y,z) coords
y2 = y0 - j2 + 2.0*G3;
z2 = z0 - k2 + 2.0*G3;
x3 = x0 - 1.0 + 3.0*G3; # Offsets for last corner in (x,y,z) coords
y3 = y0 - 1.0 + 3.0*G3;
z3 = z0 - 1.0 + 3.0*G3;
# Wrap the integer indices at 256, to avoid indexing perm[] out of bounds
ii = i % 256;
jj = j % 256;
kk = k % 256;
if ii < 0:
ii += 256;
if jj < 0:
jj += 256;
if kk < 0:
kk += 256;
gi0 = perm3(ii, jj, kk) % 12;
gi1 = perm3(ii+i1, jj+j1, kk+k1) % 12;
gi2 = perm3(ii+i2, jj+j2, kk+k2) % 12;
gi3 = perm3(ii+1, jj+1, kk+1) % 12;
# Calculate the contribution from the four corners
t0 = 0.6 - x0*x0 - y0*y0 - z0*z0;
if t0 < 0.0:
n0 = 0.0;
else:
t0 *= t0;
#n0 = t0 * t0 * grad(perm[ii+perm[jj+perm[kk]]], x0, y0, z0);
n0 = t0 * t0 * dot(grad3[gi0], x0, y0, z0);
t1 = 0.6 - x1*x1 - y1*y1 - z1*z1;
if t1 < 0.0:
n1 = 0.0;
else:
t1 *= t1;
#n1 = t1 * t1 * grad(perm[ii+i1+perm[jj+j1+perm[kk+k1]]], x1, y1, z1);
n1 = t1 * t1 * dot(grad3[gi1], x1, y1, z1);
t2 = 0.6 - x2*x2 - y2*y2 - z2*z2;
if t2 < 0.0:
n2 = 0.0;
else:
t2 *= t2;
#n2 = t2 * t2 * grad(perm[ii+i2+perm[jj+j2+perm[kk+k2]]], x2, y2, z2);
n2 = t2 * t2 * dot(grad3[gi2], x2, y2, z2);
t3 = 0.6 - x3*x3 - y3*y3 - z3*z3;
if t3<0.0:
n3 = 0.0;
else:
t3 *= t3;
#n3 = t3 * t3 * grad(perm[ii+1+perm[jj+1+perm[kk+1]]], x3, y3, z3);
n3 = t3 * t3 * dot(grad3[gi3], x3, y3, z3);
# Add contributions from each corner to get the final noise value.
# The result is scaled to stay just inside [-1,1]
return 32.0 * (n0 + n1 + n2 + n3); # TODO: The scale factor is preliminary!
def fractal_brownian_motion(octaves, x, y, z, lacunarity, gain):
#const double lacunarity = 1.9;
#const double gain = 0.65;
sum = 0.0
amplitude = 1.0
for i in range(octaves):
sum += amplitude * simplex(x, y, z)
amplitude *= gain
x *= lacunarity
y *= lacunarity
z *= lacunarity
return sum
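# Usage sketch (assumed parameter values): sum `octaves` layers of simplex
# noise, halving the amplitude (gain=0.5) and doubling the frequency
# (lacunarity=2.0) each octave:
#
#   value = fractal_brownian_motion(8, 0.1, 0.2, 0.3, 2.0, 0.5)
#   # value is an unnormalised float; its range grows with the octave count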
def ridged_multifractal(octaves, x, y, z, lacunarity, gain, offset, h, weight, freq):
#const double lacunarity = 2.0;
#const double gain = 2.0;
#const double offset = 1.0;
#const double h = 1.0;
signal = 0.0
value = 0.0
#double weight = 1.0;
#double freq = 1.0;
modulate = math.pow(freq, -h)
for i in range(octaves):
signal = (offset - abs(simplex(x, y, z)))
signal *= signal * weight
weight = signal*gain
weight = max(0.0, min(1.0, weight))
value += signal * modulate
freq *= lacunarity
x *= lacunarity
y *= lacunarity
z *= lacunarity
return value * 0.5
if __name__ == '__main__':
for i in range(100000):
print ridged_multifractal(8, i/100000.0, 0.5, 0.5, 2, 2, 1, 0.5, 1, 1)
|
|
# :[diStorm64}: Python binding
# Copyright (c) 2009, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
info = (
":[diStorm64}: by Gil Dabah, http://ragestorm.net/distorm/\n"
"Python binding by Mario Vilas, http://breakingcode.wordpress.com/\n"
)
__revision__ = "$Id: __init__.py 376 2009-08-24 16:42:29Z QvasiModo $"
__all__ = [
'Decode',
'DecodeGenerator',
'Decode16Bits',
'Decode32Bits',
'Decode64Bits',
]
from ctypes import *
from exceptions import *
from os.path import split, join
#==============================================================================
# Load the diStorm library
SUPPORT_64BIT_OFFSET = True
_OffsetType = c_ulonglong
try:
_distorm_path = split(__file__)[0]
_distorm_file = join(_distorm_path, 'distorm64.dll')
_distorm = cdll.LoadLibrary(_distorm_file)
except OSError:
raise ImportError, "Error loading diStorm: dynamic link library not found"
try:
distorm_decode = _distorm.distorm_decode64
distorm_version = _distorm.distorm_version
except AttributeError:
raise ImportError, "Error loading diStorm: exported function not found"
#==============================================================================
# diStorm C interface
distorm_version.argtypes = []
distorm_version.restype = c_uint
DISTORM_VER = distorm_version()
MAX_TEXT_SIZE = 60
MAX_INSTRUCTIONS = 1000
DECRES_NONE = 0
DECRES_SUCCESS = 1
DECRES_MEMORYERR = 2
DECRES_INPUTERR = 3
_DecodeType = c_uint
_DecodeResult = c_uint
class _WString (Structure):
_fields_ = [
('length', c_uint), # unused
('p', c_char * MAX_TEXT_SIZE),
]
class _DecodedInst (Structure):
_fields_ = [
('mnemonic', _WString),
('operands', _WString),
('instructionHex', _WString),
('size', c_uint),
('offset', _OffsetType),
]
distorm_decode.restype = _DecodeResult
distorm_decode.argtypes = [
_OffsetType, # codeOffset
c_void_p, # code
c_int, # codeLen
_DecodeType, # dt
POINTER(_DecodedInst), # result
c_uint, # maxInstructions
POINTER(c_uint) # usedInstructionsCount
]
#==============================================================================
# diStorm Python interface
Decode16Bits = 0 # 80286 decoding
Decode32Bits = 1 # IA-32 decoding
Decode64Bits = 2 # AMD64 decoding
OffsetTypeSize = sizeof(_OffsetType) * 8 # size of the offset type in bits (8 bits per byte)
Version = (
(DISTORM_VER & 0xFF0000) >> 16,
(DISTORM_VER & 0xFF00) >> 8,
DISTORM_VER & 0xFF,
)
def DecodeGenerator(codeOffset, code, dt = Decode32Bits):
"""
@type codeOffset: long
@param codeOffset: Memory address where the code is located.
This is B{not} an offset into the code!
It's the actual memory address where it was read from.
@type code: str
@param code: Code to disassemble.
@type dt: int
@param dt: Disassembly type. Can be one of the following:
* L{Decode16Bits}: 80286 decoding
* L{Decode32Bits}: IA-32 decoding
* L{Decode64Bits}: AMD64 decoding
@rtype: generator of tuple( long, int, str, str )
@return: Generator of tuples. Each tuple represents an assembly instruction
and contains:
- Memory address of instruction.
- Size of instruction in bytes.
- Disassembly line of instruction.
- Hexadecimal dump of instruction.
@raise ValueError: Invalid arguments.
"""
# Sanitize the code parameter.
code = str(code)
# Stop the iteration if there's no code to disassemble.
if code == '':
return
# Sanitize the codeOffset parameter.
if not codeOffset:
codeOffset = 0
# Check the validity of the decode type.
if dt not in (Decode16Bits, Decode32Bits, Decode64Bits):
raise ValueError, "Invalid decode type value: %r" % (dt,)
# Prepare input buffer.
codeLen = len(code) # total bytes to disassemble
code = create_string_buffer(code) # allocate code buffer
p_code = addressof(code) # pointer to code buffer
# Prepare output buffer.
l_result = MAX_INSTRUCTIONS # length of output array
result = (_DecodedInst * l_result)() # allocate output array
p_result = pointer(result) # pointer to output array
p_result = cast(p_result, POINTER(_DecodedInst))
# Prepare used instructions counter.
usedInstructionsCount = c_uint(0)
p_usedInstructionsCount = byref(usedInstructionsCount)
# Loop while we have code left to disassemble.
while codeLen > 0:
# Call the decode function.
status = distorm_decode(codeOffset, p_code, min(codeLen, l_result), dt,
p_result, l_result, p_usedInstructionsCount)
if status == DECRES_INPUTERR:
raise ValueError, "Invalid arguments passed to distorm_decode()"
if status == DECRES_MEMORYERR:
raise MemoryError, "Not enough memory to disassemble"
used = usedInstructionsCount.value
if not used:
break
## raise AssertionError, "Internal error while disassembling"
# Yield each decoded instruction but the last one.
for index in xrange(used - 1):
di = result[index]
asm = '%s %s' % (di.mnemonic.p, di.operands.p)
pydi = ( di.offset, di.size, asm, di.instructionHex.p )
yield pydi
# Continue decoding from the last instruction found.
# This prevents truncating the last instruction.
# If there are no more instructions to decode, yield
# the last one and stop the iteration.
di = result[used - 1]
delta = di.offset - codeOffset
if delta <= 0:
asm = '%s %s' % (di.mnemonic.p, di.operands.p)
pydi = ( di.offset, di.size, asm, di.instructionHex.p )
yield pydi
break
codeOffset = codeOffset + delta
p_code = p_code + delta
codeLen = codeLen - delta
# Reset the used instructions counter.
usedInstructionsCount.value = 0
def Decode(offset, code, type = Decode32Bits):
"""
@type offset: long
@param offset: Memory address where the code is located.
This is B{not} an offset into the code!
It's the actual memory address where it was read from.
@type code: str
@param code: Code to disassemble.
@type type: int
@param type: Disassembly type. Can be one of the following:
* L{Decode16Bits}: 80286 decoding
* L{Decode32Bits}: IA-32 decoding
* L{Decode64Bits}: AMD64 decoding
@rtype: list of tuple( long, int, str, str )
@return: List of tuples. Each tuple represents an assembly instruction
and contains:
- Memory address of instruction.
- Size of instruction in bytes.
- Disassembly line of instruction.
- Hexadecimal dump of instruction.
@raise ValueError: Invalid arguments.
"""
return list( DecodeGenerator(offset, code, type) )
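# Minimal usage sketch (assumes distorm64.dll was found and loaded above;
# the byte string is a hypothetical 32-bit prologue/epilogue):
#
#   code = "\x55\x8b\xec\x5d\xc3"   # push ebp / mov ebp, esp / pop ebp / ret
#   for offset, size, asm, hexdump in Decode(0x401000, code, Decode32Bits):
#       print "0x%08x (%2d) %-20s %s" % (offset, size, hexdump, asm)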
|
|
#!/usr/bin/env python
"""AFF4 RDFValue implementations.
This module contains all RDFValue implementations.
NOTE: This module uses the class registry to contain all implementations of
RDFValue class, regardless of where they are defined. To do this reliably, these
implementations must be imported _before_ the relevant classes are referenced
from this module.
"""
import abc
import calendar
import collections
import datetime
import functools
import posixpath
import re
import time
import zlib
import dateutil
from dateutil import parser
import logging
from grr.lib import registry
from grr.lib import utils
from grr.proto import jobs_pb2
# Factor to convert from seconds to microseconds
MICROSECONDS = 1000000
# Somewhere to keep all the late binding placeholders.
_LATE_BINDING_STORE = {}
def RegisterLateBindingCallback(target_name, callback, **kwargs):
"""Registers a callback to be invoked when the RDFValue named is declared."""
_LATE_BINDING_STORE.setdefault(target_name, []).append((callback, kwargs))
class Error(Exception):
"""Errors generated by RDFValue parsers."""
class InitializeError(Error):
"""Raised when we can not initialize from this parameter."""
class DecodeError(InitializeError, ValueError):
"""Generated when we can not decode the data."""
def __init__(self, msg):
logging.debug(msg)
super(DecodeError, self).__init__(msg)
class RDFValueMetaclass(registry.MetaclassRegistry):
"""A metaclass for managing semantic values."""
def __init__(cls, name, bases, env_dict): # pylint: disable=no-self-argument
super(RDFValueMetaclass, cls).__init__(name, bases, env_dict)
# Run and clear any late binding callbacks registered for this class.
for callback, kwargs in _LATE_BINDING_STORE.pop(name, []):
callback(target=cls, **kwargs)
class RDFValue(object):
"""Baseclass for values.
RDFValues are serialized to and from the data store.
"""
__metaclass__ = RDFValueMetaclass
# This is how the attribute will be serialized to the data store. It must
# indicate both the type emitted by SerializeToDataStore() and expected by
# ParseFromDataStore()
data_store_type = "bytes"
# URL pointing to a help page about this value type.
context_help_url = None
_age = 0
# Mark as dirty each time we modify this object.
dirty = False
# If this value was created as part of an AFF4 attribute, the attribute is
# assigned here.
attribute_instance = None
def __init__(self, initializer=None, age=None):
"""Constructor must be able to take no args.
Args:
initializer: Optional parameter to construct from.
age: The age of this entry as an RDFDatetime. If not provided, an
unset (zero) age is used.
Raises:
InitializeError: if we can not be initialized from this parameter.
"""
# Default age is an unset (zero) timestamp.
if age is None:
age = RDFDatetime(age=0)
self._age = age
# Allow an RDFValue to be initialized from an identical RDFValue.
if initializer.__class__ == self.__class__:
self.ParseFromString(initializer.SerializeToString())
elif initializer is not None:
self.ParseFromString(initializer)
def Copy(self):
"""Make a new copy of this RDFValue."""
return self.__class__(initializer=self.SerializeToString())
def __copy__(self):
return self.Copy()
@property
def age(self):
if self._age.__class__ is not RDFDatetime:
self._age = RDFDatetime(self._age, age=0)
return self._age
@age.setter
def age(self, value):
"""When assigning to this attribute it must be an RDFDatetime."""
self._age = RDFDatetime(value, age=0)
def ParseFromDataStore(self, data_store_obj):
"""Serialize from an object read from the datastore."""
return self.ParseFromString(data_store_obj)
@abc.abstractmethod
def ParseFromString(self, string):
"""Given a string, parse ourselves from it."""
pass
def SerializeToDataStore(self):
"""Serialize to a datastore compatible form."""
return self.SerializeToString()
@abc.abstractmethod
def SerializeToString(self):
"""Serialize into a string which can be parsed using ParseFromString."""
def __getstate__(self):
"""Support the pickle protocol."""
# __pickled_rdfvalue is used to mark RDFValues pickled via the new way.
return dict(__pickled_rdfvalue=True,
age=int(self.age),
data=self.SerializeToString())
def __setstate__(self, data):
"""Support the pickle protocol."""
if "__pickled_rdfvalue" in data:
self.ParseFromString(data["data"])
self.age = RDFDatetime(data["age"])
else:
self.__dict__ = data
def AsProto(self):
"""Serialize into an RDFValue protobuf."""
return jobs_pb2.EmbeddedRDFValue(age=int(self.age),
name=self.__class__.__name__,
data=self.SerializeToString())
def __iter__(self):
"""This allows every RDFValue to be iterated over."""
yield self
def __hash__(self):
return hash(self.SerializeToString())
def Summary(self):
"""Return a summary representation of the object."""
return str(self)
@classmethod
def Fields(cls):
"""Return a list of fields which can be queried from this value."""
return []
@staticmethod
def ContainsMatch(attribute, filter_implemention, regex):
return filter_implemention.PredicateContainsFilter(attribute, regex)
# The operators this type supports in the query language
operators = dict(contains=(1, "ContainsMatch"))
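# Subclassing sketch (hypothetical class): a concrete RDFValue only needs
# the two abstract serialization hooks; age handling, pickling and AsProto
# come from this base class.
#
#   class MyValue(RDFValue):
#       data_store_type = "bytes"
#       _value = ""
#
#       def ParseFromString(self, string):
#           self._value = string
#
#       def SerializeToString(self):
#           return self._value
#
#   # round trip: MyValue(MyValue("x").SerializeToString())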
class RDFBytes(RDFValue):
"""An attribute which holds bytes."""
data_store_type = "bytes"
_value = ""
def ParseFromString(self, string):
# TODO(user): this needs some more test coverage, particularly around
# submitting unicode strings and byte literals in the UI forms.
if isinstance(string, unicode):
self._value = utils.SmartStr(string)
else:
self._value = string
def SerializeToString(self):
return self._value
def __str__(self):
return utils.SmartStr(self._value)
def __lt__(self, other):
if isinstance(other, self.__class__):
return self._value < other._value # pylint: disable=protected-access
else:
return self._value < other
def __gt__(self, other):
if isinstance(other, self.__class__):
return self._value > other._value # pylint: disable=protected-access
else:
return self._value > other
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._value == other._value # pylint: disable=protected-access
else:
return self._value == other
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._value)
def __bool__(self):
return bool(self._value)
def __nonzero__(self):
return bool(self._value)
def __len__(self):
return len(self._value)
class RDFZippedBytes(RDFBytes):
"""Zipped bytes sequence."""
def Uncompress(self):
if self:
return zlib.decompress(self._value)
else:
return ""
class RDFString(RDFBytes):
"""Represent a simple string."""
data_store_type = "string"
_value = u""
@staticmethod
def Startswith(attribute, filter_implemention, string):
return filter_implemention.PredicateContainsFilter(
attribute, "^" + utils.EscapeRegex(string))
operators = RDFValue.operators.copy()
operators["matches"] = (1, "ContainsMatch")
operators["="] = (1, "ContainsMatch")
operators["startswith"] = (1, "Startswith")
def format(self, *args, **kwargs): # pylint: disable=invalid-name
return self._value.format(*args, **kwargs)
def __unicode__(self):
return utils.SmartUnicode(self._value)
def __getitem__(self, value):
return self._value.__getitem__(value)
def ParseFromString(self, string):
# This handles the cases when we're initialized from Unicode strings.
self._value = utils.SmartStr(string)
def SerializeToString(self):
return utils.SmartStr(self._value)
def SerializeToDataStore(self):
return utils.SmartUnicode(self._value)
class HashDigest(RDFBytes):
"""Binary hash digest with hex string representation."""
data_store_type = "bytes"
def __str__(self):
return self._value.encode("hex")
def __eq__(self, other):
return (self._value == utils.SmartStr(other) or
self._value.encode("hex") == other)
def __ne__(self, other):
return not self.__eq__(other)
@functools.total_ordering
class RDFInteger(RDFString):
"""Represent an integer."""
data_store_type = "integer"
@staticmethod
def IsNumeric(value):
return isinstance(value, (int, long, float, RDFInteger))
def __init__(self, initializer=None, age=None):
super(RDFInteger, self).__init__(initializer=initializer, age=age)
if initializer is None:
self._value = 0
else:
self.ParseFromString(initializer)
def ParseFromString(self, string):
self._value = 0
if string:
try:
self._value = int(string)
except (TypeError, ValueError) as e:
raise DecodeError(e)
def SerializeToDataStore(self):
"""Use varint to store the integer."""
return int(self._value)
def Set(self, value):
if isinstance(value, (long, int)):
self._value = value
else:
self.ParseFromString(value)
def __long__(self):
return long(self._value)
def __int__(self):
return int(self._value)
def __float__(self):
return float(self._value)
def __index__(self):
return self._value
def __eq__(self, other):
return self._value == other
def __lt__(self, other):
return self._value < other
def __and__(self, other):
return self._value & other
def __rand__(self, other):
return self._value & other
def __iand__(self, other):
self._value &= other
return self
def __or__(self, other):
return self._value | other
def __ror__(self, other):
return self._value | other
def __ior__(self, other):
self._value |= other
return self
def __add__(self, other):
return self._value + other
def __radd__(self, other):
return self._value + other
def __iadd__(self, other):
self._value += other
return self
def __sub__(self, other):
return self._value - other
def __rsub__(self, other):
return other - self._value
def __isub__(self, other):
self._value -= other
return self
def __mul__(self, other):
return self._value * other
def __rmul__(self, other):
return self._value * other
def __div__(self, other):
return self._value / other
def __rdiv__(self, other):
return other / self._value
@staticmethod
def LessThan(attribute, filter_implemention, value):
return filter_implemention.PredicateLessThanFilter(attribute, long(value))
@staticmethod
def GreaterThan(attribute, filter_implemention, value):
return filter_implemention.PredicateGreaterThanFilter(
attribute, long(value))
@staticmethod
def Equal(attribute, filter_implemention, value):
return filter_implemention.PredicateNumericEqualFilter(
attribute, long(value))
operators = {"<": (1, "LessThan"),
">": (1, "GreaterThan"),
"=": (1, "Equal")}
class RDFBool(RDFInteger):
"""Boolean value."""
data_store_type = "unsigned_integer"
class RDFDatetime(RDFInteger):
"""A date and time internally stored in MICROSECONDS."""
converter = MICROSECONDS
data_store_type = "unsigned_integer"
# A value of 0 means this object is not initialized.
_value = 0
def __init__(self, initializer=None, age=None):
super(RDFDatetime, self).__init__(None, age)
if isinstance(initializer, RDFInteger):
self._value = initializer._value # pylint: disable=protected-access
elif isinstance(initializer, (int, long, float)):
self._value = int(initializer)
elif isinstance(initializer, datetime.datetime):
seconds = calendar.timegm(initializer.utctimetuple())
self._value = (seconds * self.converter) + initializer.microsecond
elif isinstance(initializer, basestring):
try:
# Can be just a serialized integer.
self._value = int(initializer)
except ValueError:
if initializer:
# Try to parse from human readable string.
self.ParseFromHumanReadable(initializer)
elif initializer is not None:
raise InitializeError("Unknown initializer for RDFDateTime: %s." %
type(initializer))
def Now(self):
self._value = int(time.time() * self.converter)
return self
def Format(self, fmt):
"""Return the value as a string formatted as per strftime semantics."""
return time.strftime(fmt, time.gmtime(self._value / self.converter))
def __str__(self):
"""Return the date in human readable (UTC)."""
return self.Format("%Y-%m-%d %H:%M:%S")
def __unicode__(self):
return utils.SmartUnicode(str(self))
def AsDatetime(self):
"""Return the time as a python datetime object."""
return datetime.datetime.utcfromtimestamp(self._value / self.converter)
def AsSecondsFromEpoch(self):
return self._value / self.converter
def AsMicroSecondsFromEpoch(self):
return self._value
def FromSecondsFromEpoch(self, value):
self._value = value * self.converter
return self
def ParseFromHumanReadable(self, string, eoy=False):
self._value = self._ParseFromHumanReadable(string, eoy=eoy)
return self
def __add__(self, other):
if isinstance(other, (int, long, float, Duration)):
# Assume other is in seconds
return self.__class__(self._value + other * self.converter)
return NotImplemented
def __iadd__(self, other):
if isinstance(other, (int, long, float, Duration)):
# Assume other is in seconds
self._value += other * self.converter
return self
return NotImplemented
def __mul__(self, other):
if isinstance(other, (int, long, float, Duration)):
return self.__class__(self._value * other)
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
def __sub__(self, other):
if isinstance(other, (int, long, float, Duration)):
# Assume other is in seconds
return self.__class__(self._value - other * self.converter)
if isinstance(other, RDFDatetime):
return Duration(self.AsSecondsFromEpoch() - other.AsSecondsFromEpoch())
return NotImplemented
def __isub__(self, other):
if isinstance(other, (int, long, float, Duration)):
# Assume other is in seconds
self._value -= other * self.converter
return self
return NotImplemented
@classmethod
def _ParseFromHumanReadable(cls, string, eoy=False):
"""Parse a human readable string of a timestamp (in local time).
Args:
string: The string to parse.
eoy: If True, sets the default value to the end of the year.
Usually this method returns a timestamp where each field that is
not present in the given string is filled with values from the date
January 1st of the current year, midnight. Sometimes it makes more
sense to compare against the end of a period so if eoy is set, the
default values are copied from the 31st of December of the current
year, 23:59h.
Returns:
The parsed timestamp.
"""
# By default assume the time is given in UTC.
if eoy:
default = datetime.datetime(time.gmtime().tm_year, 12, 31, 23, 59,
tzinfo=dateutil.tz.tzutc())
else:
default = datetime.datetime(time.gmtime().tm_year, 1, 1, 0, 0,
tzinfo=dateutil.tz.tzutc())
timestamp = parser.parse(string, default=default)
return calendar.timegm(timestamp.utctimetuple()) * cls.converter
@classmethod
def LessThanEq(cls, attribute, filter_implemention, value):
return filter_implemention.PredicateLesserEqualFilter(
attribute, cls._ParseFromHumanReadable(value, eoy=True))
@classmethod
def LessThan(cls, attribute, filter_implemention, value):
"""For dates we want to recognize a variety of values."""
return filter_implemention.PredicateLesserEqualFilter(
attribute, cls._ParseFromHumanReadable(value))
@classmethod
def GreaterThanEq(cls, attribute, filter_implemention, value):
return filter_implemention.PredicateGreaterEqualFilter(
attribute, cls._ParseFromHumanReadable(value))
@classmethod
def GreaterThan(cls, attribute, filter_implemention, value):
return filter_implemention.PredicateGreaterEqualFilter(
attribute, cls._ParseFromHumanReadable(value, eoy=True))
operators = {"<": (1, "LessThan"),
">": (1, "GreaterThan"),
"<=": (1, "LessThanEq"),
">=": (1, "GreaterThanEq")}
class RDFDatetimeSeconds(RDFDatetime):
"""A DateTime class which is stored in whole seconds."""
converter = 1
class Duration(RDFInteger):
"""Duration value stored in seconds internally."""
data_store_type = "unsigned_integer"
DIVIDERS = collections.OrderedDict((
("w", 60 * 60 * 24 * 7),
("d", 60 * 60 * 24),
("h", 60 * 60),
("m", 60),
("s", 1)))
def __init__(self, initializer=None, age=None):
super(Duration, self).__init__(None, age)
if isinstance(initializer, Duration):
self._value = initializer._value # pylint: disable=protected-access
elif isinstance(initializer, basestring):
self.ParseFromHumanReadable(initializer)
elif isinstance(initializer, (int, long, float)):
self._value = initializer
elif isinstance(initializer, RDFInteger):
self._value = int(initializer)
elif initializer is None:
self._value = 0
else:
raise InitializeError("Unknown initializer for Duration: %s." %
type(initializer))
def Validate(self, value, **_):
self.ParseFromString(value)
def ParseFromString(self, string):
self.ParseFromHumanReadable(string)
def SerializeToString(self):
return str(self)
@property
def seconds(self):
return self._value
@property
def microseconds(self):
return self._value * 1000000
def __str__(self):
time_secs = self._value
for label, divider in self.DIVIDERS.items():
if time_secs % divider == 0:
return "%d%s" % (time_secs / divider, label)
def __unicode__(self):
return utils.SmartUnicode(str(self))
def __add__(self, other):
if isinstance(other, (int, long, float, Duration)):
# Assume other is in seconds
return self.__class__(self._value + other)
return NotImplemented
def __iadd__(self, other):
if isinstance(other, (int, long, float, Duration)):
# Assume other is in seconds
self._value += other
return self
return NotImplemented
def __mul__(self, other):
if isinstance(other, (int, long, float, Duration)):
return self.__class__(self._value * other)
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
def __sub__(self, other):
if isinstance(other, (int, long, float, Duration)):
# Assume other is in seconds
return self.__class__(self._value - other)
return NotImplemented
def __isub__(self, other):
if isinstance(other, (int, long, float, Duration)):
# Assume other is in seconds
self._value -= other
return self
return NotImplemented
def __abs__(self):
return Duration(abs(self._value))
def Expiry(self, base_time=None):
if base_time is None:
base_time = RDFDatetime().Now()
else:
base_time = base_time.Copy()
base_time_sec = base_time.AsSecondsFromEpoch()
return base_time.FromSecondsFromEpoch(base_time_sec + self._value)
def ParseFromHumanReadable(self, timestring):
"""Parse a human readable string of a duration.
Args:
timestring: The string to parse.
"""
if not timestring:
return
orig_string = timestring
multiplicator = 1
if timestring[-1].isdigit():
pass
else:
try:
multiplicator = self.DIVIDERS[timestring[-1]]
except KeyError:
raise RuntimeError("Invalid duration multiplicator: '%s' ('%s')." %
(timestring[-1], orig_string))
timestring = timestring[:-1]
try:
self._value = int(timestring) * multiplicator
except ValueError:
raise InitializeError("Could not parse expiration time '%s'." %
orig_string)
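# Usage sketch: durations accept a bare number of seconds or a single
# suffix from DIVIDERS (w/d/h/m/s):
#
#   Duration("90")                 # 90 seconds
#   Duration("2h")                 # 7200 seconds
#   Duration("1w").seconds         # 604800
#   Duration("10m").Expiry()       # an RDFDatetime ten minutes from now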
class ByteSize(RDFInteger):
"""A size for bytes allowing standard unit prefixes.
We use the standard IEC 60027-2 A.2 and ISO/IEC 80000:
Binary units (powers of 2): Ki, Mi, Gi
SI units (powers of 10): k, m, g
"""
data_store_type = "unsigned_integer"
DIVIDERS = dict((
("", 1),
("k", 1000),
("m", 1000 ** 2),
("g", 1000 ** 3),
("ki", 1024),
("mi", 1024 ** 2),
("gi", 1024 ** 3),
))
REGEX = re.compile("^([0-9.]+)([kmgi]*)b?$")
def __init__(self, initializer=None, age=None):
super(ByteSize, self).__init__(None, age)
if isinstance(initializer, ByteSize):
self._value = initializer._value # pylint: disable=protected-access
elif isinstance(initializer, basestring):
self.ParseFromHumanReadable(initializer)
elif isinstance(initializer, (int, long, float)):
self._value = initializer
elif isinstance(initializer, RDFInteger):
self._value = int(initializer)
elif initializer is None:
self._value = 0
else:
raise InitializeError("Unknown initializer for ByteSize: %s." %
type(initializer))
def __str__(self):
size_token = ""
if self._value > 1024 ** 3:
size_token = "Gb"
value = float(self._value) / 1024 ** 3
elif self._value > 1024 ** 2:
size_token = "Mb"
value = float(self._value) / 1024 ** 2
elif self._value > 1024:
size_token = "Kb"
value = float(self._value) / 1024
else:
return utils.SmartStr(self._value) + "b"
return "%.1f%s" % (value, size_token)
def ParseFromHumanReadable(self, string):
"""Parse a human readable string of a byte string.
Args:
string: The string to parse.
Raises:
DecodeError: If the string can not be parsed.
"""
if not string:
return None
match = self.REGEX.match(string.strip().lower())
if not match:
raise DecodeError("Unknown specification for ByteSize %s" % string)
multiplier = self.DIVIDERS.get(match.group(2))
if not multiplier:
raise DecodeError("Invalid multiplier %s" % match.group(2))
# The value may be represented as a float, but if not, don't lose accuracy.
value = match.group(1)
if "." in value:
value = float(value)
else:
value = long(value)
self._value = int(value * multiplier)
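# Usage sketch: ByteSize accepts SI (k/m/g) and binary (ki/mi/gi) prefixes,
# case-insensitively, with an optional trailing "b":
#
#   int(ByteSize("5kb"))    # 5000
#   int(ByteSize("5kib"))   # 5120
#   int(ByteSize("1.5mi"))  # 1572864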
@functools.total_ordering
class RDFURN(RDFValue):
"""An object to abstract URL manipulation."""
data_store_type = "string"
# Careful when changing this value, this is hardcoded a few times in this
# class for performance reasons.
scheme = "aff4"
def __init__(self, initializer=None, age=None):
"""Constructor.
Args:
initializer: A string or another RDFURN.
age: The age of this entry.
Raises:
ValueError: if no urn passed
"""
if isinstance(initializer, RDFURN):
# Make a direct copy of the other object
self._string_urn = initializer.Path()
super(RDFURN, self).__init__(None, age)
return
if initializer is None:
raise ValueError("URN cannot be None")
super(RDFURN, self).__init__(initializer=initializer, age=age)
def ParseFromString(self, initializer=None):
"""Create RDFRUN from string.
Args:
initializer: url string
"""
# Strip off the aff4: prefix if necessary.
if initializer.startswith("aff4:/"):
initializer = initializer[5:]
self._string_urn = utils.NormalizePath(initializer)
def SerializeToString(self):
return str(self)
def SerializeToDataStore(self):
return unicode(self)
def __setstate__(self, data):
"""Support the pickle protocol."""
RDFValue.__setstate__(self, data)
# NOTE: This is done for backwards compatibility with
# old pickled RDFURNs that got pickled via default pickling mechanism and
# have 'aff4:/' pickled as part of _string_urn as a result.
if self._string_urn.startswith("aff4:/"):
self._string_urn = self._string_urn[5:]
def Dirname(self):
return posixpath.dirname(self._string_urn)
def Basename(self):
return posixpath.basename(self.Path())
def Add(self, path, age=None):
"""Add a relative stem to the current value and return a new RDFURN.
If urn is a fully qualified URN, replace the current value with it.
Args:
path: A string containing a relative path.
age: The age of the object. If None set to current time.
Returns:
A new RDFURN that can be chained.
Raises:
ValueError: if the path component is not a string.
"""
if not isinstance(path, basestring):
raise ValueError("Only strings should be added to a URN.")
result = self.Copy(age)
result.Update(path=utils.JoinPath(self._string_urn, path))
return result
def Update(self, url=None, path=None):
"""Update one of the fields.
Args:
url: An optional string containing a URL.
path: If the path for this URN should be updated.
"""
if url:
self.ParseFromString(url)
if path:
self._string_urn = path
self.dirty = True
def Copy(self, age=None):
"""Make a copy of ourselves."""
if age is None:
age = int(time.time() * MICROSECONDS)
return self.__class__(self, age=age)
def __str__(self):
return utils.SmartStr("aff4:%s" % self._string_urn)
def __unicode__(self):
return utils.SmartUnicode(u"aff4:%s" % self._string_urn)
def __eq__(self, other):
if isinstance(other, basestring):
other = self.__class__(other)
elif other is None:
return False
elif not isinstance(other, RDFURN):
return NotImplemented
return self._string_urn == other.Path()
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return self._string_urn < other
def Path(self):
"""Return the path of the urn."""
return self._string_urn
def Split(self, count=None):
"""Returns all the path components.
Args:
count: If count is specified, the output will be exactly this many path
components, possibly extended with the empty string. This is useful for
tuple assignments without worrying about ValueErrors:
namespace, path = urn.Split(2)
Returns:
A list of path components of this URN.
"""
if count:
result = filter(None, self._string_urn.split("/", count))
while len(result) < count:
result.append("")
return result
else:
return filter(None, self._string_urn.split("/"))
def RelativeName(self, volume):
"""Given a volume URN return the relative URN as a unicode string.
We remove the volume prefix from our own.
Args:
volume: An RDFURN or fully qualified url string.
Returns:
A string of the url relative from the volume or None if our URN does not
start with the volume prefix.
"""
string_url = utils.SmartUnicode(self)
volume_url = utils.SmartUnicode(volume)
if string_url.startswith(volume_url):
result = string_url[len(volume_url):]
# This must always return a relative path so we strip leading "/"s. The
# result is always a unicode string.
return result.lstrip("/")
return None
def __repr__(self):
return "<%s age=%s>" % (str(self), self.age)
class Subject(RDFURN):
"""A psuedo attribute representing the subject of an AFF4 object."""
@staticmethod
def ContainsMatch(unused_attribute, filter_implemention, regex):
return filter_implemention.SubjectContainsFilter(regex)
@staticmethod
def Startswith(unused_attribute, filter_implemention, string):
return filter_implemention.SubjectContainsFilter(
"^" + utils.EscapeRegex(string))
@staticmethod
def HasAttribute(unused_attribute, filter_implemention, string):
return filter_implemention.HasPredicateFilter(string)
operators = dict(matches=(1, "ContainsMatch"),
contains=(1, "ContainsMatch"),
startswith=(1, "Startswith"),
has=(1, "HasAttribute"))
DEFAULT_FLOW_QUEUE = RDFURN("F")
class SessionID(RDFURN):
"""An rdfvalue object that represents a session_id."""
def __init__(
self, initializer=None, age=None, base="aff4:/flows",
queue=DEFAULT_FLOW_QUEUE, flow_name=None):
"""Constructor.
Args:
initializer: A string or another RDFURN.
age: The age of this entry.
base: The base namespace this session id lives in.
queue: The queue to use.
flow_name: The name of this flow or its random id.
Raises:
InitializeError: The given URN cannot be converted to a SessionID.
"""
if initializer is None:
# This SessionID is being constructed from scratch.
if flow_name is None:
flow_name = utils.PRNG.GetULong()
if isinstance(flow_name, int):
initializer = RDFURN(base).Add("%s:%X" % (queue.Basename(), flow_name))
else:
initializer = RDFURN(base).Add("%s:%s" % (queue.Basename(), flow_name))
elif isinstance(initializer, RDFURN):
try:
self.ValidateID(initializer.Basename())
except ValueError as e:
raise InitializeError("Invalid URN for SessionID: %s, %s" %
(initializer, e.message))
super(SessionID, self).__init__(initializer=initializer, age=age)
def Queue(self):
return RDFURN(self.Basename().split(":")[0])
def FlowName(self):
return self.Basename().split(":")[1]
def Add(self, path, age=None):
# Adding to a SessionID results in a normal RDFURN.
return RDFURN(self).Add(path, age=age)
@classmethod
def ValidateID(cls, id_str):
# This check is weaker than it could be because we allow queues called
# "DEBUG-user1" and IDs like "TransferStore"
allowed_re = re.compile(r"^[-0-9a-zA-Z]+:[0-9a-zA-Z]+$")
if not allowed_re.match(id_str):
raise ValueError("Invalid SessionID")
class FlowSessionID(SessionID):
# TODO(user): This is code to fix some legacy issues. Remove this when all
# clients are built after Dec 2014.
def ParseFromString(self, initializer=None):
# Old clients sometimes send bare well known flow ids.
if not utils.SmartStr(initializer).startswith("aff4"):
initializer = "aff4:/flows/" + initializer
super(FlowSessionID, self).ParseFromString(initializer)
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# util __init__.py
from __future__ import unicode_literals, print_function
from werkzeug.test import Client
import os, re, sys, json, hashlib, requests, traceback
from markdown2 import markdown as _markdown
from .html_utils import sanitize_html
import frappe
from frappe.utils.identicon import Identicon
from email.utils import parseaddr, formataddr
# utility functions like cint, int, flt, etc.
from frappe.utils.data import *
from six.moves.urllib.parse import quote
from six import text_type, string_types
default_fields = ['doctype', 'name', 'owner', 'creation', 'modified', 'modified_by',
'parent', 'parentfield', 'parenttype', 'idx', 'docstatus']
# used in import_docs.py
# TODO: deprecate it
def getCSVelement(v):
"""
Returns the CSV-escaped value of `v`. For example:
* apple stays apple (no comma, newline or quote, so no quoting needed)
* hi"there becomes "hi""there"
"""
v = cstr(v)
if not v: return ''
if (',' in v) or ('\n' in v) or ('"' in v):
if '"' in v: v = v.replace('"', '""')
return '"'+v+'"'
else: return v or ''
def get_fullname(user=None):
"""get the full name (first name + last name) of the user from User"""
if not user:
user = frappe.session.user
if not hasattr(frappe.local, "fullnames"):
frappe.local.fullnames = {}
if not frappe.local.fullnames.get(user):
p = frappe.db.get_value("User", user, ["first_name", "last_name"], as_dict=True)
if p:
frappe.local.fullnames[user] = " ".join(filter(None,
[p.get('first_name'), p.get('last_name')])) or user
else:
frappe.local.fullnames[user] = user
return frappe.local.fullnames.get(user)
def get_email_address(user=None):
"""get the email address of the user from User"""
if not user:
user = frappe.session.user
return frappe.db.get_value("User", user, ["email"], as_dict=True).get("email")
def get_formatted_email(user):
"""get Email Address of user formatted as: `John Doe <[email protected]>`"""
fullname = get_fullname(user)
mail = get_email_address(user)
return formataddr((fullname, mail))
def extract_email_id(email):
"""fetch only the email part of the Email Address"""
email_id = parse_addr(email)[1]
if email_id and isinstance(email_id, string_types) and not isinstance(email_id, text_type):
email_id = email_id.decode("utf-8", "ignore")
return email_id
def validate_email_add(email_str, throw=False):
"""Validates the email string"""
email = email_str = (email_str or "").strip()
def _check(e):
_valid = True
if not e:
_valid = False
if 'undisclosed-recipient' in e:
return False
elif " " in e and "<" not in e:
# example: "[email protected] [email protected]" will return "[email protected]" after parseaddr!!!
_valid = False
else:
e = extract_email_id(e)
match = re.match("[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?", e.lower()) if e else None
if not match:
_valid = False
else:
matched = match.group(0)
if match:
match = matched==e.lower()
if not _valid:
if throw:
frappe.throw(frappe._("{0} is not a valid Email Address").format(e),
frappe.InvalidEmailAddressError)
return None
else:
return matched
out = []
for e in email_str.split(','):
email = _check(e.strip())
if email:
out.append(email)
return ', '.join(out)
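# Illustrative usage (a sketch; the addresses are made up):
#
#     >>> validate_email_add('[email protected]')
#     '[email protected]'
#     >>> validate_email_add('[email protected], not-an-address')
#     '[email protected]'
#
# Invalid entries are silently dropped unless throw=True, in which case
# frappe.throw() raises InvalidEmailAddressError for the offending entry.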
def split_emails(txt):
email_list = []
# emails can be separated by comma or newline
s = re.sub(r'[\t\n\r]', ' ', cstr(txt))
for email in re.split('''[,\\n](?=(?:[^"]|"[^"]*")*$)''', s):
email = strip(cstr(email))
if email:
email_list.append(email)
return email_list
def random_string(length):
"""generate a random string"""
import string
from random import choice
return ''.join([choice(string.ascii_letters + string.digits) for i in range(length)])
def has_gravatar(email):
'''Returns gravatar url if user has set an avatar at gravatar.com'''
if (frappe.flags.in_import
or frappe.flags.in_install
or frappe.flags.in_test):
# no gravatar if via upload
# since querying gravatar for every item will be slow
return ''
hexdigest = hashlib.md5(frappe.as_unicode(email).encode('utf-8')).hexdigest()
gravatar_url = "https://secure.gravatar.com/avatar/{hash}?d=404&s=200".format(hash=hexdigest)
try:
res = requests.get(gravatar_url)
if res.status_code==200:
return gravatar_url
else:
return ''
except requests.exceptions.ConnectionError:
return ''
def get_gravatar_url(email):
return "https://secure.gravatar.com/avatar/{hash}?d=mm&s=200".format(hash=hashlib.md5(email.encode('utf-8')).hexdigest())
def get_gravatar(email):
gravatar_url = has_gravatar(email)
if not gravatar_url:
gravatar_url = Identicon(email).base64()
return gravatar_url
def get_traceback():
"""
Returns the traceback of the Exception
"""
exc_type, exc_value, exc_tb = sys.exc_info()
trace_list = traceback.format_exception(exc_type, exc_value, exc_tb)
body = "".join(cstr(t) for t in trace_list)
return body
def log(event, details):
frappe.logger().info(details)
def dict_to_str(args, sep='&'):
"""
Converts a dictionary to a URL query string, joining key=value pairs with `sep`
"""
t = []
for k in list(args):
t.append(str(k)+'='+quote(str(args[k] or '')))
return sep.join(t)
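# Illustrative usage (a sketch; key order follows dict iteration order):
#
#     >>> dict_to_str({'cmd': 'runserver', 'msg': 'hello world'})
#     'cmd=runserver&msg=hello%20world'
#
# Values are percent-encoded with urllib's quote(); None values become ''.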
# Get Defaults
# ==============================================================================
def get_defaults(key=None):
"""
Get dictionary of default values from the defaults, or a value if key is passed
"""
return frappe.db.get_defaults(key)
def set_default(key, val):
"""
Set / add a default value to the defaults table
"""
return frappe.db.set_default(key, val)
def remove_blanks(d):
"""
Returns d with empty ('' or None) values stripped
"""
empty_keys = []
for key in d:
if d[key]=='' or d[key]==None:
# deleting a key while iterating over d raises a RuntimeError,
# so collect the empty keys first and delete them afterwards
empty_keys.append(key)
for key in empty_keys:
del d[key]
return d
def strip_html_tags(text):
"""Remove html tags from text"""
return re.sub(r"<[^>]*>", "", text)
def get_file_timestamp(fn):
"""
Returns timestamp of the given file
"""
from frappe.utils import cint
try:
return str(cint(os.stat(fn).st_mtime))
except OSError as e:
if e.args[0]!=2:
raise
else:
return None
# to be deprecated
def make_esc(esc_chars):
"""
Returns a function that escapes the given special characters in a string
"""
return lambda s: ''.join(['\\' + c if c in esc_chars else c for c in s])
# esc / unescape characters -- used for command line
def esc(s, esc_chars):
"""
Escape special characters
"""
if not s:
return ""
for c in esc_chars:
esc_str = '\\' + c
s = s.replace(c, esc_str)
return s
def unesc(s, esc_chars):
"""
UnEscape special characters
"""
for c in esc_chars:
esc_str = '\\' + c
s = s.replace(esc_str, c)
return s
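# Illustrative round trip (a sketch):
#
#     >>> esc("it's 100% done", "'%")
#     "it\\'s 100\\% done"
#     >>> unesc("it\\'s 100\\% done", "'%")
#     "it's 100% done"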
def execute_in_shell(cmd, verbose=0):
# using Popen instead of os.system - as recommended by python docs
from subprocess import Popen
import tempfile
with tempfile.TemporaryFile() as stdout:
with tempfile.TemporaryFile() as stderr:
p = Popen(cmd, shell=True, stdout=stdout, stderr=stderr)
p.wait()
stdout.seek(0)
out = stdout.read()
stderr.seek(0)
err = stderr.read()
if verbose:
if err: print(err)
if out: print(out)
return err, out
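# Illustrative usage (a sketch). Note the return order is (stderr, stdout),
# and under Python 3 both come back as the raw bytes written by the child
# process (the temporary files are opened in binary mode):
#
#     >>> err, out = execute_in_shell('echo hello')
#     >>> out
#     b'hello\n'
#     >>> err
#     b''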
def get_path(*path, **kwargs):
base = kwargs.get('base')
if not base:
base = frappe.local.site_path
return os.path.join(base, *path)
def get_site_base_path(sites_dir=None, hostname=None):
return frappe.local.site_path
def get_site_path(*path):
return get_path(base=get_site_base_path(), *path)
def get_files_path(*path, **kwargs):
return get_site_path("private" if kwargs.get("is_private") else "public", "files", *path)
def get_bench_path():
return os.path.realpath(os.path.join(os.path.dirname(frappe.__file__), '..', '..', '..'))
def get_backups_path():
return get_site_path("private", "backups")
def get_request_site_address(full_address=False):
return get_url(full_address=full_address)
def encode_dict(d, encoding="utf-8"):
for key in d:
if isinstance(d[key], string_types) and isinstance(d[key], text_type):
d[key] = d[key].encode(encoding)
return d
def decode_dict(d, encoding="utf-8"):
for key in d:
if isinstance(d[key], string_types) and not isinstance(d[key], text_type):
d[key] = d[key].decode(encoding, "ignore")
return d
def get_site_name(hostname):
return hostname.split(':')[0]
def get_disk_usage():
"""get disk usage of files folder"""
files_path = get_files_path()
if not os.path.exists(files_path):
return 0
err, out = execute_in_shell("du -hsm {files_path}".format(files_path=files_path))
return cint(out.split("\n")[-2].split("\t")[0])
def touch_file(path):
with open(path, 'a'):
os.utime(path, None)
return path
def get_test_client():
from frappe.app import application
return Client(application)
def get_hook_method(hook_name, fallback=None):
method = (frappe.get_hooks().get(hook_name))
if method:
method = frappe.get_attr(method[0])
return method
if fallback:
return fallback
def call_hook_method(hook, *args, **kwargs):
out = None
for method_name in frappe.get_hooks(hook):
out = out or frappe.get_attr(method_name)(*args, **kwargs)
return out
def update_progress_bar(txt, i, l):
if not getattr(frappe.local, 'request', None):
lt = len(txt)
if lt < 36:
txt = txt + " "*(36-lt)
complete = int(float(i+1) / l * 40)
sys.stdout.write("\r{0}: [{1}{2}]".format(txt, "="*complete, " "*(40-complete)))
sys.stdout.flush()
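# Illustrative usage (a sketch; ``items`` is a hypothetical list). The bar is
# only drawn for CLI invocations, i.e. when frappe.local has no request:
#
#     for i, item in enumerate(items):
#         update_progress_bar("Importing items", i, len(items))
#     print()  # move past the progress bar once the loop is done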
def get_html_format(print_path):
html_format = None
if os.path.exists(print_path):
with open(print_path, "r") as f:
html_format = f.read()
for include_directive, path in re.findall("""({% include ['"]([^'"]*)['"] %})""", html_format):
for app_name in frappe.get_installed_apps():
include_path = frappe.get_app_path(app_name, *path.split(os.path.sep))
if os.path.exists(include_path):
with open(include_path, "r") as f:
html_format = html_format.replace(include_directive, f.read())
break
return html_format
def is_markdown(text):
if "<!-- markdown -->" in text:
return True
elif "<!-- html -->" in text:
return False
else:
return not re.search("<p[\s]*>|<br[\s]*>", text)
def get_sites(sites_path=None):
if not sites_path:
sites_path = getattr(frappe.local, 'sites_path', None) or '.'
sites = []
for site in os.listdir(sites_path):
path = os.path.join(sites_path, site)
if (os.path.isdir(path)
and not os.path.islink(path)
and os.path.exists(os.path.join(path, 'site_config.json'))):
# is a dir and has site_config.json
sites.append(site)
return sorted(sites)
def get_request_session(max_retries=3):
from urllib3.util import Retry
session = requests.Session()
session.mount("http://", requests.adapters.HTTPAdapter(max_retries=Retry(total=5, status_forcelist=[500])))
session.mount("https://", requests.adapters.HTTPAdapter(max_retries=Retry(total=5, status_forcelist=[500])))
return session
def watch(path, handler=None, debug=True):
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class Handler(FileSystemEventHandler):
def on_any_event(self, event):
if debug:
print("File {0}: {1}".format(event.event_type, event.src_path))
if not handler:
print("No handler specified")
return
handler(event.src_path, event.event_type)
event_handler = Handler()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
def markdown(text, sanitize=True, linkify=True):
html = _markdown(text)
if sanitize:
html = html.replace("<!-- markdown -->", "")
html = sanitize_html(html, linkify=linkify)
return html
def sanitize_email(emails):
sanitized = []
for e in split_emails(emails):
if not validate_email_add(e):
continue
full_name, email_id = parse_addr(e)
sanitized.append(formataddr((full_name, email_id)))
return ", ".join(sanitized)
def parse_addr(email_string):
"""
Return (user_name, email_id) parsed from an email string.
Falls back to (None, email) when no valid address can be extracted.
"""
name, email = parseaddr(email_string)
if check_format(email):
name = get_name_from_email_string(email_string, email, name)
return (name, email)
else:
email_regex = re.compile(r"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)")
email_list = re.findall(email_regex, email_string)
if len(email_list) > 0 and check_format(email_list[0]):
#take only first email address
email = email_list[0]
name = get_name_from_email_string(email_string, email, name)
return (name, email)
return (None, email)
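# Illustrative usage (a sketch; the addresses are made up):
#
#     >>> parse_addr('John Doe <[email protected]>')
#     ('John Doe', '[email protected]')
#     >>> parse_addr('[email protected]')
#     ('[email protected]', '[email protected]')
#
# When no display name is present, the email id is reused as the name.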
def check_format(email_id):
"""
Check if email_id looks valid, e.g. [email protected]
The string check ensures that email_id contains both '.' and '@',
that the last '.' comes after the '@', and that at least a few
characters follow the '@'.
"""
is_valid = False
try:
pos = email_id.rindex("@")
is_valid = pos > 0 and (email_id.rindex(".") > pos) and (len(email_id) - pos > 4)
except Exception:
#print(e)
pass
return is_valid
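# Illustrative usage (a sketch):
#
#     >>> check_format('[email protected]')
#     True
#     >>> check_format('jane.example.com')      # no '@'
#     False
#     >>> check_format('jane@example')          # no '.' after the '@'
#     False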
def get_name_from_email_string(email_string, email_id, name):
name = email_string.replace(email_id, '')
name = re.sub('[^A-Za-z0-9\u00C0-\u024F\/\_\' ]+', '', name).strip()
if not name:
name = email_id
return name
def get_installed_apps_info():
out = []
for app in frappe.get_installed_apps():
out.append({
'app_name': app,
'version': getattr(frappe.get_module(app), '__version__', 'Unknown')
})
return out
def get_site_info():
from frappe.utils.user import get_system_managers
from frappe.core.doctype.user.user import STANDARD_USERS
from frappe.email.queue import get_emails_sent_this_month
# only get system users
users = frappe.get_all('User', filters={'user_type': 'System User', 'name': ('not in', STANDARD_USERS)},
fields=['name', 'enabled', 'last_login', 'last_active', 'language', 'time_zone'])
system_managers = get_system_managers(only_name=True)
for u in users:
# tag system managers
u.is_system_manager = 1 if u.name in system_managers else 0
u.full_name = get_fullname(u.name)
u.email = u.name
del u['name']
system_settings = frappe.db.get_singles_dict('System Settings')
space_usage = frappe._dict((frappe.local.conf.limits or {}).get('space_usage', {}))
site_info = {
'installed_apps': get_installed_apps_info(),
'users': users,
'country': system_settings.country,
'language': system_settings.language or 'english',
'time_zone': system_settings.time_zone,
'setup_complete': cint(system_settings.setup_complete),
'scheduler_enabled': system_settings.enable_scheduler,
# usage
'emails_sent': get_emails_sent_this_month(),
'space_used': flt((space_usage.total or 0) / 1024.0, 2),
'database_size': space_usage.database_size,
'backup_size': space_usage.backup_size,
'files_size': space_usage.files_size
}
# from other apps
for method_name in frappe.get_hooks('get_site_info'):
site_info.update(frappe.get_attr(method_name)(site_info) or {})
# dumps -> loads to prevent datatype conflicts
return json.loads(frappe.as_json(site_info))
def parse_json(val):
"""
Parses `val` as JSON if it is a string, otherwise returns it unchanged
"""
if isinstance(val, string_types):
return json.loads(val)
return val
def cast_fieldtype(fieldtype, value):
if fieldtype in ("Currency", "Float", "Percent"):
value = flt(value)
elif fieldtype in ("Int", "Check"):
value = cint(value)
elif fieldtype in ("Data", "Text", "Small Text", "Long Text",
"Text Editor", "Select", "Link", "Dynamic Link"):
value = cstr(value)
elif fieldtype == "Date":
value = getdate(value)
elif fieldtype == "Datetime":
value = get_datetime(value)
elif fieldtype == "Time":
value = to_timedelta(value)
return value
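# Illustrative usage (a sketch):
#
#     >>> cast_fieldtype('Int', '42')
#     42
#     >>> cast_fieldtype('Currency', '3.50')
#     3.5
#     >>> cast_fieldtype('Data', 42)
#     '42'
#
# Unknown fieldtypes fall through and the value is returned unchanged.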
|
|
# Copyright 2015 SimpliVity Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from oslo_utils import timeutils
import pytz
from cinder import context
from cinder import exception
from cinder import objects
from cinder.objects import base as ovo_base
from cinder.objects import fields
from cinder.tests.unit.consistencygroup import fake_consistencygroup
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import objects as test_objects
fake_group = {
'id': fake.GROUP_ID,
'user_id': fake.USER_ID,
'project_id': fake.PROJECT_ID,
'host': 'fake_host',
'availability_zone': 'fake_az',
'name': 'fake_name',
'description': 'fake_description',
'group_type_id': fake.GROUP_TYPE_ID,
'status': fields.GroupStatus.CREATING,
}
@ddt.ddt
class TestVolume(test_objects.BaseObjectsTestCase):
@staticmethod
def _compare(test, db, obj):
db = {k: v for k, v in db.items()
if not k.endswith('metadata') or k.startswith('volume')}
test_objects.BaseObjectsTestCase._compare(test, db, obj)
@mock.patch('cinder.db.sqlalchemy.api.volume_get')
def test_get_by_id(self, volume_get):
db_volume = fake_volume.fake_db_volume()
volume_get.return_value = db_volume
volume = objects.Volume.get_by_id(self.context, fake.VOLUME_ID)
volume_get.assert_called_once_with(self.context, fake.VOLUME_ID)
self._compare(self, db_volume, volume)
@mock.patch('cinder.db.sqlalchemy.api.model_query')
def test_get_by_id_no_existing_id(self, model_query):
pf = (model_query().options().options().options().options().options().
options())
pf.filter_by().first.return_value = None
self.assertRaises(exception.VolumeNotFound,
objects.Volume.get_by_id, self.context, 123)
@mock.patch('cinder.db.volume_create')
# TODO: (Y release) remove ddt.data and ddt.unpack decorators
@ddt.data(
({}, True), # default value
({'use_quota': True}, True), # Normal init
({'use_quota': False}, False),
({'migration_status': 'target:'}, False), # auto detect migrating
({'migration_status': 'migrating:'}, True), # auto detect normal
({'admin_metadata': {'temporary': True}}, False), # temp
({'admin_metadata': {'something': True}}, True), # normal
)
@ddt.unpack
def test_create(self, ovo, expected, volume_create):
db_volume = fake_volume.fake_db_volume()
volume_create.return_value = db_volume
volume = objects.Volume(context=self.context, **ovo)
volume.create()
self.assertEqual(db_volume['id'], volume.id)
use_quota = volume_create.call_args[0][1]['use_quota']
# TODO: (Y release) remove next line
self.assertIs(expected, use_quota)
@mock.patch('cinder.db.volume_update')
# TODO: (Y release) replace ddt.data and ddt.unpack decorators with
# @ddt.data(False, True)
@ddt.data(
(False, {}, True),
(True, {}, True),
(False, {'use_quota': True}, True),
(False, {'use_quota': False}, False),
(False, {'migration_status': 'target:'}, False),
(False, {'migration_status': 'migrating:'}, True),
(False,
{'volume_admin_metadata': [{'key': 'temporary', 'value': True}]},
False),
(False,
{'volume_admin_metadata': [{'key': 'something', 'value': True}]},
True),
)
@ddt.unpack
def test_save(self, test_cg, ovo, expected, volume_update):
use_quota = ovo.pop('use_quota', None)
db_volume = fake_volume.fake_db_volume(**ovo)
# TODO: (Y release) remove expected_attrs
if 'volume_admin_metadata' in ovo:
expected_attrs = ['admin_metadata']
else:
expected_attrs = []
volume = objects.Volume._from_db_object(self.context,
objects.Volume(), db_volume,
expected_attrs=expected_attrs)
volume.display_name = 'foobar'
if test_cg:
volume.consistencygroup = None
# TODO: (Y release) remove next 2 lines
if use_quota is not None:
volume.use_quota = use_quota
volume.save()
# TODO: (Y release) remove use_quota
volume_update.assert_called_once_with(self.context, volume.id,
{'display_name': 'foobar',
'use_quota': expected})
def test_save_error(self):
db_volume = fake_volume.fake_db_volume()
volume = objects.Volume._from_db_object(self.context,
objects.Volume(), db_volume)
volume.display_name = 'foobar'
volume.consistencygroup = (
fake_consistencygroup.fake_consistencyobject_obj(self.context))
self.assertRaises(exception.ObjectActionError,
volume.save)
@mock.patch('cinder.db.volume_metadata_update',
return_value={'key1': 'value1'})
@mock.patch('cinder.db.volume_update')
def test_save_with_metadata(self, volume_update, metadata_update):
db_volume = fake_volume.fake_db_volume()
volume = objects.Volume._from_db_object(self.context,
objects.Volume(), db_volume)
volume.display_name = 'foobar'
volume.metadata = {'key1': 'value1'}
self.assertEqual({'display_name': 'foobar',
'metadata': {'key1': 'value1'}},
volume.obj_get_changes())
volume.save()
# TODO: (Y release) remove use_quota
volume_update.assert_called_once_with(self.context, volume.id,
{'display_name': 'foobar',
'use_quota': True})
metadata_update.assert_called_once_with(self.context, volume.id,
{'key1': 'value1'}, True)
@mock.patch('cinder.db.volume_admin_metadata_update',
return_value={'key1': 'value1'})
@mock.patch('cinder.db.volume_update')
def test_save_with_admin_metadata(self, volume_update,
admin_metadata_update):
# Test with no admin context
db_volume = fake_volume.fake_db_volume()
volume = objects.Volume._from_db_object(self.context,
objects.Volume(), db_volume)
volume.admin_metadata = {'key1': 'value1'}
volume.save()
self.assertFalse(admin_metadata_update.called)
# Test with admin context
admin_context = context.RequestContext(self.user_id, self.project_id,
is_admin=True)
volume = objects.Volume._from_db_object(admin_context,
objects.Volume(), db_volume)
volume.admin_metadata = {'key1': 'value1'}
volume.save()
admin_metadata_update.assert_called_once_with(
admin_context, volume.id, {'key1': 'value1'}, True)
def test_save_with_glance_metadata(self):
db_volume = fake_volume.fake_db_volume()
volume = objects.Volume._from_db_object(self.context,
objects.Volume(), db_volume)
volume.display_name = 'foobar'
volume.glance_metadata = {'key1': 'value1'}
self.assertRaises(exception.ObjectActionError, volume.save)
def test_save_with_consistencygroup(self):
db_volume = fake_volume.fake_db_volume()
volume = objects.Volume._from_db_object(self.context,
objects.Volume(), db_volume)
volume.display_name = 'foobar'
volume.consistencygroup = objects.ConsistencyGroup()
self.assertRaises(exception.ObjectActionError, volume.save)
def test_save_with_snapshots(self):
db_volume = fake_volume.fake_db_volume()
volume = objects.Volume._from_db_object(self.context,
objects.Volume(), db_volume)
volume.display_name = 'foobar'
volume.snapshots = objects.SnapshotList()
self.assertRaises(exception.ObjectActionError, volume.save)
@mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow())
@mock.patch('cinder.db.sqlalchemy.api.volume_destroy')
def test_destroy(self, volume_destroy, utcnow_mock):
volume_destroy.return_value = {
'status': 'deleted',
'deleted': True,
'deleted_at': utcnow_mock.return_value}
db_volume = fake_volume.fake_db_volume()
volume = objects.Volume._from_db_object(self.context,
objects.Volume(), db_volume)
volume.destroy()
self.assertTrue(volume_destroy.called)
admin_context = volume_destroy.call_args[0][0]
self.assertTrue(admin_context.is_admin)
self.assertTrue(volume.deleted)
self.assertEqual('deleted', volume.status)
self.assertEqual(utcnow_mock.return_value.replace(tzinfo=pytz.UTC),
volume.deleted_at)
self.assertIsNone(volume.migration_status)
def test_obj_fields(self):
volume = objects.Volume(context=self.context, id=fake.VOLUME_ID,
name_id=fake.VOLUME_NAME_ID)
self.assertEqual(['name', 'name_id', 'volume_metadata',
'volume_admin_metadata', 'volume_glance_metadata'],
volume.obj_extra_fields)
self.assertEqual('volume-%s' % fake.VOLUME_NAME_ID, volume.name)
self.assertEqual(fake.VOLUME_NAME_ID, volume.name_id)
def test_obj_field_previous_status(self):
volume = objects.Volume(context=self.context,
previous_status='backing-up')
self.assertEqual('backing-up', volume.previous_status)
@mock.patch('cinder.db.volume_metadata_delete')
def test_delete_metadata_key(self, metadata_delete):
volume = objects.Volume(self.context, id=fake.VOLUME_ID)
volume.metadata = {'key1': 'value1', 'key2': 'value2'}
self.assertEqual({}, volume._orig_metadata)
volume.delete_metadata_key('key2')
self.assertEqual({'key1': 'value1'}, volume.metadata)
metadata_delete.assert_called_once_with(self.context, fake.VOLUME_ID,
'key2')
@mock.patch('cinder.db.volume_metadata_get')
@mock.patch('cinder.db.volume_glance_metadata_get')
@mock.patch('cinder.db.volume_admin_metadata_get')
@mock.patch('cinder.objects.volume_type.VolumeType.get_by_id')
@mock.patch('cinder.objects.volume_attachment.VolumeAttachmentList.'
'get_all_by_volume_id')
@mock.patch('cinder.objects.consistencygroup.ConsistencyGroup.get_by_id')
@mock.patch('cinder.objects.snapshot.SnapshotList.get_all_for_volume')
def test_obj_load_attr(self, mock_sl_get_all_for_volume, mock_cg_get_by_id,
mock_va_get_all_by_vol, mock_vt_get_by_id,
mock_admin_metadata_get, mock_glance_metadata_get,
mock_metadata_get):
fake_db_volume = fake_volume.fake_db_volume(
consistencygroup_id=fake.CONSISTENCY_GROUP_ID)
volume = objects.Volume._from_db_object(
self.context, objects.Volume(), fake_db_volume)
# Test metadata lazy-loaded field
metadata = {'foo': 'bar'}
mock_metadata_get.return_value = metadata
self.assertEqual(metadata, volume.metadata)
mock_metadata_get.assert_called_once_with(self.context, volume.id)
# Test glance_metadata lazy-loaded field
glance_metadata = [{'key': 'foo', 'value': 'bar'}]
mock_glance_metadata_get.return_value = glance_metadata
self.assertEqual({'foo': 'bar'}, volume.glance_metadata)
mock_glance_metadata_get.assert_called_once_with(
self.context, volume.id)
# Test volume_type lazy-loaded field
# Case1. volume.volume_type_id = None
self.assertIsNone(volume.volume_type)
# Case2. volume2.volume_type_id = 1
fake2 = fake_volume.fake_db_volume()
fake2.update({'volume_type_id': fake.VOLUME_ID})
volume2 = objects.Volume._from_db_object(
self.context, objects.Volume(), fake2)
volume_type = objects.VolumeType(context=self.context,
id=fake.VOLUME_TYPE_ID)
mock_vt_get_by_id.return_value = volume_type
self.assertEqual(volume_type, volume2.volume_type)
mock_vt_get_by_id.assert_called_once_with(self.context,
volume2.volume_type_id)
# Test consistencygroup lazy-loaded field
consistencygroup = objects.ConsistencyGroup(
context=self.context, id=fake.CONSISTENCY_GROUP_ID)
mock_cg_get_by_id.return_value = consistencygroup
self.assertEqual(consistencygroup, volume.consistencygroup)
mock_cg_get_by_id.assert_called_once_with(self.context,
volume.consistencygroup_id)
# Test snapshots lazy-loaded field
snapshots = objects.SnapshotList(context=self.context,
id=fake.SNAPSHOT_ID)
mock_sl_get_all_for_volume.return_value = snapshots
self.assertEqual(snapshots, volume.snapshots)
mock_sl_get_all_for_volume.assert_called_once_with(self.context,
volume.id)
# Test volume_attachment lazy-loaded field
va_objs = [objects.VolumeAttachment(context=self.context, id=i)
for i in [fake.OBJECT_ID, fake.OBJECT2_ID, fake.OBJECT3_ID]]
va_list = objects.VolumeAttachmentList(context=self.context,
objects=va_objs)
mock_va_get_all_by_vol.return_value = va_list
self.assertEqual(va_list, volume.volume_attachment)
mock_va_get_all_by_vol.assert_called_once_with(self.context, volume.id)
# Test admin_metadata lazy-loaded field - user context
adm_metadata = {'bar': 'foo'}
mock_admin_metadata_get.return_value = adm_metadata
self.assertEqual({}, volume.admin_metadata)
self.assertFalse(mock_admin_metadata_get.called)
# Test admin_metadata lazy-loaded field - admin context
adm_context = self.context.elevated()
volume = objects.Volume._from_db_object(adm_context, objects.Volume(),
fake_volume.fake_db_volume())
adm_metadata = {'bar': 'foo'}
mock_admin_metadata_get.return_value = adm_metadata
self.assertEqual(adm_metadata, volume.admin_metadata)
mock_admin_metadata_get.assert_called_once_with(adm_context, volume.id)
@mock.patch('cinder.objects.consistencygroup.ConsistencyGroup.get_by_id')
def test_obj_load_attr_cgroup_not_exist(self, mock_cg_get_by_id):
fake_db_volume = fake_volume.fake_db_volume(consistencygroup_id=None)
volume = objects.Volume._from_db_object(
self.context, objects.Volume(), fake_db_volume)
self.assertIsNone(volume.consistencygroup)
mock_cg_get_by_id.assert_not_called()
@mock.patch('cinder.objects.group.Group.get_by_id')
def test_obj_load_attr_group_not_exist(self, mock_group_get_by_id):
fake_db_volume = fake_volume.fake_db_volume(group_id=None)
volume = objects.Volume._from_db_object(
self.context, objects.Volume(), fake_db_volume)
self.assertIsNone(volume.group)
mock_group_get_by_id.assert_not_called()
def test_from_db_object_with_all_expected_attributes(self):
expected_attrs = ['metadata', 'admin_metadata', 'glance_metadata',
'volume_type', 'volume_attachment',
'consistencygroup']
db_metadata = [{'key': 'foo', 'value': 'bar'}]
db_admin_metadata = [{'key': 'admin_foo', 'value': 'admin_bar'}]
db_glance_metadata = [{'key': 'glance_foo', 'value': 'glance_bar'}]
db_volume_type = fake_volume.fake_db_volume_type()
db_volume_attachments = fake_volume.volume_attachment_db_obj()
db_consistencygroup = fake_consistencygroup.fake_db_consistencygroup()
db_snapshots = fake_snapshot.fake_db_snapshot()
db_volume = fake_volume.fake_db_volume(
volume_metadata=db_metadata,
volume_admin_metadata=db_admin_metadata,
volume_glance_metadata=db_glance_metadata,
volume_type=db_volume_type,
volume_attachment=[db_volume_attachments],
consistencygroup=db_consistencygroup,
snapshots=[db_snapshots],
)
volume = objects.Volume._from_db_object(self.context, objects.Volume(),
db_volume, expected_attrs)
self.assertEqual({'foo': 'bar'}, volume.metadata)
self.assertEqual({'admin_foo': 'admin_bar'}, volume.admin_metadata)
self.assertEqual({'glance_foo': 'glance_bar'}, volume.glance_metadata)
self._compare(self, db_volume_type, volume.volume_type)
self._compare(self, db_volume_attachments, volume.volume_attachment)
self._compare(self, db_consistencygroup, volume.consistencygroup)
self._compare(self, db_snapshots, volume.snapshots)
@mock.patch('cinder.db.volume_glance_metadata_get', return_value={})
@mock.patch('cinder.db.sqlalchemy.api.volume_get')
def test_refresh(self, volume_get, volume_metadata_get):
db_volume1 = fake_volume.fake_db_volume()
db_volume2 = db_volume1.copy()
db_volume2['display_name'] = 'foobar'
# On the second volume_get, return the volume with an updated
# display_name
volume_get.side_effect = [db_volume1, db_volume2]
volume = objects.Volume.get_by_id(self.context, fake.VOLUME_ID)
self._compare(self, db_volume1, volume)
# display_name was updated, so a volume refresh should have a new value
# for that field
volume.refresh()
self._compare(self, db_volume2, volume)
volume_get.assert_has_calls([mock.call(self.context, fake.VOLUME_ID),
mock.call.__bool__(),
mock.call(self.context, fake.VOLUME_ID)])
def test_metadata_aliases(self):
volume = objects.Volume(context=self.context)
# metadata<->volume_metadata
volume.metadata = {'abc': 'def'}
self.assertEqual([{'key': 'abc', 'value': 'def'}],
volume.volume_metadata)
md = [{'key': 'def', 'value': 'abc'}]
volume.volume_metadata = md
self.assertEqual({'def': 'abc'}, volume.metadata)
# admin_metadata<->volume_admin_metadata
volume.admin_metadata = {'foo': 'bar'}
self.assertEqual([{'key': 'foo', 'value': 'bar'}],
volume.volume_admin_metadata)
volume.volume_admin_metadata = [{'key': 'xyz', 'value': '42'}]
self.assertEqual({'xyz': '42'}, volume.admin_metadata)
# glance_metadata<->volume_glance_metadata
volume.glance_metadata = {'jkl': 'mno'}
self.assertEqual([{'key': 'jkl', 'value': 'mno'}],
volume.volume_glance_metadata)
volume.volume_glance_metadata = [{'key': 'prs', 'value': 'tuw'}]
self.assertEqual({'prs': 'tuw'}, volume.glance_metadata)
@mock.patch('cinder.db.volume_metadata_update', return_value={})
@mock.patch('cinder.db.volume_update')
@ddt.data({'src_vol_type_id': fake.VOLUME_TYPE_ID,
'dest_vol_type_id': fake.VOLUME_TYPE2_ID},
{'src_vol_type_id': None,
'dest_vol_type_id': fake.VOLUME_TYPE2_ID})
@ddt.unpack
def test_finish_volume_migration(self, volume_update, metadata_update,
src_vol_type_id, dest_vol_type_id):
src_volume_db = fake_volume.fake_db_volume(
**{'id': fake.VOLUME_ID, 'volume_type_id': src_vol_type_id,
'use_quota': True})
if src_vol_type_id:
src_volume_db['volume_type'] = fake_volume.fake_db_volume_type(
id=src_vol_type_id)
dest_volume_db = fake_volume.fake_db_volume(
**{'id': fake.VOLUME2_ID, 'volume_type_id': dest_vol_type_id,
'use_quota': False})
if dest_vol_type_id:
dest_volume_db['volume_type'] = fake_volume.fake_db_volume_type(
id=dest_vol_type_id)
expected_attrs = objects.Volume._get_expected_attrs(self.context)
src_volume = objects.Volume._from_db_object(
self.context, objects.Volume(), src_volume_db,
expected_attrs=expected_attrs)
dest_volume = objects.Volume._from_db_object(
self.context, objects.Volume(), dest_volume_db,
expected_attrs=expected_attrs)
updated_dest_volume = src_volume.finish_volume_migration(
dest_volume)
self.assertEqual('deleting', updated_dest_volume.migration_status)
self.assertEqual('migration src for ' + src_volume.id,
updated_dest_volume.display_description)
self.assertEqual(src_volume.id, updated_dest_volume._name_id)
self.assertTrue(volume_update.called)
volume_update.assert_has_calls([
mock.call(self.context, src_volume.id, mock.ANY),
mock.call(self.context, dest_volume.id, mock.ANY)])
ctxt, vol_id, updates = volume_update.call_args[0]
self.assertNotIn('volume_type', updates)
# Ensure that the destination volume type has not been overwritten
self.assertEqual(dest_vol_type_id,
getattr(updated_dest_volume, 'volume_type_id'))
# Ignore these attributes, since they were updated by
# finish_volume_migration
ignore_keys = ('id', 'provider_location', '_name_id',
'migration_status', 'display_description', 'status',
'volume_glance_metadata', 'volume_type', 'use_quota',
'volume_attachment')
dest_vol_dict = {k: updated_dest_volume[k] for k in
updated_dest_volume.keys() if k not in ignore_keys}
src_vol_dict = {k: src_volume[k] for k in src_volume.keys()
if k not in ignore_keys}
self.assertEqual(src_vol_dict, dest_vol_dict)
# use_quota must not have been switched; otherwise we would mess up our
# quota accounting
self.assertTrue(src_volume.use_quota)
self.assertFalse(updated_dest_volume.use_quota)
def test_volume_with_metadata_serialize_deserialize_no_changes(self):
updates = {'volume_glance_metadata': [{'key': 'foo', 'value': 'bar'}],
'expected_attrs': ['glance_metadata']}
volume = fake_volume.fake_volume_obj(self.context, **updates)
serializer = objects.base.CinderObjectSerializer()
serialized_volume = serializer.serialize_entity(self.context, volume)
volume = serializer.deserialize_entity(self.context, serialized_volume)
self.assertDictEqual({}, volume.obj_get_changes())
@mock.patch('cinder.db.volume_admin_metadata_update')
@mock.patch('cinder.db.sqlalchemy.api.volume_attach')
def test_begin_attach(self, volume_attach, metadata_update):
volume = fake_volume.fake_volume_obj(self.context, use_quota=True)
db_attachment = fake_volume.volume_attachment_db_obj(
volume_id=volume.id,
attach_status=fields.VolumeAttachStatus.ATTACHING)
volume_attach.return_value = db_attachment
metadata_update.return_value = {'attached_mode': 'rw'}
with mock.patch.object(self.context, 'elevated') as mock_elevated:
mock_elevated.return_value = context.get_admin_context()
attachment = volume.begin_attach("rw")
self.assertIsInstance(attachment, objects.VolumeAttachment)
self.assertEqual(volume.id, attachment.volume_id)
self.assertEqual(fields.VolumeAttachStatus.ATTACHING,
attachment.attach_status)
metadata_update.assert_called_once_with(self.context.elevated(),
volume.id,
{'attached_mode': u'rw'},
True)
self.assertEqual('rw', volume.admin_metadata['attached_mode'])
@mock.patch('cinder.db.volume_admin_metadata_delete')
@mock.patch('cinder.db.sqlalchemy.api.volume_detached')
@mock.patch('cinder.objects.volume_attachment.VolumeAttachmentList.'
'get_all_by_volume_id')
def test_volume_detached_with_attachment(
self, volume_attachment_get,
volume_detached,
metadata_delete):
va_objs = [objects.VolumeAttachment(context=self.context, id=i,
volume_id=fake.VOLUME_ID)
for i in [fake.OBJECT_ID, fake.OBJECT2_ID, fake.OBJECT3_ID]]
# As changes are not saved, we need to reset them here. Later changes
# will be checked.
for obj in va_objs:
obj.obj_reset_changes()
va_list = objects.VolumeAttachmentList(context=self.context,
objects=va_objs)
va_list.obj_reset_changes()
volume_attachment_get.return_value = va_list
admin_context = context.get_admin_context()
volume = fake_volume.fake_volume_obj(
admin_context,
id=fake.VOLUME_ID,
volume_attachment=va_list,
volume_admin_metadata=[{'key': 'attached_mode',
'value': 'rw'}])
self.assertEqual(3, len(volume.volume_attachment))
volume_detached.return_value = ({'status': 'in-use'},
{'attached_mode': 'rw'})
with mock.patch.object(admin_context, 'elevated') as mock_elevated:
mock_elevated.return_value = admin_context
volume.finish_detach(fake.OBJECT_ID)
volume_detached.assert_called_once_with(admin_context,
volume.id,
fake.OBJECT_ID)
metadata_delete.assert_called_once_with(admin_context,
volume.id,
'attached_mode')
self.assertEqual('in-use', volume.status)
self.assertEqual({}, volume.cinder_obj_get_changes())
self.assertEqual(2, len(volume.volume_attachment))
self.assertNotIn('attached_mode', volume.admin_metadata)
@mock.patch('cinder.db.volume_admin_metadata_delete')
@mock.patch('cinder.db.sqlalchemy.api.volume_detached')
@mock.patch('cinder.objects.volume_attachment.VolumeAttachmentList.'
'get_all_by_volume_id')
def test_volume_detached_without_attachment(
self, volume_attachment_get, volume_detached, metadata_delete):
admin_context = context.get_admin_context()
volume = fake_volume.fake_volume_obj(
admin_context,
volume_admin_metadata=[{'key': 'attached_mode',
'value': 'rw'}])
self.assertEqual([], volume.volume_attachment.objects)
volume_detached.return_value = ({'status': 'in-use'}, None)
with mock.patch.object(admin_context, 'elevated') as mock_elevated:
mock_elevated.return_value = admin_context
volume.finish_detach(fake.OBJECT_ID)
metadata_delete.assert_called_once_with(admin_context,
volume.id,
'attached_mode')
volume_detached.assert_called_once_with(admin_context,
volume.id,
fake.OBJECT_ID)
self.assertEqual('in-use', volume.status)
self.assertEqual({}, volume.cinder_obj_get_changes())
self.assertFalse(volume_attachment_get.called)
@ddt.data(True, False)
def test_is_replicated(self, result):
volume_type = fake_volume.fake_volume_type_obj(self.context)
volume = fake_volume.fake_volume_obj(
self.context, volume_type_id=volume_type.id)
volume.volume_type = volume_type
with mock.patch.object(volume_type, 'is_replicated',
return_value=result) as is_replicated:
self.assertEqual(result, volume.is_replicated())
is_replicated.assert_called_once_with()
def test_is_replicated_no_type(self):
volume = fake_volume.fake_volume_obj(
self.context, volume_type_id=None, volume_type=None)
self.assertFalse(bool(volume.is_replicated()))
@ddt.data((None, False), ('error', False), ('success', False),
('target:123456', True))
@ddt.unpack
def test_is_migration_target(self, migration_status, expected):
volume = fake_volume.fake_volume_obj(self.context,
migration_status=migration_status)
self.assertIs(expected, volume.is_migration_target())
@ddt.data(
# We could lose value during rolling upgrade if we added a new temp
# type in this upgrade and didn't take it into consideration
('1.38', {'use_quota': False}, True),
# On rehydration we auto calculate use_quota value if not present
('1.38', {'migration_status': 'target:123'}, False),
# Both versions in X
('1.39', {'use_quota': True}, True),
# In X we don't recalculate, since we transmit the field
('1.39', {'migration_status': 'target:123', 'use_quota': True}, True),
)
@ddt.unpack
def test_obj_make_compatible_use_quota_added(self, version, ovo, expected):
volume = objects.Volume(self.context, **ovo)
# When serializing to v1.38 we'll lose the use_quota value so it will
# be recalculated based on the Volume values
serializer = ovo_base.CinderObjectSerializer(version)
primitive = serializer.serialize_entity(self.context, volume)
converted_volume = objects.Volume.obj_from_primitive(primitive)
self.assertIs(expected, converted_volume.use_quota)
@ddt.ddt
class TestVolumeList(test_objects.BaseObjectsTestCase):
@mock.patch('cinder.db.volume_get_all')
def test_get_all(self, volume_get_all):
db_volume = fake_volume.fake_db_volume()
volume_get_all.return_value = [db_volume]
volumes = objects.VolumeList.get_all(self.context,
mock.sentinel.marker,
mock.sentinel.limit,
mock.sentinel.sort_key,
mock.sentinel.sort_dir)
self.assertEqual(1, len(volumes))
TestVolume._compare(self, db_volume, volumes[0])
@mock.patch('cinder.db.volume_get_all_by_host')
def test_get_by_host(self, get_all_by_host):
db_volume = fake_volume.fake_db_volume()
get_all_by_host.return_value = [db_volume]
volumes = objects.VolumeList.get_all_by_host(
self.context, 'fake-host')
self.assertEqual(1, len(volumes))
TestVolume._compare(self, db_volume, volumes[0])
@mock.patch('cinder.db.volume_get_all_by_group')
def test_get_by_group(self, get_all_by_group):
db_volume = fake_volume.fake_db_volume()
get_all_by_group.return_value = [db_volume]
volumes = objects.VolumeList.get_all_by_group(
self.context, 'fake-host')
self.assertEqual(1, len(volumes))
TestVolume._compare(self, db_volume, volumes[0])
@mock.patch('cinder.db.volume_get_all_by_project')
def test_get_by_project(self, get_all_by_project):
db_volume = fake_volume.fake_db_volume()
get_all_by_project.return_value = [db_volume]
volumes = objects.VolumeList.get_all_by_project(
self.context, mock.sentinel.project_id, mock.sentinel.marker,
mock.sentinel.limit, mock.sentinel.sorted_keys,
mock.sentinel.sorted_dirs, mock.sentinel.filters)
self.assertEqual(1, len(volumes))
TestVolume._compare(self, db_volume, volumes[0])
@ddt.data(['name_id'], ['__contains__'])
def test_get_by_project_with_sort_key(self, sort_keys):
fake_volume.fake_db_volume()
self.assertRaises(exception.InvalidInput,
objects.VolumeList.get_all_by_project,
self.context,
self.context.project_id,
sort_keys=sort_keys)
@mock.patch('cinder.db.volume_include_in_cluster')
def test_include_in_cluster(self, include_mock):
filters = {'host': mock.sentinel.host,
'cluster_name': mock.sentinel.cluster_name}
cluster = 'new_cluster'
objects.VolumeList.include_in_cluster(self.context, cluster, **filters)
include_mock.assert_called_once_with(self.context, cluster, True,
**filters)
@mock.patch('cinder.db.volume_include_in_cluster')
def test_include_in_cluster_specify_partial(self, include_mock):
filters = {'host': mock.sentinel.host,
'cluster_name': mock.sentinel.cluster_name}
cluster = 'new_cluster'
objects.VolumeList.include_in_cluster(self.context, cluster,
mock.sentinel.partial_rename,
**filters)
include_mock.assert_called_once_with(self.context, cluster,
mock.sentinel.partial_rename,
**filters)
@mock.patch('cinder.db.group_create',
return_value=fake_group)
def test_populate_consistencygroup(self, mock_db_grp_create):
db_volume = fake_volume.fake_db_volume()
volume = objects.Volume._from_db_object(self.context,
objects.Volume(), db_volume)
fake_grp = fake_group.copy()
del fake_grp['id']
group = objects.Group(context=self.context,
**fake_grp)
group.create()
volume.group_id = group.id
volume.group = group
volume.populate_consistencygroup()
self.assertEqual(volume.group_id, volume.consistencygroup_id)
self.assertEqual(volume.group.id, volume.consistencygroup.id)
|
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The VMware API VM utility module to build SOAP object specs.
"""
import copy
import functools
from oslo.config import cfg
from nova import exception
from nova.i18n import _
from nova.network import model as network_model
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova import utils
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim_util
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
ALL_SUPPORTED_NETWORK_DEVICES = ['VirtualE1000', 'VirtualE1000e',
'VirtualPCNet32', 'VirtualSriovEthernetCard',
'VirtualVmxnet']
# A cache for VM references. The key is the VM name and the value is the
# VM reference. The VM name is unique: it is either the UUID of the
# instance, or the UUID with a '-rescue' suffix when the VM is a rescue
# VM. Caching the references avoids unnecessary communication with the
# backend.
_VM_REFS_CACHE = {}
def vm_refs_cache_reset():
global _VM_REFS_CACHE
_VM_REFS_CACHE = {}
def vm_ref_cache_delete(id):
_VM_REFS_CACHE.pop(id, None)
def vm_ref_cache_update(id, vm_ref):
_VM_REFS_CACHE[id] = vm_ref
def vm_ref_cache_get(id):
return _VM_REFS_CACHE.get(id)
def _vm_ref_cache(id, func, session, data):
vm_ref = vm_ref_cache_get(id)
if not vm_ref:
vm_ref = func(session, data)
vm_ref_cache_update(id, vm_ref)
return vm_ref
def vm_ref_cache_from_instance(func):
@functools.wraps(func)
def wrapper(session, instance):
id = instance['uuid']
return _vm_ref_cache(id, func, session, instance)
return wrapper
def vm_ref_cache_from_name(func):
@functools.wraps(func)
def wrapper(session, name):
id = name
return _vm_ref_cache(id, func, session, name)
return wrapper
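# Illustrative sketch of how the caching decorators above are meant to be
# used (``_search_vm_ref`` is a hypothetical lookup, not part of this module):
#
#     @vm_ref_cache_from_instance
#     def _search_vm_ref(session, instance):
#         return session._call_method(...)   # expensive backend lookup
#
# The first call for a given instance performs the lookup and stores the
# result in _VM_REFS_CACHE keyed by instance['uuid']; later calls return the
# cached reference until vm_ref_cache_delete() or vm_refs_cache_reset() is
# called.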
# the config key which stores the VNC port
VNC_CONFIG_KEY = 'config.extraConfig["RemoteDisplay.vnc.port"]'
def _iface_id_option_value(client_factory, iface_id, port_index):
opt = client_factory.create('ns0:OptionValue')
opt.key = "nvp.iface-id.%d" % port_index
opt.value = iface_id
return opt
def get_vm_create_spec(client_factory, instance, name, data_store_name,
vif_infos, os_type=constants.DEFAULT_OS_TYPE,
allocations=None):
"""Builds the VM Create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
config_spec.name = name
config_spec.guestId = os_type
# The name is the unique identifier for the VM. This will either be the
# instance UUID or the instance UUID with suffix '-rescue' for VMs that
# are in rescue mode
config_spec.instanceUuid = name
# Allow nested ESX instances to host 64 bit VMs.
if os_type == "vmkernel5Guest":
config_spec.nestedHVEnabled = "True"
vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = "[" + data_store_name + "]"
config_spec.files = vm_file_info
tools_info = client_factory.create('ns0:ToolsConfigInfo')
tools_info.afterPowerOn = True
tools_info.afterResume = True
tools_info.beforeGuestStandby = True
tools_info.beforeGuestShutdown = True
tools_info.beforeGuestReboot = True
config_spec.tools = tools_info
config_spec.numCPUs = int(instance['vcpus'])
config_spec.memoryMB = int(instance['memory_mb'])
# Configure cpu information
if (allocations is not None and
('cpu_limit' in allocations or
'cpu_reservation' in allocations or
'cpu_shares_level' in allocations)):
allocation = client_factory.create('ns0:ResourceAllocationInfo')
if 'cpu_limit' in allocations:
allocation.limit = allocations['cpu_limit']
if 'cpu_reservation' in allocations:
allocation.reservation = allocations['cpu_reservation']
if 'cpu_shares_level' in allocations:
shares = client_factory.create('ns0:SharesInfo')
shares.level = allocations['cpu_shares_level']
if (shares.level == 'custom' and
'cpu_shares_share' in allocations):
shares.shares = allocations['cpu_shares_share']
else:
shares.shares = 0
allocation.shares = shares
config_spec.cpuAllocation = allocation
vif_spec_list = []
for vif_info in vif_infos:
vif_spec = _create_vif_spec(client_factory, vif_info)
vif_spec_list.append(vif_spec)
device_config_spec = vif_spec_list
config_spec.deviceChange = device_config_spec
# add vm-uuid and iface-id.x values for Neutron
extra_config = []
opt = client_factory.create('ns0:OptionValue')
opt.key = "nvp.vm-uuid"
opt.value = instance['uuid']
extra_config.append(opt)
port_index = 0
for vif_info in vif_infos:
if vif_info['iface_id']:
extra_config.append(_iface_id_option_value(client_factory,
vif_info['iface_id'],
port_index))
port_index += 1
config_spec.extraConfig = extra_config
return config_spec
def get_vm_resize_spec(client_factory, instance):
"""Provides updates for a VM spec."""
resize_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
resize_spec.numCPUs = int(instance['vcpus'])
resize_spec.memoryMB = int(instance['memory_mb'])
return resize_spec
def create_controller_spec(client_factory, key,
adapter_type=constants.DEFAULT_ADAPTER_TYPE):
"""Builds a Config Spec for the LSI or Bus Logic Controller's addition
which acts as the controller for the virtual hard disk to be attached
to the VM.
"""
# Create a controller for the Virtual Hard Disk
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
if adapter_type == constants.ADAPTER_TYPE_BUSLOGIC:
virtual_controller = client_factory.create(
'ns0:VirtualBusLogicController')
elif adapter_type == constants.ADAPTER_TYPE_LSILOGICSAS:
virtual_controller = client_factory.create(
'ns0:VirtualLsiLogicSASController')
else:
virtual_controller = client_factory.create(
'ns0:VirtualLsiLogicController')
virtual_controller.key = key
virtual_controller.busNumber = 0
virtual_controller.sharedBus = "noSharing"
virtual_device_config.device = virtual_controller
return virtual_device_config
def convert_vif_model(name):
"""Converts standard VIF_MODEL types to the internal VMware ones."""
if name == network_model.VIF_MODEL_E1000:
return 'VirtualE1000'
if name == network_model.VIF_MODEL_E1000E:
return 'VirtualE1000e'
if name not in ALL_SUPPORTED_NETWORK_DEVICES:
msg = _('%s is not supported.') % name
raise exception.Invalid(msg)
return name
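# Illustrative usage (a sketch):
#
#     >>> convert_vif_model(network_model.VIF_MODEL_E1000)
#     'VirtualE1000'
#
# Unsupported model names raise exception.Invalid.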
def _create_vif_spec(client_factory, vif_info):
"""Builds a config spec for the addition of a new network
adapter to the VM.
"""
network_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
network_spec.operation = "add"
# Keep compatible with other Hyper vif model parameter.
vif_info['vif_model'] = convert_vif_model(vif_info['vif_model'])
vif = 'ns0:' + vif_info['vif_model']
net_device = client_factory.create(vif)
# NOTE(asomya): Only works on ESXi if the portgroup binding is set to
# ephemeral. A static binding is an invalid configuration, and with a
# dynamic binding the NIC does not come up on boot.
network_ref = vif_info['network_ref']
network_name = vif_info['network_name']
mac_address = vif_info['mac_address']
backing = None
if network_ref and network_ref['type'] == 'OpaqueNetwork':
backing_name = ''.join(['ns0:VirtualEthernetCard',
'OpaqueNetworkBackingInfo'])
backing = client_factory.create(backing_name)
backing.opaqueNetworkId = network_ref['network-id']
backing.opaqueNetworkType = network_ref['network-type']
elif (network_ref and
network_ref['type'] == "DistributedVirtualPortgroup"):
backing_name = ''.join(['ns0:VirtualEthernetCardDistributed',
'VirtualPortBackingInfo'])
backing = client_factory.create(backing_name)
portgroup = client_factory.create(
'ns0:DistributedVirtualSwitchPortConnection')
portgroup.switchUuid = network_ref['dvsw']
portgroup.portgroupKey = network_ref['dvpg']
backing.port = portgroup
else:
backing = client_factory.create(
'ns0:VirtualEthernetCardNetworkBackingInfo')
backing.deviceName = network_name
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = True
connectable_spec.connected = True
net_device.connectable = connectable_spec
net_device.backing = backing
# The server assigns a key to the device. Here we pass a negative
# temporary key: real keys are positive numbers, so a negative value
# cannot clash with the key that the server will associate with the device.
net_device.key = -47
net_device.addressType = "manual"
net_device.macAddress = mac_address
net_device.wakeOnLanEnabled = True
network_spec.device = net_device
return network_spec
def get_network_attach_config_spec(client_factory, vif_info, index):
"""Builds the vif attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
vif_spec = _create_vif_spec(client_factory, vif_info)
config_spec.deviceChange = [vif_spec]
if vif_info['iface_id'] is not None:
config_spec.extraConfig = [_iface_id_option_value(client_factory,
vif_info['iface_id'],
index)]
return config_spec
def get_network_detach_config_spec(client_factory, device, port_index):
"""Builds the vif detach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "remove"
virtual_device_config.device = device
config_spec.deviceChange = [virtual_device_config]
# If a key is already present then it cannot be deleted, only updated.
# This enables us to reuse the key if there is an additional attachment.
# The keys need to be preserved because the ESX host wires up the network
# according to these values; changing them would break networking to and
# from the interface.
config_spec.extraConfig = [_iface_id_option_value(client_factory,
'free',
port_index)]
return config_spec
def get_vmdk_attach_config_spec(client_factory,
disk_type=constants.DEFAULT_DISK_TYPE,
file_path=None,
disk_size=None,
linked_clone=False,
controller_key=None,
unit_number=None,
device_name=None):
"""Builds the vmdk attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = create_virtual_disk_spec(client_factory,
controller_key, disk_type, file_path,
disk_size, linked_clone,
unit_number, device_name)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_cdrom_attach_config_spec(client_factory,
datastore,
file_path,
controller_key,
cdrom_unit_number):
"""Builds and returns the cdrom attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = create_virtual_cdrom_spec(client_factory,
datastore,
controller_key,
file_path,
cdrom_unit_number)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_vmdk_detach_config_spec(client_factory, device,
destroy_disk=False):
"""Builds the vmdk detach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = detach_virtual_disk_spec(client_factory,
device,
destroy_disk)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_vm_extra_config_spec(client_factory, extra_opts):
"""Builds extra spec fields from a dictionary."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
# add the key value pairs
extra_config = []
for key, value in extra_opts.iteritems():
opt = client_factory.create('ns0:OptionValue')
opt.key = key
opt.value = value
extra_config.append(opt)
config_spec.extraConfig = extra_config
return config_spec
def get_vmdk_path(session, vm_ref, instance):
"""Gets the vmdk file path for specified instance."""
hardware_devices = session._call_method(vim_util,
"get_dynamic_property", vm_ref, "VirtualMachine",
"config.hardware.device")
(vmdk_path, adapter_type, disk_type) = get_vmdk_path_and_adapter_type(
hardware_devices, uuid=instance['uuid'])
return vmdk_path
def get_vmdk_path_and_adapter_type(hardware_devices, uuid=None):
"""Gets the vmdk file path and the storage adapter type."""
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
vmdk_file_path = None
vmdk_controller_key = None
disk_type = None
adapter_type_dict = {}
for device in hardware_devices:
if device.__class__.__name__ == "VirtualDisk":
if device.backing.__class__.__name__ == \
"VirtualDiskFlatVer2BackingInfo":
if uuid:
if uuid in device.backing.fileName:
vmdk_file_path = device.backing.fileName
else:
vmdk_file_path = device.backing.fileName
vmdk_controller_key = device.controllerKey
if getattr(device.backing, 'thinProvisioned', False):
disk_type = "thin"
else:
if getattr(device.backing, 'eagerlyScrub', False):
disk_type = "eagerZeroedThick"
else:
disk_type = constants.DEFAULT_DISK_TYPE
elif device.__class__.__name__ == "VirtualLsiLogicController":
adapter_type_dict[device.key] = constants.DEFAULT_ADAPTER_TYPE
elif device.__class__.__name__ == "VirtualBusLogicController":
adapter_type_dict[device.key] = constants.ADAPTER_TYPE_BUSLOGIC
elif device.__class__.__name__ == "VirtualIDEController":
adapter_type_dict[device.key] = constants.ADAPTER_TYPE_IDE
elif device.__class__.__name__ == "VirtualLsiLogicSASController":
adapter_type_dict[device.key] = constants.ADAPTER_TYPE_LSILOGICSAS
adapter_type = adapter_type_dict.get(vmdk_controller_key, "")
return (vmdk_file_path, adapter_type, disk_type)
def _find_controller_slot(controller_keys, taken, max_unit_number):
for controller_key in controller_keys:
for unit_number in range(max_unit_number):
if unit_number not in taken.get(controller_key, []):
return controller_key, unit_number
def _is_ide_controller(device):
return device.__class__.__name__ == 'VirtualIDEController'
def _is_scsi_controller(device):
return device.__class__.__name__ in ['VirtualLsiLogicController',
'VirtualLsiLogicSASController',
'VirtualBusLogicController']
def _find_allocated_slots(devices):
"""Return dictionary which maps controller_key to list of allocated unit
numbers for that controller_key.
"""
taken = {}
for device in devices:
if hasattr(device, 'controllerKey') and hasattr(device, 'unitNumber'):
unit_numbers = taken.setdefault(device.controllerKey, [])
unit_numbers.append(device.unitNumber)
if _is_scsi_controller(device):
# the SCSI controller sits on its own bus
unit_numbers = taken.setdefault(device.key, [])
unit_numbers.append(device.scsiCtlrUnitNumber)
return taken
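# Illustrative sketch (hypothetical values): given two SCSI controllers with
# keys 200 and 1000, where controller 200 already has a disk at unit 0 and
# reserves unit 7 for the controller itself (scsiCtlrUnitNumber is
# typically 7), the helpers above behave like:
#
#     taken = {200: [0, 7], 1000: []}
#     _find_controller_slot([200, 1000], taken, 16)   # -> (200, 1)
#
# i.e. the first free unit number on the first controller that has room.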
def allocate_controller_key_and_unit_number(client_factory, devices,
adapter_type):
"""This function inspects the current set of hardware devices and returns
controller_key and unit_number that can be used for attaching a new virtual
disk to adapter with the given adapter_type.
"""
if devices.__class__.__name__ == "ArrayOfVirtualDevice":
devices = devices.VirtualDevice
taken = _find_allocated_slots(devices)
ret = None
if adapter_type == constants.ADAPTER_TYPE_IDE:
ide_keys = [dev.key for dev in devices if _is_ide_controller(dev)]
ret = _find_controller_slot(ide_keys, taken, 2)
elif adapter_type in [constants.DEFAULT_ADAPTER_TYPE,
constants.ADAPTER_TYPE_LSILOGICSAS,
constants.ADAPTER_TYPE_BUSLOGIC]:
scsi_keys = [dev.key for dev in devices if _is_scsi_controller(dev)]
ret = _find_controller_slot(scsi_keys, taken, 16)
if ret:
return ret[0], ret[1], None
# create new controller with the specified type and return its spec
controller_key = -101
controller_spec = create_controller_spec(client_factory, controller_key,
adapter_type)
return controller_key, 0, controller_spec
def get_rdm_disk(hardware_devices, uuid):
"""Gets the RDM disk key."""
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskRawDiskMappingVer1BackingInfo" and
device.backing.lunUuid == uuid):
return device
def get_copy_virtual_disk_spec(client_factory,
adapter_type=constants.DEFAULT_ADAPTER_TYPE,
disk_type=constants.DEFAULT_DISK_TYPE):
"""Builds the Virtual Disk copy spec."""
dest_spec = client_factory.create('ns0:VirtualDiskSpec')
dest_spec.adapterType = get_vmdk_adapter_type(adapter_type)
dest_spec.diskType = disk_type
return dest_spec
def get_vmdk_create_spec(client_factory, size_in_kb,
adapter_type=constants.DEFAULT_ADAPTER_TYPE,
disk_type=constants.DEFAULT_DISK_TYPE):
"""Builds the virtual disk create spec."""
create_vmdk_spec = client_factory.create('ns0:FileBackedVirtualDiskSpec')
create_vmdk_spec.adapterType = get_vmdk_adapter_type(adapter_type)
create_vmdk_spec.diskType = disk_type
create_vmdk_spec.capacityKb = size_in_kb
return create_vmdk_spec
def create_virtual_cdrom_spec(client_factory,
datastore,
controller_key,
file_path,
cdrom_unit_number):
"""Builds spec for the creation of a new Virtual CDROM to the VM."""
config_spec = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
config_spec.operation = "add"
cdrom = client_factory.create('ns0:VirtualCdrom')
cdrom_device_backing = client_factory.create(
'ns0:VirtualCdromIsoBackingInfo')
cdrom_device_backing.datastore = datastore
cdrom_device_backing.fileName = file_path
cdrom.backing = cdrom_device_backing
cdrom.controllerKey = controller_key
cdrom.unitNumber = cdrom_unit_number
cdrom.key = -1
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = False
connectable_spec.connected = True
cdrom.connectable = connectable_spec
config_spec.device = cdrom
return config_spec
def create_virtual_disk_spec(client_factory, controller_key,
disk_type=constants.DEFAULT_DISK_TYPE,
file_path=None,
disk_size=None,
linked_clone=False,
unit_number=None,
device_name=None):
"""Builds spec for the creation of a new/ attaching of an already existing
Virtual Disk to the VM.
"""
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
if (file_path is None) or linked_clone:
virtual_device_config.fileOperation = "create"
virtual_disk = client_factory.create('ns0:VirtualDisk')
if disk_type == "rdm" or disk_type == "rdmp":
disk_file_backing = client_factory.create(
'ns0:VirtualDiskRawDiskMappingVer1BackingInfo')
disk_file_backing.compatibilityMode = "virtualMode" \
if disk_type == "rdm" else "physicalMode"
disk_file_backing.diskMode = "independent_persistent"
disk_file_backing.deviceName = device_name or ""
else:
disk_file_backing = client_factory.create(
'ns0:VirtualDiskFlatVer2BackingInfo')
disk_file_backing.diskMode = "persistent"
if disk_type == "thin":
disk_file_backing.thinProvisioned = True
else:
if disk_type == "eagerZeroedThick":
disk_file_backing.eagerlyScrub = True
disk_file_backing.fileName = file_path or ""
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = False
connectable_spec.connected = True
if not linked_clone:
virtual_disk.backing = disk_file_backing
else:
virtual_disk.backing = copy.copy(disk_file_backing)
virtual_disk.backing.fileName = ""
virtual_disk.backing.parent = disk_file_backing
virtual_disk.connectable = connectable_spec
# The server assigns a key to the device. Here we pass a negative key:
# actual keys are positive, so a temporary negative value cannot clash
# with the key the server will eventually associate with the device.
virtual_disk.key = -100
virtual_disk.controllerKey = controller_key
virtual_disk.unitNumber = unit_number or 0
virtual_disk.capacityInKB = disk_size or 0
virtual_device_config.device = virtual_disk
return virtual_device_config
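# Illustrative usage (hypothetical values): attach an existing 1 GiB thin
# vmdk to a previously allocated controller slot. `controller_key` and
# `unit_number` are assumed to come from
# allocate_controller_key_and_unit_number().
#
#   disk_spec = create_virtual_disk_spec(
#       client_factory, controller_key, disk_type="thin",
#       file_path="[datastore1] my_vm/my_vm.vmdk",
#       disk_size=1024 * 1024, unit_number=unit_number)
#   # disk_spec is then appended to config_spec.deviceChange before calling
#   # ReconfigVM_Task.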
def detach_virtual_disk_spec(client_factory, device, destroy_disk=False):
"""Builds spec for the detach of an already existing Virtual Disk from VM.
"""
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "remove"
if destroy_disk:
virtual_device_config.fileOperation = "destroy"
virtual_device_config.device = device
return virtual_device_config
def clone_vm_spec(client_factory, location,
power_on=False, snapshot=None, template=False, config=None):
"""Builds the VM clone spec."""
clone_spec = client_factory.create('ns0:VirtualMachineCloneSpec')
clone_spec.location = location
clone_spec.powerOn = power_on
if snapshot:
clone_spec.snapshot = snapshot
if config is not None:
clone_spec.config = config
clone_spec.template = template
return clone_spec
def relocate_vm_spec(client_factory, datastore=None, host=None,
disk_move_type="moveAllDiskBackingsAndAllowSharing"):
"""Builds the VM relocation spec."""
rel_spec = client_factory.create('ns0:VirtualMachineRelocateSpec')
rel_spec.datastore = datastore
rel_spec.diskMoveType = disk_move_type
if host:
rel_spec.host = host
return rel_spec
def get_machine_id_change_spec(client_factory, machine_id_str):
"""Builds the machine id change config spec."""
virtual_machine_config_spec = client_factory.create(
'ns0:VirtualMachineConfigSpec')
opt = client_factory.create('ns0:OptionValue')
opt.key = "machine.id"
opt.value = machine_id_str
virtual_machine_config_spec.extraConfig = [opt]
return virtual_machine_config_spec
def get_add_vswitch_port_group_spec(client_factory, vswitch_name,
port_group_name, vlan_id):
"""Builds the virtual switch port group add spec."""
vswitch_port_group_spec = client_factory.create('ns0:HostPortGroupSpec')
vswitch_port_group_spec.name = port_group_name
vswitch_port_group_spec.vswitchName = vswitch_name
# VLAN ID of 0 means that VLAN tagging is not to be done for the network.
vswitch_port_group_spec.vlanId = int(vlan_id)
policy = client_factory.create('ns0:HostNetworkPolicy')
nicteaming = client_factory.create('ns0:HostNicTeamingPolicy')
nicteaming.notifySwitches = True
policy.nicTeaming = nicteaming
vswitch_port_group_spec.policy = policy
return vswitch_port_group_spec
def get_vnc_config_spec(client_factory, port):
"""Builds the vnc config spec."""
virtual_machine_config_spec = client_factory.create(
'ns0:VirtualMachineConfigSpec')
opt_enabled = client_factory.create('ns0:OptionValue')
opt_enabled.key = "RemoteDisplay.vnc.enabled"
opt_enabled.value = "true"
opt_port = client_factory.create('ns0:OptionValue')
opt_port.key = "RemoteDisplay.vnc.port"
opt_port.value = port
extras = [opt_enabled, opt_port]
virtual_machine_config_spec.extraConfig = extras
return virtual_machine_config_spec
@utils.synchronized('vmware.get_vnc_port')
def get_vnc_port(session):
"""Return VNC port for an VM or None if there is no available port."""
min_port = CONF.vmware.vnc_port
port_total = CONF.vmware.vnc_port_total
allocated_ports = _get_allocated_vnc_ports(session)
max_port = min_port + port_total
for port in range(min_port, max_port):
if port not in allocated_ports:
return port
raise exception.ConsolePortRangeExhausted(min_port=min_port,
max_port=max_port)
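# Illustrative behaviour (hypothetical configuration, not part of the
# original module): with CONF.vmware.vnc_port = 5900 and
# CONF.vmware.vnc_port_total = 3, the candidate range is 5900-5902.
#
#   port = get_vnc_port(session)   # returns e.g. 5901 if 5900 is taken
#   # ConsolePortRangeExhausted is raised once all three ports are in use.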
def _get_allocated_vnc_ports(session):
"""Return an integer set of all allocated VNC ports."""
# TODO(rgerganov): bug #1256944
# The VNC port should be unique per host, not per vCenter
vnc_ports = set()
result = session._call_method(vim_util, "get_objects",
"VirtualMachine", [VNC_CONFIG_KEY])
while result:
for obj in result.objects:
if not hasattr(obj, 'propSet'):
continue
dynamic_prop = obj.propSet[0]
option_value = dynamic_prop.val
vnc_port = option_value.value
vnc_ports.add(int(vnc_port))
token = _get_token(result)
if token:
result = session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
break
return vnc_ports
# NOTE(mdbooth): this convenience function is temporarily duplicated in
# ds_util. The correct fix is to handle paginated results as they are returned
# from the relevant vim_util function. However, vim_util is currently
# effectively deprecated as we migrate to oslo.vmware. This duplication will be
# removed when we fix it properly in oslo.vmware.
def _get_token(results):
"""Get the token from the property results."""
return getattr(results, 'token', None)
def _get_reference_for_value(results, value):
for object in results.objects:
if object.obj.value == value:
return object
def _get_object_for_value(results, value):
for object in results.objects:
if object.propSet[0].val == value:
return object.obj
def _get_object_for_optionvalue(results, value):
for object in results.objects:
if hasattr(object, "propSet") and object.propSet:
if object.propSet[0].val.value == value:
return object.obj
def _get_object_from_results(session, results, value, func):
while results:
token = _get_token(results)
object = func(results, value)
if object:
if token:
session._call_method(vim_util,
"cancel_retrieve",
token)
return object
if token:
results = session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
return None
def _cancel_retrieve_if_necessary(session, results):
token = _get_token(results)
if token:
results = session._call_method(vim_util,
"cancel_retrieve",
token)
def _get_vm_ref_from_name(session, vm_name):
"""Get reference to the VM with the name specified."""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ["name"])
return _get_object_from_results(session, vms, vm_name,
_get_object_for_value)
@vm_ref_cache_from_name
def get_vm_ref_from_name(session, vm_name):
return (_get_vm_ref_from_vm_uuid(session, vm_name) or
_get_vm_ref_from_name(session, vm_name))
def _get_vm_ref_from_uuid(session, instance_uuid):
"""Get reference to the VM with the uuid specified.
This method reads the names of all VMs running on the backend and then
filters locally for the matching instance_uuid. It is far more efficient
to use _get_vm_ref_from_vm_uuid.
"""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ["name"])
return _get_object_from_results(session, vms, instance_uuid,
_get_object_for_value)
def _get_vm_ref_from_vm_uuid(session, instance_uuid):
"""Get reference to the VM.
The method will make use of FindAllByUuid to get the VM reference.
This method finds all VMs on the backend that match the
instance_uuid, more specifically all VMs on the backend that have
'config_spec.instanceUuid' set to 'instance_uuid'.
"""
vm_refs = session._call_method(
session._get_vim(),
"FindAllByUuid",
session._get_vim().get_service_content().searchIndex,
uuid=instance_uuid,
vmSearch=True,
instanceUuid=True)
if vm_refs:
return vm_refs[0]
def _get_vm_ref_from_extraconfig(session, instance_uuid):
"""Get reference to the VM with the uuid specified."""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ['config.extraConfig["nvp.vm-uuid"]'])
return _get_object_from_results(session, vms, instance_uuid,
_get_object_for_optionvalue)
@vm_ref_cache_from_instance
def get_vm_ref(session, instance):
"""Get reference to the VM through uuid or vm name."""
uuid = instance['uuid']
vm_ref = (search_vm_ref_by_identifier(session, uuid) or
_get_vm_ref_from_name(session, instance['name']))
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=uuid)
return vm_ref
def search_vm_ref_by_identifier(session, identifier):
"""Searches VM reference using the identifier.
This method is primarily meant to separate out part of the logic for
vm_ref search that could be used directly in the special case of
migrating the instance. For querying the VM linked to an instance, always
use get_vm_ref instead.
"""
vm_ref = (_get_vm_ref_from_vm_uuid(session, identifier) or
_get_vm_ref_from_extraconfig(session, identifier) or
_get_vm_ref_from_uuid(session, identifier))
return vm_ref
def get_host_ref_from_id(session, host_id, property_list=None):
"""Get a host reference object for a host_id string."""
if property_list is None:
property_list = ['name']
host_refs = session._call_method(
vim_util, "get_objects",
"HostSystem", property_list)
return _get_object_from_results(session, host_refs, host_id,
_get_reference_for_value)
def get_host_id_from_vm_ref(session, vm_ref):
"""This method allows you to find the managed object
ID of the host running a VM. Since vMotion can
change the value, you should not presume that this
is a value that you can cache for very long and
should be prepared to allow for it to change.
:param session: a vSphere API connection
:param vm_ref: a reference object to the running VM
:return: the host_id running the virtual machine
"""
# to prevent typographical errors below
property_name = 'runtime.host'
# a property collector in VMware vSphere Management API
# is a set of local representations of remote values.
# property_set here, is a local representation of the
# properties we are querying for.
property_set = session._call_method(
vim_util, "get_object_properties",
None, vm_ref, vm_ref._type, [property_name])
prop = property_from_property_set(
property_name, property_set)
if prop is not None:
prop = prop.val.value
else:
# reaching here represents an impossible state
raise RuntimeError(
"Virtual Machine %s exists without a runtime.host!"
% (vm_ref))
return prop
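# Illustrative usage (hypothetical values, not part of the original module):
#
#   vm_ref = get_vm_ref(session, instance)
#   host_id = get_host_id_from_vm_ref(session, vm_ref)   # e.g. 'host-1234'
#   # The value can change after a vMotion, so it should not be cached.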
def property_from_property_set(property_name, property_set):
'''Use this method to filter property collector results.
Because network traffic is expensive, multiple
VMwareAPI calls will sometimes pile-up properties
to be collected. That means results may contain
many different values for multiple purposes.
This helper will filter a list for a single result
and filter the properties of that result to find
the single value of whatever type resides in that
result. This could be a ManagedObjectReference ID
or a complex value.
:param property_name: name of property you want
:param property_set: all results from query
:return: the value of the property.
'''
for prop in property_set.objects:
p = _property_from_propSet(prop.propSet, property_name)
if p is not None:
return p
def _property_from_propSet(propSet, name='name'):
for p in propSet:
if p.name == name:
return p
def get_host_ref_for_vm(session, instance, props):
"""Get the ESXi host running a VM by its name."""
vm_ref = get_vm_ref(session, instance)
host_id = get_host_id_from_vm_ref(session, vm_ref)
return get_host_ref_from_id(session, host_id, props)
def get_host_name_for_vm(session, instance):
"""Get the ESXi host running a VM by its name."""
host_ref = get_host_ref_for_vm(session, instance, ['name'])
return get_host_name_from_host_ref(host_ref)
def get_host_name_from_host_ref(host_ref):
p = _property_from_propSet(host_ref.propSet)
if p is not None:
return p.val
def get_vm_state_from_name(session, vm_name):
vm_ref = get_vm_ref_from_name(session, vm_name)
vm_state = session._call_method(vim_util, "get_dynamic_property",
vm_ref, "VirtualMachine", "runtime.powerState")
return vm_state
def get_stats_from_cluster(session, cluster):
"""Get the aggregate resource stats of a cluster."""
cpu_info = {'vcpus': 0, 'cores': 0, 'vendor': [], 'model': []}
mem_info = {'total': 0, 'free': 0}
# Get the Host and Resource Pool Managed Object Refs
prop_dict = session._call_method(vim_util, "get_dynamic_properties",
cluster, "ClusterComputeResource",
["host", "resourcePool"])
if prop_dict:
host_ret = prop_dict.get('host')
if host_ret:
host_mors = host_ret.ManagedObjectReference
result = session._call_method(vim_util,
"get_properties_for_a_collection_of_objects",
"HostSystem", host_mors,
["summary.hardware", "summary.runtime"])
for obj in result.objects:
hardware_summary = obj.propSet[0].val
runtime_summary = obj.propSet[1].val
if (runtime_summary.inMaintenanceMode is False and
runtime_summary.connectionState == "connected"):
# Total vcpus is the sum of all pCPUs of individual hosts
# The overcommitment ratio is factored in by the scheduler
cpu_info['vcpus'] += hardware_summary.numCpuThreads
cpu_info['cores'] += hardware_summary.numCpuCores
cpu_info['vendor'].append(hardware_summary.vendor)
cpu_info['model'].append(hardware_summary.cpuModel)
res_mor = prop_dict.get('resourcePool')
if res_mor:
res_usage = session._call_method(vim_util, "get_dynamic_property",
res_mor, "ResourcePool", "summary.runtime.memory")
if res_usage:
# maxUsage is the memory limit of the cluster available to VM's
mem_info['total'] = int(res_usage.maxUsage / units.Mi)
# overallUsage is the hypervisor's view of memory usage by VM's
consumed = int(res_usage.overallUsage / units.Mi)
mem_info['free'] = mem_info['total'] - consumed
stats = {'cpu': cpu_info, 'mem': mem_info}
return stats
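# Shape of the returned value (illustrative numbers only, not part of the
# original module); memory values are in MB:
#
#   {'cpu': {'vcpus': 32, 'cores': 16,
#            'vendor': ['GenuineIntel', 'GenuineIntel'],
#            'model': ['Intel(R) Xeon(R) CPU', 'Intel(R) Xeon(R) CPU']},
#    'mem': {'total': 65536, 'free': 49152}}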
def get_host_ref(session, cluster=None):
"""Get reference to a host within the cluster specified."""
if cluster is None:
results = session._call_method(vim_util, "get_objects",
"HostSystem")
_cancel_retrieve_if_necessary(session, results)
host_mor = results.objects[0].obj
else:
host_ret = session._call_method(vim_util, "get_dynamic_property",
cluster, "ClusterComputeResource",
"host")
if not host_ret or not host_ret.ManagedObjectReference:
msg = _('No host available on cluster')
raise exception.NoValidHost(reason=msg)
host_mor = host_ret.ManagedObjectReference[0]
return host_mor
def propset_dict(propset):
"""Turn a propset list into a dictionary
PropSet is an optional attribute on ObjectContent objects
that are returned by the VMware API.
You can read more about these at:
| http://pubs.vmware.com/vsphere-51/index.jsp
| #com.vmware.wssdk.apiref.doc/
| vmodl.query.PropertyCollector.ObjectContent.html
:param propset: a property "set" from ObjectContent
:return: dictionary representing property set
"""
if propset is None:
return {}
# TODO(hartsocks): once support for Python 2.6 is dropped
# change to {prop.name: prop.val for prop in propset}
return dict([(prop.name, prop.val) for prop in propset])
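# Illustrative usage (hypothetical stand-in objects): each propset entry only
# needs `name` and `val` attributes, so a namedtuple can emulate the
# suds-generated DynamicProperty type.
#
#   from collections import namedtuple
#   Prop = namedtuple('Prop', ['name', 'val'])
#   propset_dict([Prop('runtime.powerState', 'poweredOn')])
#   # -> {'runtime.powerState': 'poweredOn'}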
def get_vmdk_backed_disk_uuid(hardware_devices, volume_uuid):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskFlatVer2BackingInfo" and
volume_uuid in device.backing.fileName):
return device.backing.uuid
def get_vmdk_backed_disk_device(hardware_devices, uuid):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskFlatVer2BackingInfo" and
device.backing.uuid == uuid):
return device
def get_vmdk_volume_disk(hardware_devices, path=None):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk"):
if not path or path == device.backing.fileName:
return device
def get_res_pool_ref(session, cluster, node_mo_id):
"""Get the resource pool."""
if cluster is None:
# With no cluster named, use the root resource pool.
results = session._call_method(vim_util, "get_objects",
"ResourcePool")
_cancel_retrieve_if_necessary(session, results)
# The 0th resource pool is always the root resource pool on both ESX
# and vCenter.
res_pool_ref = results.objects[0].obj
else:
if cluster.value == node_mo_id:
# Get the root resource pool of the cluster
res_pool_ref = session._call_method(vim_util,
"get_dynamic_property",
cluster,
"ClusterComputeResource",
"resourcePool")
return res_pool_ref
def get_all_cluster_mors(session):
"""Get all the clusters in the vCenter."""
try:
results = session._call_method(vim_util, "get_objects",
"ClusterComputeResource", ["name"])
_cancel_retrieve_if_necessary(session, results)
return results.objects
except Exception as excep:
LOG.warn(_("Failed to get cluster references %s") % excep)
def get_all_res_pool_mors(session):
"""Get all the resource pools in the vCenter."""
try:
results = session._call_method(vim_util, "get_objects",
"ResourcePool")
_cancel_retrieve_if_necessary(session, results)
return results.objects
except Exception as excep:
LOG.warn(_("Failed to get resource pool references " "%s") % excep)
def get_dynamic_property_mor(session, mor_ref, attribute):
"""Get the value of an attribute for a given managed object."""
return session._call_method(vim_util, "get_dynamic_property",
mor_ref, mor_ref._type, attribute)
def find_entity_mor(entity_list, entity_name):
"""Returns managed object ref for given cluster or resource pool name."""
return [mor for mor in entity_list if (hasattr(mor, 'propSet') and
mor.propSet[0].val == entity_name)]
def get_all_cluster_refs_by_name(session, path_list):
"""Get reference to the Cluster, ResourcePool with the path specified.
The path is the display name and can also be the full path.
The input is the list of cluster and resource pool names.
"""
cls = get_all_cluster_mors(session)
if not cls:
return
res = get_all_res_pool_mors(session)
if not res:
return
path_list = [path.strip() for path in path_list]
list_obj = []
for entity_path in path_list:
# entity_path could be unique cluster and/or resource-pool name
res_mor = find_entity_mor(res, entity_path)
cls_mor = find_entity_mor(cls, entity_path)
cls_mor.extend(res_mor)
for mor in cls_mor:
list_obj.append((mor.obj, mor.propSet[0].val))
return get_dict_mor(session, list_obj)
def get_dict_mor(session, list_obj):
"""The input is a list of objects in the form
(manage_object,display_name)
The managed object will be in the form
{ value = "domain-1002", _type = "ClusterComputeResource" }
Output data format:
| dict_mors = {
| 'respool-1001': { 'cluster_mor': clusterMor,
| 'res_pool_mor': resourcePoolMor,
| 'name': display_name },
| 'domain-1002': { 'cluster_mor': clusterMor,
| 'res_pool_mor': resourcePoolMor,
| 'name': display_name },
| }
"""
dict_mors = {}
for obj_ref, path in list_obj:
if obj_ref._type == "ResourcePool":
# Get owner cluster-ref mor
cluster_ref = get_dynamic_property_mor(session, obj_ref, "owner")
dict_mors[obj_ref.value] = {'cluster_mor': cluster_ref,
'res_pool_mor': obj_ref,
'name': path,
}
else:
# Get default resource pool of the cluster
res_pool_ref = get_dynamic_property_mor(session,
obj_ref, "resourcePool")
dict_mors[obj_ref.value] = {'cluster_mor': obj_ref,
'res_pool_mor': res_pool_ref,
'name': path,
}
return dict_mors
def get_mo_id_from_instance(instance):
"""Return the managed object ID from the instance.
The instance['node'] will have the hypervisor_hostname field of the
compute node on which the instance exists or will be provisioned.
This will be of the form
'respool-1001(MyResPoolName)'
'domain-1001(MyClusterName)'
"""
return instance['node'].partition('(')[0]
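# Worked example (hypothetical node names): partition('(') keeps everything
# before the display name.
#
#   get_mo_id_from_instance({'node': 'domain-1001(MyClusterName)'})
#   # -> 'domain-1001'
#   get_mo_id_from_instance({'node': 'respool-1001(MyResPoolName)'})
#   # -> 'respool-1001'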
def get_vmdk_adapter_type(adapter_type):
"""Return the adapter type to be used in vmdk descriptor.
The adapter type in the vmdk descriptor is the same for LSI-SAS & LSILogic
because the Virtual Disk Manager API does not recognize the newer
controller types.
"""
if adapter_type == constants.ADAPTER_TYPE_LSILOGICSAS:
vmdk_adapter_type = constants.DEFAULT_ADAPTER_TYPE
else:
vmdk_adapter_type = adapter_type
return vmdk_adapter_type
def create_vm(session, instance, vm_folder, config_spec, res_pool_ref):
"""Create VM on ESX host."""
LOG.debug("Creating VM on the ESX host", instance=instance)
vm_create_task = session._call_method(
session._get_vim(),
"CreateVM_Task", vm_folder,
config=config_spec, pool=res_pool_ref)
task_info = session._wait_for_task(vm_create_task)
LOG.debug("Created VM on the ESX host", instance=instance)
return task_info.result
def create_virtual_disk(session, dc_ref, adapter_type, disk_type,
virtual_disk_path, size_in_kb):
# Create a Virtual Disk of the size of the flat vmdk file. This is
# done just to generate the meta-data file whose specifics
# depend on the size of the disk, thin/thick provisioning and the
# storage adapter type.
LOG.debug("Creating Virtual Disk of size "
"%(vmdk_file_size_in_kb)s KB and adapter type "
"%(adapter_type)s on the data store",
{"vmdk_file_size_in_kb": size_in_kb,
"adapter_type": adapter_type})
vmdk_create_spec = get_vmdk_create_spec(
session._get_vim().client.factory,
size_in_kb,
adapter_type,
disk_type)
vmdk_create_task = session._call_method(
session._get_vim(),
"CreateVirtualDisk_Task",
session._get_vim().get_service_content().virtualDiskManager,
name=virtual_disk_path,
datacenter=dc_ref,
spec=vmdk_create_spec)
session._wait_for_task(vmdk_create_task)
LOG.debug("Created Virtual Disk of size %(vmdk_file_size_in_kb)s"
" KB and type %(disk_type)s",
{"vmdk_file_size_in_kb": size_in_kb,
"disk_type": disk_type})
def copy_virtual_disk(session, dc_ref, source, dest, copy_spec=None):
"""Copy a sparse virtual disk to a thin virtual disk. This is also
done to generate the meta-data file whose specifics
depend on the size of the disk, thin/thick provisioning and the
storage adapter type.
:param session: - session for connection
:param dc_ref: - data center reference object
:param source: - source datastore path
:param dest: - destination datastore path
:param copy_spec: - the copy specification
"""
LOG.debug("Copying Virtual Disk %(source)s to %(dest)s",
{'source': source, 'dest': dest})
vim = session._get_vim()
vmdk_copy_task = session._call_method(
vim,
"CopyVirtualDisk_Task",
vim.get_service_content().virtualDiskManager,
sourceName=source,
sourceDatacenter=dc_ref,
destName=dest,
destSpec=copy_spec)
session._wait_for_task(vmdk_copy_task)
LOG.debug("Copied Virtual Disk %(source)s to %(dest)s",
{'source': source, 'dest': dest})
def reconfigure_vm(session, vm_ref, config_spec):
"""Reconfigure a VM according to the config spec."""
reconfig_task = session._call_method(session._get_vim(),
"ReconfigVM_Task", vm_ref,
spec=config_spec)
session._wait_for_task(reconfig_task)
def clone_vmref_for_instance(session, instance, vm_ref, host_ref, ds_ref,
vmfolder_ref):
"""Clone VM and link the cloned VM to the instance.
Clones the passed vm_ref into a new VM and links the cloned vm to
the passed instance.
"""
if vm_ref is None:
LOG.warn(_("vmwareapi:vm_util:clone_vmref_for_instance, called "
"with vm_ref=None"))
raise error_util.MissingParameter(param="vm_ref")
# Get the clone vm spec
client_factory = session._get_vim().client.factory
rel_spec = relocate_vm_spec(client_factory, ds_ref, host_ref,
disk_move_type='moveAllDiskBackingsAndDisallowSharing')
extra_opts = {'nvp.vm-uuid': instance['uuid']}
config_spec = get_vm_extra_config_spec(client_factory, extra_opts)
config_spec.instanceUuid = instance['uuid']
clone_spec = clone_vm_spec(client_factory, rel_spec, config=config_spec)
# Clone VM on ESX host
LOG.debug("Cloning VM for instance %s", instance['uuid'],
instance=instance)
vm_clone_task = session._call_method(session._get_vim(), "CloneVM_Task",
vm_ref, folder=vmfolder_ref,
name=instance['uuid'],
spec=clone_spec)
session._wait_for_task(vm_clone_task)
LOG.debug("Cloned VM for instance %s", instance['uuid'],
instance=instance)
# Invalidate the cache, so that it is refetched the next time
vm_ref_cache_delete(instance['uuid'])
def disassociate_vmref_from_instance(session, instance, vm_ref=None,
suffix='-orig'):
"""Disassociates the VM linked to the instance.
Disassociates the VM linked to the instance by performing the following
1. Update the extraConfig property for nvp.vm-uuid to be replaced with
instance[uuid]+suffix
2. Rename the VM to be instance[uuid]+suffix instead
3. Reset the instanceUUID of the VM to a newly generated value
"""
if vm_ref is None:
vm_ref = get_vm_ref(session, instance)
extra_opts = {'nvp.vm-uuid': instance['uuid'] + suffix}
client_factory = session._get_vim().client.factory
reconfig_spec = get_vm_extra_config_spec(client_factory, extra_opts)
reconfig_spec.name = instance['uuid'] + suffix
reconfig_spec.instanceUuid = ''
LOG.debug("Disassociating VM from instance %s", instance['uuid'],
instance=instance)
reconfigure_vm(session, vm_ref, reconfig_spec)
LOG.debug("Disassociated VM from instance %s", instance['uuid'],
instance=instance)
# Invalidate the cache, so that it is refetched the next time
vm_ref_cache_delete(instance['uuid'])
def associate_vmref_for_instance(session, instance, vm_ref=None,
suffix='-orig'):
"""Associates the VM to the instance.
Associates the VM to the instance by performing the following
1. Update the extraConfig property for nvp.vm-uuid to be replaced with
instance[uuid]
2. Rename the VM to be instance[uuid]
3. Reset the instanceUUID of the VM to be instance[uuid]
"""
if vm_ref is None:
vm_ref = search_vm_ref_by_identifier(session,
instance['uuid'] + suffix)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance['uuid']
+ suffix)
extra_opts = {'nvp.vm-uuid': instance['uuid']}
client_factory = session._get_vim().client.factory
reconfig_spec = get_vm_extra_config_spec(client_factory, extra_opts)
reconfig_spec.name = instance['uuid']
reconfig_spec.instanceUuid = instance['uuid']
LOG.debug("Associating VM to instance %s", instance['uuid'],
instance=instance)
reconfigure_vm(session, vm_ref, reconfig_spec)
LOG.debug("Associated VM to instance %s", instance['uuid'],
instance=instance)
# Invalidate the cache, so that it is refetched the next time
vm_ref_cache_delete(instance['uuid'])
def power_on_instance(session, instance, vm_ref=None):
"""Power on the specified instance."""
if vm_ref is None:
vm_ref = get_vm_ref(session, instance)
LOG.debug("Powering on the VM", instance=instance)
try:
poweron_task = session._call_method(
session._get_vim(),
"PowerOnVM_Task", vm_ref)
session._wait_for_task(poweron_task)
LOG.debug("Powered on the VM", instance=instance)
except error_util.InvalidPowerStateException:
LOG.debug("VM already powered on", instance=instance)
def get_values_from_object_properties(session, props):
"""Get the specific values from a object list.
The object values will be returned as a dictionary.
"""
dictionary = {}
while props:
for elem in props.objects:
propdict = propset_dict(elem.propSet)
dictionary.update(propdict)
token = _get_token(props)
if not token:
break
props = session._call_method(vim_util,
"continue_to_get_objects",
token)
return dictionary
def _get_vm_port_indices(session, vm_ref):
extra_config = session._call_method(vim_util,
'get_dynamic_property',
vm_ref, 'VirtualMachine',
'config.extraConfig')
ports = []
if extra_config is not None:
options = extra_config.OptionValue
for option in options:
if (option.key.startswith('nvp.iface-id.') and
option.value != 'free'):
ports.append(int(option.key.split('.')[2]))
return ports
def get_attach_port_index(session, vm_ref):
"""Get the first free port index."""
ports = _get_vm_port_indices(session, vm_ref)
# No ports are configured on the VM
if not ports:
return 0
ports.sort()
configured_ports_len = len(ports)
# Find the first free port index
for port_index in range(configured_ports_len):
if port_index != ports[port_index]:
return port_index
return configured_ports_len
def get_vm_detach_port_index(session, vm_ref, iface_id):
extra_config = session._call_method(vim_util,
'get_dynamic_property',
vm_ref, 'VirtualMachine',
'config.extraConfig')
if extra_config is not None:
options = extra_config.OptionValue
for option in options:
if (option.key.startswith('nvp.iface-id.') and
option.value == iface_id):
return int(option.key.split('.')[2])
def power_off_instance(session, instance, vm_ref=None):
"""Power off the specified instance."""
if vm_ref is None:
vm_ref = get_vm_ref(session, instance)
LOG.debug("Powering off the VM", instance=instance)
try:
poweroff_task = session._call_method(session._get_vim(),
"PowerOffVM_Task", vm_ref)
session._wait_for_task(poweroff_task)
LOG.debug("Powered off the VM", instance=instance)
except error_util.InvalidPowerStateException:
LOG.debug("VM already powered off", instance=instance)
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Show'
db.create_table(u'spa_show', (
(u'event_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['schedule.Event'], unique=True, primary_key=True)),
))
db.send_create_signal('spa', ['Show'])
def backwards(self, orm):
# Deleting model 'Show'
db.delete_table(u'spa_show')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'schedule.calendar': {
'Meta': {'object_name': 'Calendar'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200'})
},
'schedule.event': {
'Meta': {'object_name': 'Event'},
'calendar': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['schedule.Calendar']", 'null': 'True', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'creator'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {}),
'end_recurring_period': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rule': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['schedule.Rule']", 'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'schedule.rule': {
'Meta': {'object_name': 'Rule'},
'description': ('django.db.models.fields.TextField', [], {}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'params': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'spa._lookup': {
'Meta': {'object_name': '_Lookup'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'spa.activity': {
'Meta': {'object_name': 'Activity'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.UserProfile']", 'null': 'True', 'blank': 'True'})
},
'spa.activitycomment': {
'Meta': {'object_name': 'ActivityComment', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_comments'", 'to': "orm['spa.Mix']"})
},
'spa.activitydownload': {
'Meta': {'object_name': 'ActivityDownload', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_downloads'", 'to': "orm['spa.Mix']"})
},
'spa.activityfavourite': {
'Meta': {'object_name': 'ActivityFavourite', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_favourites'", 'to': "orm['spa.Mix']"})
},
'spa.activityfollow': {
'Meta': {'object_name': 'ActivityFollow', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'to_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_follow'", 'to': "orm['spa.UserProfile']"})
},
'spa.activitylike': {
'Meta': {'object_name': 'ActivityLike', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_likes'", 'to': "orm['spa.Mix']"})
},
'spa.activityplay': {
'Meta': {'object_name': 'ActivityPlay', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_plays'", 'to': "orm['spa.Mix']"})
},
'spa.chatmessage': {
'Meta': {'object_name': 'ChatMessage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'chat_messages'", 'null': 'True', 'to': "orm['spa.UserProfile']"})
},
'spa.comment': {
'Meta': {'object_name': 'Comment'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['spa.Mix']"}),
'time_index': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'spa.genre': {
'Meta': {'object_name': 'Genre'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'spa.label': {
'Meta': {'object_name': 'Label'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'spa.mix': {
'Meta': {'object_name': 'Mix'},
'description': ('django.db.models.fields.TextField', [], {}),
'download_allowed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'duration': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'favourites': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'favourites'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['spa.UserProfile']"}),
'filetype': ('django.db.models.fields.CharField', [], {'default': "'mp3'", 'max_length': '10'}),
'genres': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['spa.Genre']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'likes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['spa.UserProfile']"}),
'mix_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '1024', 'blank': 'True'}),
'mp3tags_updated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'uid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '38', 'blank': 'True'}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 4, 29, 0, 0)'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mixes'", 'to': "orm['spa.UserProfile']"}),
'waveform_generated': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'spa.notification': {
'Meta': {'object_name': 'Notification'},
'accepted_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'from_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'notifications'", 'null': 'True', 'to': "orm['spa.UserProfile']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notification_html': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'notification_text': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'notification_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'to_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'to_notications'", 'to': "orm['spa.UserProfile']"}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
},
'spa.purchaselink': {
'Meta': {'object_name': 'PurchaseLink'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'track': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'purchase_link'", 'to': "orm['spa.Tracklist']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'spa.recurrence': {
'Meta': {'object_name': 'Recurrence', '_ormbases': ['spa._Lookup']},
u'_lookup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Lookup']", 'unique': 'True', 'primary_key': 'True'})
},
'spa.release': {
'Meta': {'object_name': 'Release'},
'embed_code': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'release_artist': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'release_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2014, 4, 29, 0, 0)'}),
'release_description': ('django.db.models.fields.TextField', [], {}),
'release_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'release_label': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Label']"}),
'release_title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.UserProfile']"})
},
'spa.releaseaudio': {
'Meta': {'object_name': 'ReleaseAudio'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_audio'", 'null': 'True', 'to': "orm['spa.Release']"})
},
'spa.show': {
'Meta': {'object_name': 'Show', '_ormbases': ['schedule.Event']},
u'event_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['schedule.Event']", 'unique': 'True', 'primary_key': 'True'})
},
'spa.tracklist': {
'Meta': {'object_name': 'Tracklist'},
'artist': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.SmallIntegerField', [], {}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tracklist'", 'to': "orm['spa.Mix']"}),
'remixer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timeindex': ('django.db.models.fields.TimeField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'spa.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'activity_sharing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'activity_sharing_networks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'avatar_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '1024', 'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'social'", 'max_length': '15'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}),
'following': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'followers'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['spa.UserProfile']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_known_session': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': 'None', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
'spa.venue': {
'Meta': {'object_name': 'Venue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'venue_address': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'venue_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'venue_name': ('django.db.models.fields.CharField', [], {'max_length': '250'})
}
}
complete_apps = ['spa']
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module is for internal use only; no backwards-compatibility guarantees.
The classes in this file keep shared state, and organize metrics information.
Available classes:
- MetricKey - Internal key for a metric.
- MetricResult - Current status of a metric's updates/commits.
- _MetricsEnvironment - Keeps track of MetricsContainer and other metrics
information for every single execution working thread.
- MetricsContainer - Holds the metrics of a single step and a single
unit-of-commit (bundle).
"""
# pytype: skip-file
from __future__ import absolute_import
from builtins import object
from apache_beam.metrics import monitoring_infos
from apache_beam.metrics.cells import CounterCell
from apache_beam.metrics.cells import DistributionCell
from apache_beam.metrics.cells import GaugeCell
from apache_beam.runners.worker import statesampler
from apache_beam.runners.worker.statesampler import get_current_tracker
class MetricKey(object):
"""Key used to identify instance of metric cell.
Metrics are internally keyed by the name of the step they're associated with,
the name and namespace (if it is a user defined metric) of the metric,
and any extra label metadata added by the runner specific metric collection
service.
"""
def __init__(self, step, metric, labels=None):
"""Initializes ``MetricKey``.
Args:
step: A string with the step this metric cell is part of.
metric: A ``MetricName`` namespace+name that identifies a metric.
labels: An arbitrary set of labels that also identifies the metric.
"""
self.step = step
self.metric = metric
self.labels = labels if labels else dict()
def __eq__(self, other):
return (
self.step == other.step and self.metric == other.metric and
self.labels == other.labels)
def __ne__(self, other):
# TODO(BEAM-5949): Needed for Python 2 compatibility.
return not self == other
def __hash__(self):
return hash((self.step, self.metric, frozenset(self.labels)))
def __repr__(self):
return 'MetricKey(step={}, metric={}, labels={})'.format(
self.step, self.metric, self.labels)
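# Illustrative sketch (assumes MetricName from apache_beam.metrics.metricbase,
# which is not imported by this module): keys with equal step, metric and
# labels hash identically, so MetricKey works as a dictionary key.
#
#   from apache_beam.metrics.metricbase import MetricName
#   k1 = MetricKey('MyStep', MetricName('my.namespace', 'my_counter'))
#   k2 = MetricKey('MyStep', MetricName('my.namespace', 'my_counter'))
#   assert k1 == k2 and hash(k1) == hash(k2)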
class MetricResult(object):
"""Keeps track of the status of a metric within a single bundle.
It contains the physical and logical updates to the metric. Physical updates
are updates that have not necessarily been committed, but that have been made
during pipeline execution. Logical updates are updates that have been
committed.
Attributes:
key: A ``MetricKey`` that identifies the metric and bundle of this result.
committed: The committed (logical) updates of the metric. This attribute's
type is that of the metric result (e.g. int, DistributionResult,
GaugeResult).
attempted: The attempted (physical) updates of the metric. This attribute's
type is that of the metric result (e.g. int, DistributionResult,
GaugeResult).
"""
def __init__(self, key, committed, attempted):
"""Initializes ``MetricResult``.
Args:
key: A ``MetricKey`` object.
committed: Metric data that has been committed (e.g. logical updates)
attempted: Metric data that has been attempted (e.g. physical updates)
"""
self.key = key
self.committed = committed
self.attempted = attempted
def __eq__(self, other):
return (
self.key == other.key and self.committed == other.committed and
self.attempted == other.attempted)
def __ne__(self, other):
# TODO(BEAM-5949): Needed for Python 2 compatibility.
return not self == other
def __hash__(self):
return hash((self.key, self.committed, self.attempted))
def __repr__(self):
return 'MetricResult(key={}, committed={}, attempted={})'.format(
self.key, str(self.committed), str(self.attempted))
def __str__(self):
return repr(self)
@property
def result(self):
"""Short-hand for falling back to attempted metrics if it seems that
committed was not populated (e.g. due to not being supported on a given
runner"""
return self.committed if self.committed else self.attempted
class _MetricsEnvironment(object):
"""Holds the MetricsContainer for every thread and other metric information.
This class is not meant to be instantiated, instead being used to keep
track of global state.
"""
def current_container(self):
"""Returns the current MetricsContainer."""
sampler = statesampler.get_current_tracker()
if sampler is None:
return None
return sampler.current_state().metrics_container
MetricsEnvironment = _MetricsEnvironment()
class _TypedMetricName(object):
"""Like MetricName, but also stores the cell type of the metric."""
def __init__(self, cell_type, metric_name):
self.cell_type = cell_type
self.metric_name = metric_name
if isinstance(metric_name, str):
self.fast_name = metric_name
else:
self.fast_name = '%d_%s%s' % (
len(metric_name.name), metric_name.name, metric_name.namespace)
# Cached for speed, as this is used as a key for every counter update.
self._hash = hash((cell_type, self.fast_name))
def __eq__(self, other):
return self is other or (
self.cell_type == other.cell_type and self.fast_name == other.fast_name)
def __ne__(self, other):
return not self == other
def __hash__(self):
return self._hash
def __reduce__(self):
return _TypedMetricName, (self.cell_type, self.metric_name)
_DEFAULT = None
class MetricUpdater(object):
"""A callable that updates the metric as quickly as possible."""
def __init__(self, cell_type, metric_name, default=None):
self.typed_metric_name = _TypedMetricName(cell_type, metric_name)
self.default = default
def __call__(self, value=_DEFAULT):
if value is _DEFAULT:
if self.default is _DEFAULT:
raise ValueError('Missing value for update of %s' % self.typed_metric_name.metric_name)
value = self.default
tracker = get_current_tracker()
if tracker is not None:
tracker.update_metric(self.typed_metric_name, value)
def __reduce__(self):
return MetricUpdater, (
self.typed_metric_name.cell_type,
self.typed_metric_name.metric_name,
self.default)
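# Illustrative sketch (not part of the original module): a MetricUpdater bound
# to a CounterCell behaves like a fast `inc()` for the current state tracker.
#
#   from apache_beam.metrics.metricbase import MetricName
#   inc_elements = MetricUpdater(
#       CounterCell, MetricName('my.namespace', 'elements'), default=1)
#   inc_elements()     # adds the default (1) to the active MetricsContainer
#   inc_elements(5)    # adds 5; a no-op when no state tracker is active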
class MetricsContainer(object):
"""Holds the metrics of a single step and a single bundle."""
def __init__(self, step_name):
self.step_name = step_name
self.metrics = dict()
def get_counter(self, metric_name):
return self.get_metric_cell(_TypedMetricName(CounterCell, metric_name))
def get_distribution(self, metric_name):
return self.get_metric_cell(_TypedMetricName(DistributionCell, metric_name))
def get_gauge(self, metric_name):
return self.get_metric_cell(_TypedMetricName(GaugeCell, metric_name))
def get_metric_cell(self, typed_metric_name):
cell = self.metrics.get(typed_metric_name, None)
if cell is None:
cell = self.metrics[typed_metric_name] = typed_metric_name.cell_type()
return cell
def get_cumulative(self):
"""Return MetricUpdates with cumulative values of all metrics in container.
This returns all the cumulative values for all metrics.
"""
counters = {
MetricKey(self.step_name, k.metric_name): v.get_cumulative()
for k,
v in self.metrics.items() if k.cell_type == CounterCell
}
distributions = {
MetricKey(self.step_name, k.metric_name): v.get_cumulative()
for k,
v in self.metrics.items() if k.cell_type == DistributionCell
}
gauges = {
MetricKey(self.step_name, k.metric_name): v.get_cumulative()
for k,
v in self.metrics.items() if k.cell_type == GaugeCell
}
return MetricUpdates(counters, distributions, gauges)
def to_runner_api(self):
    return [
        cell.to_runner_api_user_metric(key.metric_name)
        for key, cell in self.metrics.items()
    ]
def to_runner_api_monitoring_infos(self, transform_id):
"""Returns a list of MonitoringInfos for the metrics in this container."""
    all_user_metrics = [
        cell.to_runner_api_monitoring_info(key.metric_name, transform_id)
        for key, cell in self.metrics.items()
    ]
return {monitoring_infos.to_key(mi): mi for mi in all_user_metrics}
def reset(self):
for metric in self.metrics.values():
metric.reset()
def __reduce__(self):
raise NotImplementedError
class MetricUpdates(object):
"""Contains updates for several metrics.
A metric update is an object containing information to update a metric.
For Distribution metrics, it is DistributionData, and for Counter metrics,
it's an int.
"""
def __init__(self, counters=None, distributions=None, gauges=None):
"""Create a MetricUpdates object.
Args:
      counters: Dictionary of MetricKey: MetricUpdate objects.
      distributions: Dictionary of MetricKey: MetricUpdate objects.
      gauges: Dictionary of MetricKey: MetricUpdate objects.
"""
self.counters = counters or {}
self.distributions = distributions or {}
self.gauges = gauges or {}
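# Illustrative sketch, not part of the original module: how a MetricsContainer
# accumulates cells for one step/bundle and is drained into MetricUpdates. It
# assumes CounterCell, DistributionCell and MetricName come from the imports
# at the top of this module; the step and metric names are made up.
def _example_metrics_container_usage():
  container = MetricsContainer('my_step')
  # Cells are created lazily, keyed by (cell type, metric name).
  container.get_counter(MetricName('example.namespace', 'elements')).inc(3)
  container.get_distribution(MetricName('example.namespace', 'latency_ms')).update(25)
  # Cumulative snapshot of everything recorded so far in this bundle.
  updates = container.get_cumulative()
  return updates.counters, updates.distributions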
# Sapy
# Copyright (C) 2018 stefano prina <[email protected]> <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from sapy_modules.sapy.mom import Mom
import sapy_modules.core.db as db_iface
import sapy_modules.commands.setter.set_end as se
from sapy_modules.core import LoggerFactory
import datetime
import csv
CREATE_TABLE_LOM= """
CREATE TABLE "loms" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"name" TEXT NOT NULL UNIQUE,
"visible" INTEGER NOT NULL DEFAULT 0,
"color" TEXT NOT NULL DEFAULT "red"
) """
CREATE_TABLE_MOM_IN_LOM = """
CREATE TABLE "mom_in_lom" (
"mom_id" INTEGER NOT NULL,
"lom_id" INTEGER NOT NULL,
PRIMARY KEY("lom_id","mom_id")
) """
INSERT_LOM = "insert into loms (name,visible,color) values (?,?,?)"
GET_LAST_LOM = "select id from loms order by id DESC ;"
LINK_MOM_TO_LOM ="insert into mom_in_lom (mom_id,lom_id) values ( ?, ?)"
DELETE_LOM = "delete from loms where id = ?"
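# The four GET_MOMS_* queries cover the possible date filters for a given lom:
#   GET_MOMS_0 - no date bound
#   GET_MOMS_1 - upper bound only  (date <= ?)
#   GET_MOMS_2 - lower bound only  (date >= ?)
#   GET_MOMS_3 - both bounds       (? <= date <= ?)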
GET_MOMS_0 = """
SELECT * FROM (
SELECT id,value,cause,date,lom_id
FROM moms INNER join mom_in_lom on moms.id = mom_in_lom.mom_id
)
where lom_id = ?
"""
GET_MOMS_1 = """
SELECT * FROM (
SELECT id,value,cause,date,lom_id
FROM moms INNER join mom_in_lom on moms.id = mom_in_lom.mom_id
)
where lom_id = ? and date <= ?
"""
GET_MOMS_2 = """
SELECT * FROM (
SELECT id,value,cause,date,lom_id
FROM moms INNER join mom_in_lom on moms.id = mom_in_lom.mom_id
)
where lom_id = ? and date >= ?
"""
GET_MOMS_3 = """
SELECT * FROM (
SELECT id,value,cause,date,lom_id
FROM moms INNER join mom_in_lom on moms.id = mom_in_lom.mom_id
)
where lom_id = ? and date >= ? and date <= ?
"""
GET_MOM = "SELECT * from moms where moms.id = ? ;"
GET_LOM_BY_NAME = 'select * from loms where `name` == ? '
GET_LOM_BY_ID = 'select * from loms where `id` == ? '
GET_ALL_LOMS = """SELECT * FROM loms"""
CREATE_DEFAULT_LOMS = """
INSERT INTO "loms"
("id","name","visible","color") VALUES
(1,'real','1','green'),
(2,'expected','1','red')
;"""
UPDATE_LOM_VISIBLE ="UPDATE loms SET `visible`=? WHERE id=?;"
UPDATE_LOM_NAME ="UPDATE loms SET `name`=? WHERE id=?;"
UPDATE_LOM_COLOR ="UPDATE loms SET `color`=? WHERE id=?;"
GET_LOM_VISIBLE ="SELECT visible FROM loms WHERE id=?;"
SET_TAB_VERSION = """INSERT INTO "app_meta" ("key","value") VALUES ("lom_tab_version",?)"""
TAB_VERSION = 1
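# create_tables() stamps TAB_VERSION into the app_meta key/value table, which
# is assumed to be created elsewhere by the core database setup.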
def create_tables():
cur = db_iface.get_cursor()
cur.execute(CREATE_TABLE_LOM)
cur.execute(CREATE_TABLE_MOM_IN_LOM)
cur.execute(CREATE_DEFAULT_LOMS)
cur.execute(SET_TAB_VERSION,(TAB_VERSION,))
db_iface.commit()
cur.close()
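# Illustrative sketch, not part of the original module: the schema statements
# above can be exercised against a throwaway in-memory database with the
# standard sqlite3 module, independently of db_iface (SET_TAB_VERSION is
# skipped here because the app_meta table is created elsewhere).
def _example_schema_smoke_test():
    import sqlite3
    conn = sqlite3.connect(':memory:')
    cur = conn.cursor()
    cur.execute(CREATE_TABLE_LOM)
    cur.execute(CREATE_TABLE_MOM_IN_LOM)
    cur.execute(CREATE_DEFAULT_LOMS)
    rows = cur.execute(GET_ALL_LOMS).fetchall()  # the two default loms
    conn.close()
    return rows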
class Lom(object): # list of movements
def __init__(
self,
id=None,
name="list of movements",
visible=False,
color="black"
):
self.logger = LoggerFactory.getLogger( str( self.__class__ ))
self.name = name
self.visible = visible
self.color = color
        if id is None:
cur = db_iface.get_cursor()
cur.execute(INSERT_LOM,(name,visible,color))
cur.execute(GET_LAST_LOM)
self.id = cur.fetchone()[0]
db_iface.commit()
cur.close()
else:
self.id = id
def add(self, mlist):
cur = db_iface.get_cursor()
for m in mlist:
cur.execute(LINK_MOM_TO_LOM,(m.id, self.id, ))
db_iface.commit()
cur.close()
def delete(self):
        # TODO: before deleting this lom, delete all the moms linked to it
cur = db_iface.get_cursor()
cur.execute(DELETE_LOM,(self.id, ))
db_iface.commit()
cur.close()
self.name = None
self.id = None
    _DEFAULT_DATE = object()  # sentinel: resolve the "today" default at call time

    def get_moms(self, start_date=_DEFAULT_DATE, end_date=_DEFAULT_DATE):
        # The original defaults called datetime.datetime.today() in the
        # signature, which freezes "today" at import time; resolve it per call
        # instead. Passing None explicitly still means "no bound".
        if start_date is Lom._DEFAULT_DATE:
            start_date = datetime.datetime.today().date()
        if end_date is Lom._DEFAULT_DATE:
            end_date = datetime.datetime.today().date()
        mlist = []
        cur = db_iface.get_cursor()
        if start_date is None and end_date is None:
cur.execute(GET_MOMS_0, (self.id, ) )
elif start_date is None and end_date is not None:
cur.execute(GET_MOMS_1, (self.id,end_date.strftime('%Y-%m-%d') ) )
elif start_date is not None and end_date is None:
cur.execute(GET_MOMS_2, (self.id,start_date.strftime('%Y-%m-%d') ) )
elif start_date is not None and end_date is not None:
cur.execute(GET_MOMS_3, (self.id, start_date.strftime('%Y-%m-%d'), end_date.strftime('%Y-%m-%d') ) )
for raw in cur.fetchall():
            raw_year, raw_month, raw_day = raw[3].split('-')
            mom = Mom(id=raw[0], value=raw[1], cause=raw[2], year=raw_year, month=raw_month, day=raw_day)
mlist.append(mom)
cur.close()
return mlist
    def get_mom(self, id):
        cur = db_iface.get_cursor()
        cur.execute(GET_MOM, (id,))
        raw = cur.fetchone()
        cur.close()
        raw_year, raw_month, raw_day = raw[3].split('-')
        return Mom(id=raw[0], value=raw[1], cause=raw[2], year=raw_year, month=raw_month, day=raw_day)
def csv_import(self,csv_file):
mom_list = []
#m_dialect = csv.Dialect()
#m_dialect.delimiter=";"
with open(str(csv_file.get_path()),'r') as data_file:
data = csv.DictReader(data_file, fieldnames=[
'cause',
'value',
'day',
'month',
'year'
])
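            # Expected layout (no header row), one movement per line, e.g.:
            #   supermarket,-42.50,12,05,2018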
#data = csv.DictReader(data_file, fieldnames=[
# 'cause',
# 'value',
# 'date'
#], dialect=m_dialect)
for raw_mom in data:
#raw_year = raw_mom['date'].split('.')[2]
#raw_month = raw_mom['date'].split('.')[1]
#raw_day = raw_mom['date'].split('.')[0]
#mom_list.append( Mom(
# cause= raw_mom['cause'],
# value= raw_mom['value'],
# year=raw_year,
# month=raw_month,
# day=raw_day
#)
mom_list.append(
Mom(
cause= raw_mom['cause'],
value= raw_mom['value'],
year=raw_mom['year'],
month=raw_mom['month'],
day=raw_mom['day']
)
)
self.add(mom_list)
def balance(self, start_date=None, end_date=None):
balance = 0
for m in self.get_moms(start_date=start_date,end_date=end_date):
balance += m.value
return balance
def balance_per_day(self, start_date=None, end_date=None):
base_balance = self.balance(end_date=start_date)
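        # Opening balance: every mom dated on or before start_date; the loop
        # below then appends one cumulative balance per day in the mom range.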
moms = self.get_moms(start_date=start_date,end_date=end_date)
dates = []
values = []
min_date = datetime.date(datetime.MAXYEAR,1,1)
max_date = datetime.date(datetime.MINYEAR,12,31)
for mom in moms:
if mom.time < min_date:
min_date = mom.time
if mom.time > max_date:
max_date = mom.time
time_delta = datetime.timedelta(days=1)
while min_date <= max_date:
day_balance = base_balance
for mom in self.get_moms(start_date=min_date,end_date=min_date):
day_balance += mom.value
dates.append(min_date)
values.append(day_balance)
min_date += time_delta
base_balance = day_balance
return (dates,values)
    def set_visible(self, value):
        self.visible = value
        cur = db_iface.get_cursor()
        cur.execute(UPDATE_LOM_VISIBLE, (1 if self.visible else 0, self.id))
        db_iface.commit()
        cur.close()
    def set_name(self, value):
        self.name = value
        cur = db_iface.get_cursor()
        cur.execute(UPDATE_LOM_NAME, (self.name, self.id))
        db_iface.commit()
        cur.close()
    def set_color(self, value):
        self.color = value
        cur = db_iface.get_cursor()
        cur.execute(UPDATE_LOM_COLOR, (self.color, self.id))
        db_iface.commit()
        cur.close()
def get_lom(name=None, id=None):
    cur = db_iface.get_cursor()
    if name:
        cur.execute(GET_LOM_BY_NAME, (name,))
    elif id:
        cur.execute(GET_LOM_BY_ID, (id,))
    res = cur.fetchone()
    cur.close()
    return Lom(res[0], res[1], res[2] == 1, res[3])
def get_loms():
    llist = []
    cur = db_iface.get_cursor()
    cur.execute(GET_ALL_LOMS)
    for l in cur.fetchall():
        llist.append(Lom(l[0], l[1], l[2] == 1, l[3]))
    cur.close()
    return llist
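# Illustrative usage sketch, not part of the original module. It assumes the
# application has already initialised db_iface's connection and run
# create_tables(); the list name, movements and dates below are made up.
def _example_lom_usage():
    groceries = Lom(name='groceries', visible=True, color='blue')
    groceries.add([
        Mom(value=-42.5, cause='supermarket', year=2018, month=5, day=12),
        Mom(value=-13.0, cause='bakery', year=2018, month=5, day=14),
    ])
    start = datetime.date(2018, 5, 1)
    end = datetime.date(2018, 5, 31)
    moms = groceries.get_moms(start_date=start, end_date=end)
    dates, values = groceries.balance_per_day(start_date=start, end_date=end)
    return moms, dates, values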