repo_name | path | copies | size | content | license
---|---|---|---|---|---|
rspavel/spack | var/spack/repos/builtin/packages/hbase/package.py | 3 | 1402 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Hbase(Package):
"""Apache HBase is an open-source, distributed, versioned, column-oriented
store modeled after Google's Bigtable: A Distributed Storage System for
Structured Data by Chang et al. Just as Bigtable leverages the distributed
data storage provided by the Google File System, HBase provides
Bigtable-like capabilities on top of Apache Hadoop."""
homepage = "https://archive.apache.org/"
url = "https://archive.apache.org/dist/hbase/2.2.4/hbase-2.2.4-bin.tar.gz"
list_url = "https://archive.apache.org/dist/hbase"
list_depth = 1
version('2.2.5', sha256='25d08f8f038d9de5beb43dfb0392e8a8b34eae7e0f2670d6c2c172abc3855194')
version('2.2.4', sha256='ec91b628352931e22a091a206be93061b6bf5364044a28fb9e82f0023aca3ca4')
version('2.2.3', sha256='ea8fa72aa6220e038e30bd7c439d181b10bd7225383f7f2d224ebb5f5397310a')
version('2.2.2', sha256='97dcca3a031925a379a0ee6bbfb6007533fb4fdb982c23345e5fc04d6c52bebc')
version('2.1.8', sha256='d8296e8405b1c39c73f0dd03fc6b4d2af754035724168fd56e8f2a0ff175ad90')
depends_on('java@8', type='run')
def install(self, spec, prefix):
install_tree('.', prefix)
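# Illustrative usage (not part of the recipe itself) - once this package file
# is available in a Spack repository, one of the listed versions could be
# installed with, e.g.:
#
#     spack install hbase@2.2.5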
| lgpl-2.1 |
kaphka/catconv | convert.py | 1 | 1091 | import argparse
import signal
from tqdm import tqdm
import catconv.operations as co
import catconv.stabi as sb
exit = False
def signal_handler(signal, frame):
global exit
print('You pressed Ctrl+C!')
exit = True
signal.signal(signal.SIGINT, signal_handler)
parser = argparse.ArgumentParser()
parser.add_argument("source")
parser.add_argument("target")
parser.add_argument("-u", "--update", help="overwrite previous results",
action="store_true")
args = parser.parse_args()
source = sb.op.normpath(args.source)
target = sb.op.normpath(args.target)
data_dir, target_cat_name = sb.op.split(target)
pages = list(map(sb.page_from_path, sb.catalog_pages(source, ext=".tif")))
print("Source catalog:")
print("path:", source)
print("pages:", len(pages))
conversion = {"ext": ".jpg", "remove_type": True, "to_cat": data_dir,"cat": target_cat_name}
from_to = [(page, sb.convert_page_path(page, conversion)) for page in pages]
for ft in tqdm(from_to):
if exit:
break
from_page, to_page = ft
if sb.op.isfile(to_page['path']) and not args.update:
continue
else:
co.convert_to_png(*ft)
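# Illustrative invocation (paths are hypothetical) - convert the .tif pages of
# a source catalog into the target catalog, overwriting earlier results:
#
#     python convert.py /data/catalogs/src /data/catalogs/out --update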
| apache-2.0 |
supersven/intellij-community | python/helpers/coverage/data.py | 209 | 9188 | """Coverage data for Coverage."""
import os
from coverage.backward import iitems, pickle, sorted # pylint: disable=W0622
from coverage.files import PathAliases
from coverage.misc import file_be_gone
class CoverageData(object):
"""Manages collected coverage data, including file storage.
The data file format is a pickled dict, with these keys:
* collector: a string identifying the collecting software
* lines: a dict mapping filenames to sorted lists of line numbers
executed:
{ 'file1': [17,23,45], 'file2': [1,2,3], ... }
* arcs: a dict mapping filenames to sorted lists of line number pairs:
{ 'file1': [(17,23), (17,25), (25,26)], ... }
"""
def __init__(self, basename=None, collector=None, debug=None):
"""Create a CoverageData.
`basename` is the name of the file to use for storing data.
`collector` is a string describing the coverage measurement software.
`debug` is a `DebugControl` object for writing debug messages.
"""
self.collector = collector or 'unknown'
self.debug = debug
self.use_file = True
# Construct the filename that will be used for data file storage, if we
# ever do any file storage.
self.filename = basename or ".coverage"
self.filename = os.path.abspath(self.filename)
# A map from canonical Python source file name to a dictionary in
# which there's an entry for each line number that has been
# executed:
#
# {
# 'filename1.py': { 12: None, 47: None, ... },
# ...
# }
#
self.lines = {}
# A map from canonical Python source file name to a dictionary with an
# entry for each pair of line numbers forming an arc:
#
# {
# 'filename1.py': { (12,14): None, (47,48): None, ... },
# ...
# }
#
self.arcs = {}
def usefile(self, use_file=True):
"""Set whether or not to use a disk file for data."""
self.use_file = use_file
def read(self):
"""Read coverage data from the coverage data file (if it exists)."""
if self.use_file:
self.lines, self.arcs = self._read_file(self.filename)
else:
self.lines, self.arcs = {}, {}
def write(self, suffix=None):
"""Write the collected coverage data to a file.
`suffix` is a suffix to append to the base file name. This can be used
for multiple or parallel execution, so that many coverage data files
can exist simultaneously. A dot will be used to join the base name and
the suffix.
"""
if self.use_file:
filename = self.filename
if suffix:
filename += "." + suffix
self.write_file(filename)
def erase(self):
"""Erase the data, both in this object, and from its file storage."""
if self.use_file:
if self.filename:
file_be_gone(self.filename)
self.lines = {}
self.arcs = {}
def line_data(self):
"""Return the map from filenames to lists of line numbers executed."""
return dict(
[(f, sorted(lmap.keys())) for f, lmap in iitems(self.lines)]
)
def arc_data(self):
"""Return the map from filenames to lists of line number pairs."""
return dict(
[(f, sorted(amap.keys())) for f, amap in iitems(self.arcs)]
)
def write_file(self, filename):
"""Write the coverage data to `filename`."""
# Create the file data.
data = {}
data['lines'] = self.line_data()
arcs = self.arc_data()
if arcs:
data['arcs'] = arcs
if self.collector:
data['collector'] = self.collector
if self.debug and self.debug.should('dataio'):
self.debug.write("Writing data to %r" % (filename,))
# Write the pickle to the file.
fdata = open(filename, 'wb')
try:
pickle.dump(data, fdata, 2)
finally:
fdata.close()
def read_file(self, filename):
"""Read the coverage data from `filename`."""
self.lines, self.arcs = self._read_file(filename)
def raw_data(self, filename):
"""Return the raw pickled data from `filename`."""
if self.debug and self.debug.should('dataio'):
self.debug.write("Reading data from %r" % (filename,))
fdata = open(filename, 'rb')
try:
data = pickle.load(fdata)
finally:
fdata.close()
return data
def _read_file(self, filename):
"""Return the stored coverage data from the given file.
Returns two values, suitable for assigning to `self.lines` and
`self.arcs`.
"""
lines = {}
arcs = {}
try:
data = self.raw_data(filename)
if isinstance(data, dict):
# Unpack the 'lines' item.
lines = dict([
(f, dict.fromkeys(linenos, None))
for f, linenos in iitems(data.get('lines', {}))
])
# Unpack the 'arcs' item.
arcs = dict([
(f, dict.fromkeys(arcpairs, None))
for f, arcpairs in iitems(data.get('arcs', {}))
])
except Exception:
pass
return lines, arcs
def combine_parallel_data(self, aliases=None):
"""Combine a number of data files together.
Treat `self.filename` as a file prefix, and combine the data from all
of the data files starting with that prefix plus a dot.
If `aliases` is provided, it's a `PathAliases` object that is used to
re-map paths to match the local machine's.
"""
aliases = aliases or PathAliases()
data_dir, local = os.path.split(self.filename)
localdot = local + '.'
for f in os.listdir(data_dir or '.'):
if f.startswith(localdot):
full_path = os.path.join(data_dir, f)
new_lines, new_arcs = self._read_file(full_path)
for filename, file_data in iitems(new_lines):
filename = aliases.map(filename)
self.lines.setdefault(filename, {}).update(file_data)
for filename, file_data in iitems(new_arcs):
filename = aliases.map(filename)
self.arcs.setdefault(filename, {}).update(file_data)
if f != local:
os.remove(full_path)
def add_line_data(self, line_data):
"""Add executed line data.
`line_data` is { filename: { lineno: None, ... }, ...}
"""
for filename, linenos in iitems(line_data):
self.lines.setdefault(filename, {}).update(linenos)
def add_arc_data(self, arc_data):
"""Add measured arc data.
`arc_data` is { filename: { (l1,l2): None, ... }, ...}
"""
for filename, arcs in iitems(arc_data):
self.arcs.setdefault(filename, {}).update(arcs)
def touch_file(self, filename):
"""Ensure that `filename` appears in the data, empty if needed."""
self.lines.setdefault(filename, {})
def measured_files(self):
"""A list of all files that had been measured."""
return list(self.lines.keys())
def executed_lines(self, filename):
"""A map containing all the line numbers executed in `filename`.
If `filename` hasn't been collected at all (because it wasn't executed)
then return an empty map.
"""
return self.lines.get(filename) or {}
def executed_arcs(self, filename):
"""A map containing all the arcs executed in `filename`."""
return self.arcs.get(filename) or {}
def add_to_hash(self, filename, hasher):
"""Contribute `filename`'s data to the Md5Hash `hasher`."""
hasher.update(self.executed_lines(filename))
hasher.update(self.executed_arcs(filename))
def summary(self, fullpath=False):
"""Return a dict summarizing the coverage data.
Keys are based on the filenames, and values are the number of executed
lines. If `fullpath` is true, then the keys are the full pathnames of
the files, otherwise they are the basenames of the files.
"""
summ = {}
if fullpath:
filename_fn = lambda f: f
else:
filename_fn = os.path.basename
for filename, lines in iitems(self.lines):
summ[filename_fn(filename)] = len(lines)
return summ
def has_arcs(self):
"""Does this data have arcs?"""
return bool(self.arcs)
if __name__ == '__main__':
# Ad-hoc: show the raw data in a data file.
import pprint, sys
covdata = CoverageData()
if sys.argv[1:]:
fname = sys.argv[1]
else:
fname = covdata.filename
pprint.pprint(covdata.raw_data(fname))
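# Illustrative programmatic usage (file name assumed) - read an existing data
# file and inspect what was measured, using only the methods defined above:
#
#     data = CoverageData(basename='.coverage')
#     data.read()
#     print(data.measured_files())   # e.g. ['/abs/path/module.py', ...]
#     print(data.summary())          # e.g. {'module.py': 42, ...}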
| apache-2.0 |
DavidNorman/tensorflow | tensorflow/python/ops/image_grad.py | 5 | 15565 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains Gradient functions for image ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import math_ops
@ops.RegisterGradient("ResizeNearestNeighbor")
def _ResizeNearestNeighborGrad(op, grad):
"""The derivatives for nearest neighbor resizing.
Args:
op: The ResizeNearestNeighbor op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input and the output.
"""
image = op.inputs[0]
if image.get_shape()[1:3].is_fully_defined():
image_shape = image.get_shape()[1:3]
else:
image_shape = array_ops.shape(image)[1:3]
grads = gen_image_ops.resize_nearest_neighbor_grad(
grad,
image_shape,
align_corners=op.get_attr("align_corners"),
half_pixel_centers=op.get_attr("half_pixel_centers"))
return [grads, None]
@ops.RegisterGradient("ResizeBilinear")
def _ResizeBilinearGrad(op, grad):
"""The derivatives for bilinear resizing.
Args:
op: The ResizeBilinear op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input.
"""
grad0 = gen_image_ops.resize_bilinear_grad(
grad,
op.inputs[0],
align_corners=op.get_attr("align_corners"),
half_pixel_centers=op.get_attr("half_pixel_centers"))
return [grad0, None]
@ops.RegisterGradient("ScaleAndTranslate")
def _ScaleAndTranslateGrad(op, grad):
"""The derivatives for ScaleAndTranslate transformation op.
Args:
op: The ScaleAndTranslate op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input.
"""
grad0 = gen_image_ops.scale_and_translate_grad(
grad,
op.inputs[0],
op.inputs[2],
op.inputs[3],
kernel_type=op.get_attr("kernel_type"),
antialias=op.get_attr("antialias"))
return [grad0, None, None, None]
@ops.RegisterGradient("ResizeBicubic")
def _ResizeBicubicGrad(op, grad):
"""The derivatives for bicubic resizing.
Args:
op: The ResizeBicubic op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input.
"""
allowed_types = [dtypes.float32, dtypes.float64]
grad0 = None
if op.inputs[0].dtype in allowed_types:
grad0 = gen_image_ops.resize_bicubic_grad(
grad,
op.inputs[0],
align_corners=op.get_attr("align_corners"),
half_pixel_centers=op.get_attr("half_pixel_centers"))
return [grad0, None]
@ops.RegisterGradient("CropAndResize")
def _CropAndResizeGrad(op, grad):
"""The derivatives for crop_and_resize.
We back-propagate to the image only when the input image tensor has floating
point dtype but we always back-propagate to the input boxes tensor.
Args:
op: The CropAndResize op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input image, boxes, as well as the always-None
gradients w.r.t. box_ind and crop_size.
"""
image = op.inputs[0]
if image.get_shape().is_fully_defined():
image_shape = image.get_shape().as_list()
else:
image_shape = array_ops.shape(image)
allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64]
if op.inputs[0].dtype in allowed_types:
# pylint: disable=protected-access
grad0 = gen_image_ops.crop_and_resize_grad_image(
grad, op.inputs[1], op.inputs[2], image_shape, T=op.get_attr("T"),
method=op.get_attr("method"))
# pylint: enable=protected-access
else:
grad0 = None
# `grad0` is the gradient to the input image pixels and it
# has been implemented for nearest neighbor and bilinear sampling
# respectively. `grad1` is the gradient to the input crop boxes' coordinates.
# When using nearest neighbor sampling, the gradient to crop boxes'
# coordinates are not well defined. In practice, we still approximate
# grad1 using the gradient derived from bilinear sampling.
grad1 = gen_image_ops.crop_and_resize_grad_boxes(
grad, op.inputs[0], op.inputs[1], op.inputs[2])
return [grad0, grad1, None, None]
def _CustomReciprocal(x):
"""Wrapper function around `math_ops.div_no_nan()` to perform a "safe" reciprocal incase the input is zero. Avoids divide by zero and NaNs.
Input:
x -> input tensor whose reciprocal is to be computed.
Returns:
x_reciprocal -> reciprocal of x without NaNs.
"""
return math_ops.div_no_nan(1.0, x)
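# Note: math_ops.div_no_nan returns 0 wherever the denominator is 0, so
# _CustomReciprocal(0.0) yields 0.0 rather than inf/NaN; the partial
# derivatives below therefore stay finite (and vanish) for degenerate pixels
# such as pure greys, where saturation == 0.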
@ops.RegisterGradient("RGBToHSV")
def _RGBToHSVGrad(op, grad):
"""The gradients for `rgb_to_hsv` operation.
This function is a piecewise continuous function as defined here:
https://en.wikipedia.org/wiki/HSL_and_HSV#From_RGB
We perform the multivariate derivative and compute all partial derivatives
separately before adding them at the end. Formulas are given before each
partial derivative calculation.
Args:
op: The `rgb_to_hsv` `Operation` that we are differentiating.
grad: Gradient with respect to the output of the `rgb_to_hsv` op.
Returns:
Gradients with respect to the input of `rgb_to_hsv`.
"""
# Input Channels
reds = op.inputs[0][..., 0]
greens = op.inputs[0][..., 1]
blues = op.inputs[0][..., 2]
# Output Channels
saturation = op.outputs[0][..., 1]
value = op.outputs[0][..., 2]
# Mask/Indicator for max and min values of each pixel.
# Arbitrary assignment in case of tie breakers with R>G>B.
# Max values
red_biggest = math_ops.cast((reds >= blues) & \
(reds >= greens), dtypes.float32)
green_biggest = math_ops.cast((greens > reds) & \
(greens >= blues), dtypes.float32)
blue_biggest = math_ops.cast((blues > reds) & \
(blues > greens), dtypes.float32)
# Min values
red_smallest = math_ops.cast((reds < blues) & \
(reds < greens), dtypes.float32)
green_smallest = math_ops.cast((greens <= reds) & \
(greens < blues), dtypes.float32)
blue_smallest = math_ops.cast((blues <= reds) & \
(blues <= greens), dtypes.float32)
# Derivatives of R, G, B wrt Value slice
dv_dr = red_biggest
dv_dg = green_biggest
dv_db = blue_biggest
# Derivatives of R, G, B wrt Saturation slice
# The first term in the addition is the case when the corresponding color
# from (r,g,b) was "MAX"
# -> derivative = MIN/square(MAX), MIN could be one of the other two colors
# The second term is the case when the corresponding color from
# (r,g,b) was "MIN"
# -> derivative = -1/MAX, MAX could be one of the other two colours.
ds_dr = math_ops.cast(reds > 0, dtypes.float32) * \
math_ops.add(red_biggest * \
math_ops.add(green_smallest * greens, blue_smallest * blues) * \
_CustomReciprocal(math_ops.square(reds)),\
red_smallest * -1 * _CustomReciprocal((green_biggest * \
greens) + (blue_biggest * blues)))
ds_dg = math_ops.cast(greens > 0, dtypes.float32) * \
math_ops.add(green_biggest * \
math_ops.add(red_smallest * reds, blue_smallest * blues) * \
_CustomReciprocal(math_ops.square(greens)),\
green_smallest * -1 * _CustomReciprocal((red_biggest * \
reds) + (blue_biggest * blues)))
ds_db = math_ops.cast(blues > 0, dtypes.float32) * \
math_ops.add(blue_biggest * \
math_ops.add(green_smallest * greens, red_smallest * reds) * \
_CustomReciprocal(math_ops.square(blues)),\
blue_smallest * -1 * _CustomReciprocal((green_biggest * \
greens) + (red_biggest * reds)))
# Derivatives of R, G, B wrt Hue slice
# Need to go case by case for each color.
# for red, dh_dr -> dh_dr_1 + dh_dr_2 + dh_dr_3 + dh_dr_4 + dh_dr_5
# dh_dr_1 ->
# if red was MAX, then derivative = 60 * -1 * (G-B)/square(MAX-MIN) == 60 *\
# -1 * (greens-blues) * reciprocal(square(saturation)) * \
# reciprocal(square(value))
# elif green was MAX, there are two subcases
# ie when red was MIN and when red was NOT MIN
# dh_dr_2 ->
# if red was MIN (use UV rule) -> 60 * ((1 * -1/(MAX-MIN)) +\
# (B-R)*(-1/square(MAX-MIN) * -1)) == 60 * (blues - greens) *\
# reciprocal(square(reds - greens))
# dh_dr_3 ->
# if red was NOT MIN -> 60 * -1/MAX-MIN == -60 * reciprocal(greens-blues)
# elif blue was MAX, there are two subcases
# dh_dr_4 ->
# if red was MIN (similarly use the UV rule) -> 60 * (blues - greens) *\
# reciprocal(square(blues - reds))
# dh_dr_5 ->
# if red was NOT MIN -> 60 * 1/MAX-MIN == 60 * reciprocal(blues-greens)
dh_dr_1 = 60 * (math_ops.cast(reds > 0, dtypes.float32) * red_biggest * \
-1 * \
(greens - blues) * \
_CustomReciprocal(math_ops.square(saturation)) *\
_CustomReciprocal(math_ops.square(value)))
dh_dr_2 = 60 * (math_ops.cast(greens > 0, dtypes.float32) * green_biggest * \
red_smallest * (blues - greens) * \
_CustomReciprocal(math_ops.square(reds - greens)))
dh_dr_3 = 60 * (math_ops.cast(greens > 0, dtypes.float32) * green_biggest * \
blue_smallest * -1 * _CustomReciprocal(greens - blues))
dh_dr_4 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \
red_smallest * (blues - greens) * \
_CustomReciprocal(math_ops.square(blues - reds)))
dh_dr_5 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \
green_smallest * _CustomReciprocal(blues - greens))
dh_dr = dh_dr_1 + dh_dr_2 + dh_dr_3 + dh_dr_4 + dh_dr_5
# Converting from degrees to [0,1] scale as specified in
# https://www.tensorflow.org/api_docs/python/tf/image/rgb_to_hsv
dh_dr = dh_dr / 360
# for green, dh_dg -> dh_dg_1 + dh_dg_2 + dh_dg_3 + dh_dg_4 + dh_dg_5
# dh_dg_1 ->
# if green was MAX, then derivative = 60 * -1 * (B-R)/square(MAX-MIN) == 60 *\
# -1 * (blues - reds) * reciprocal(square(saturation)) * \
# reciprocal(square(value))
# elif red was MAX, there are two subcases ie
# when green was MIN and when green was NOT MIN
# dh_dg_2 ->
# if green was MIN (use UV rule) -> 60 * ((1 * 1/(MAX-MIN)) + \
# (greens-blues) * (-1/square(MAX-MIN) * -1)) == 60 * \
# ((reciprocal(reds-greens) + (greens-blues) * \
# reciprocal(square(reds-greens))))
# dh_dg_3 ->
# if green was NOT MIN -> 60 * 1/MAX-MIN == 60 * reciprocal(reds - blues)
# elif blue was MAX, there are two subcases
# dh_dg_4 ->
# if green was MIN (similarly use the UV rule) -> 60 * -1 * \
# (reciprocal(blues - greens) + (reds-greens)* -1 * \
# reciprocal(square(blues-greens)))
# dh_dg_5 ->
# if green was NOT MIN -> 60 * -1/MAX-MIN == -60 * reciprocal(blues - reds)
dh_dg_1 = 60 * (math_ops.cast(greens > 0, dtypes.float32) * green_biggest * \
-1 * (blues - reds) * \
_CustomReciprocal(math_ops.square(saturation))\
* _CustomReciprocal(math_ops.square(value)))
dh_dg_2 = 60 * (math_ops.cast(reds > 0, dtypes.float32) * red_biggest * \
green_smallest * (reds - blues) * \
_CustomReciprocal(math_ops.square(reds - greens)))
dh_dg_3 = 60 * (math_ops.cast(reds > 0, dtypes.float32) * red_biggest * \
blue_smallest * _CustomReciprocal(reds - blues))
dh_dg_4 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \
green_smallest * (reds - blues) * \
_CustomReciprocal(math_ops.square(blues - greens)))
dh_dg_5 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \
red_smallest * -1 * _CustomReciprocal(blues - reds))
dh_dg = dh_dg_1 + dh_dg_2 + dh_dg_3 + dh_dg_4 + dh_dg_5
# Converting from degrees to [0,1] scale as specified in
# https://www.tensorflow.org/api_docs/python/tf/image/rgb_to_hsv
dh_dg = dh_dg / 360
# for blue, dh_db -> dh_db_1 + dh_db_2 + dh_db_3 + dh_db_4 + dh_db_5
# dh_db_1 ->
# if blue was MAX, then derivative = 60 * -1 * (R-G)/square(MAX-MIN) == 60 *\
# -1 * reciprocal(square(saturation)) * reciprocal(square(value))
# elif red was MAX, there are two subcases
# ie when blue was MIN and when blue was NOT MIN
# dh_db_2 ->
# if blue was MIN (use UV rule) -> 60 * ((1 * -1/(MAX-MIN)) + \
# (greens-blues) * (-1/square(MAX-MIN) * -1)) == 60 * (greens - reds) *\
# reciprocal(square(reds - blues))
# dh_db_3 ->
# if blue was NOT MIN -> 60 * -1/MAX-MIN == 60 * -1 * \
# reciprocal(reds - greens)
# elif green was MAX, there are two subcases
# dh_db_4 ->
# if blue was MIN (similarly use the UV rule) -> 60 * -1 * \
# (reciprocal(greens - blues) + (blues - reds) * -1 * \
# reciprocal(square(greens - blues)))
# dh_db_5 ->
# if blue was NOT MIN -> 60 * 1/MAX-MIN == 60 * reciprocal(greens - reds)
dh_db_1 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \
-1 * \
(reds - greens) * \
_CustomReciprocal(math_ops.square(saturation)) * \
_CustomReciprocal(math_ops.square(value)))
dh_db_2 = 60 * (math_ops.cast(reds > 0, dtypes.float32) * red_biggest *\
blue_smallest * (greens - reds) * \
_CustomReciprocal(math_ops.square(reds - blues)))
dh_db_3 = 60 * (math_ops.cast(reds > 0, dtypes.float32) * red_biggest * \
green_smallest * -1 * _CustomReciprocal(reds - greens))
dh_db_4 = 60 * (math_ops.cast(greens > 0, dtypes.float32) * green_biggest * \
blue_smallest * (greens - reds) * \
_CustomReciprocal(math_ops.square(greens - blues)))
dh_db_5 = 60 * (math_ops.cast(greens > 0, dtypes.float32) * green_biggest * \
red_smallest * _CustomReciprocal(greens - reds))
dh_db = dh_db_1 + dh_db_2 + dh_db_3 + dh_db_4 + dh_db_5
# Converting from degrees to [0,1] scale as specified in
# https://www.tensorflow.org/api_docs/python/tf/image/rgb_to_hsv
dh_db = dh_db / 360
# Gradients wrt to inputs
dv_drgb = array_ops.stack(
[grad[..., 2] * dv_dr, grad[..., 2] * dv_dg, grad[..., 2] * dv_db],
axis=-1)
ds_drgb = array_ops.stack(
[grad[..., 1] * ds_dr, grad[..., 1] * ds_dg, grad[..., 1] * ds_db],
axis=-1)
dh_drgb = array_ops.stack(
[grad[..., 0] * dh_dr, grad[..., 0] * dh_dg, grad[..., 0] * dh_db],
axis=-1)
gradient_input = math_ops.add(math_ops.add(dv_drgb, ds_drgb), dh_drgb)
return gradient_input
| apache-2.0 |
Bysmyyr/chromium-crosswalk | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py | 39 | 13796 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.layout_tests.controllers import repaint_overlay
from webkitpy.layout_tests.models import test_failures
_log = logging.getLogger(__name__)
def write_test_result(filesystem, port, results_directory, test_name, driver_output,
expected_driver_output, failures):
"""Write the test result to the result output directory."""
root_output_dir = results_directory
writer = TestResultWriter(filesystem, port, root_output_dir, test_name)
if driver_output.error:
writer.write_stderr(driver_output.error)
for failure in failures:
# FIXME: Instead of this long 'if' block, each failure class might
# have a responsibility for writing a test result.
if isinstance(failure, (test_failures.FailureMissingResult,
test_failures.FailureTextMismatch,
test_failures.FailureTestHarnessAssertion)):
writer.write_text_files(driver_output.text, expected_driver_output.text)
writer.create_text_diff_and_write_result(driver_output.text, expected_driver_output.text)
elif isinstance(failure, test_failures.FailureMissingImage):
writer.write_image_files(driver_output.image, expected_image=None)
elif isinstance(failure, test_failures.FailureMissingImageHash):
writer.write_image_files(driver_output.image, expected_driver_output.image)
elif isinstance(failure, test_failures.FailureImageHashMismatch):
writer.write_image_files(driver_output.image, expected_driver_output.image)
writer.write_image_diff_files(driver_output.image_diff)
elif isinstance(failure, (test_failures.FailureAudioMismatch,
test_failures.FailureMissingAudio)):
writer.write_audio_files(driver_output.audio, expected_driver_output.audio)
elif isinstance(failure, test_failures.FailureCrash):
crashed_driver_output = expected_driver_output if failure.is_reftest else driver_output
writer.write_crash_log(crashed_driver_output.crash_log)
elif isinstance(failure, test_failures.FailureLeak):
writer.write_leak_log(driver_output.leak_log)
elif isinstance(failure, test_failures.FailureReftestMismatch):
writer.write_image_files(driver_output.image, expected_driver_output.image)
# FIXME: This work should be done earlier in the pipeline (e.g., when we compare images for non-ref tests).
# FIXME: We should always have 2 images here.
if driver_output.image and expected_driver_output.image:
diff_image, err_str = port.diff_image(expected_driver_output.image, driver_output.image)
if diff_image:
writer.write_image_diff_files(diff_image)
else:
_log.warn('ref test mismatch did not produce an image diff.')
writer.write_image_files(driver_output.image, expected_image=None)
if filesystem.exists(failure.reference_filename):
writer.write_reftest(failure.reference_filename)
else:
_log.warn("reference %s was not found" % failure.reference_filename)
elif isinstance(failure, test_failures.FailureReftestMismatchDidNotOccur):
writer.write_image_files(driver_output.image, expected_image=None)
if filesystem.exists(failure.reference_filename):
writer.write_reftest(failure.reference_filename)
else:
_log.warn("reference %s was not found" % failure.reference_filename)
else:
assert isinstance(failure, (test_failures.FailureTimeout, test_failures.FailureReftestNoImagesGenerated))
if expected_driver_output is not None:
writer.create_repaint_overlay_result(driver_output.text, expected_driver_output.text)
class TestResultWriter(object):
"""A class which handles all writing operations to the result directory."""
# Filename pieces when writing failures to the test results directory.
FILENAME_SUFFIX_ACTUAL = "-actual"
FILENAME_SUFFIX_EXPECTED = "-expected"
FILENAME_SUFFIX_DIFF = "-diff"
FILENAME_SUFFIX_STDERR = "-stderr"
FILENAME_SUFFIX_CRASH_LOG = "-crash-log"
FILENAME_SUFFIX_SAMPLE = "-sample"
FILENAME_SUFFIX_LEAK_LOG = "-leak-log"
FILENAME_SUFFIX_WDIFF = "-wdiff.html"
FILENAME_SUFFIX_PRETTY_PATCH = "-pretty-diff.html"
FILENAME_SUFFIX_IMAGE_DIFF = "-diff.png"
FILENAME_SUFFIX_IMAGE_DIFFS_HTML = "-diffs.html"
FILENAME_SUFFIX_OVERLAY = "-overlay.html"
def __init__(self, filesystem, port, root_output_dir, test_name):
self._filesystem = filesystem
self._port = port
self._root_output_dir = root_output_dir
self._test_name = test_name
def _make_output_directory(self):
"""Creates the output directory (if needed) for a given test filename."""
fs = self._filesystem
output_filename = fs.join(self._root_output_dir, self._test_name)
fs.maybe_make_directory(fs.dirname(output_filename))
def output_filename(self, modifier):
"""Returns a filename inside the output dir that contains modifier.
For example, if test name is "fast/dom/foo.html" and modifier is "-expected.txt",
the return value is "/<path-to-root-output-dir>/fast/dom/foo-expected.txt".
Args:
modifier: a string to replace the extension of filename with
Return:
The absolute path to the output filename
"""
fs = self._filesystem
output_filename = fs.join(self._root_output_dir, self._test_name)
return fs.splitext(output_filename)[0] + modifier
def _write_file(self, path, contents):
if contents is not None:
self._make_output_directory()
self._filesystem.write_binary_file(path, contents)
def _output_testname(self, modifier):
fs = self._filesystem
return fs.splitext(fs.basename(self._test_name))[0] + modifier
def write_output_files(self, file_type, output, expected):
"""Writes the test output, the expected output in the results directory.
The full output filename of the actual, for example, will be
<filename>-actual<file_type>
For instance,
my_test-actual.txt
Args:
file_type: A string describing the test output file type, e.g. ".txt"
output: A string containing the test output
expected: A string containing the expected test output
"""
actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)
expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)
self._write_file(actual_filename, output)
self._write_file(expected_filename, expected)
def write_stderr(self, error):
filename = self.output_filename(self.FILENAME_SUFFIX_STDERR + ".txt")
self._write_file(filename, error)
def write_crash_log(self, crash_log):
filename = self.output_filename(self.FILENAME_SUFFIX_CRASH_LOG + ".txt")
self._write_file(filename, crash_log.encode('utf8', 'replace'))
def write_leak_log(self, leak_log):
filename = self.output_filename(self.FILENAME_SUFFIX_LEAK_LOG + ".txt")
self._write_file(filename, leak_log)
def copy_sample_file(self, sample_file):
filename = self.output_filename(self.FILENAME_SUFFIX_SAMPLE + ".txt")
self._filesystem.copyfile(sample_file, filename)
def write_text_files(self, actual_text, expected_text):
self.write_output_files(".txt", actual_text, expected_text)
def create_text_diff_and_write_result(self, actual_text, expected_text):
# FIXME: This function is actually doing the diffs as well as writing results.
# It might be better to extract code which does 'diff' and make it a separate function.
if not actual_text or not expected_text:
return
file_type = '.txt'
actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)
expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)
# We treat diff output as binary. Diff output may contain multiple files
# in conflicting encodings.
diff = self._port.diff_text(expected_text, actual_text, expected_filename, actual_filename)
diff_filename = self.output_filename(self.FILENAME_SUFFIX_DIFF + file_type)
self._write_file(diff_filename, diff)
# Shell out to wdiff to get colored inline diffs.
if self._port.wdiff_available():
wdiff = self._port.wdiff_text(expected_filename, actual_filename)
wdiff_filename = self.output_filename(self.FILENAME_SUFFIX_WDIFF)
self._write_file(wdiff_filename, wdiff)
# Use WebKit's PrettyPatch.rb to get an HTML diff.
if self._port.pretty_patch_available():
pretty_patch = self._port.pretty_patch_text(diff_filename)
pretty_patch_filename = self.output_filename(self.FILENAME_SUFFIX_PRETTY_PATCH)
self._write_file(pretty_patch_filename, pretty_patch)
def create_repaint_overlay_result(self, actual_text, expected_text):
html = repaint_overlay.generate_repaint_overlay_html(self._test_name, actual_text, expected_text)
if html:
overlay_filename = self.output_filename(self.FILENAME_SUFFIX_OVERLAY)
self._write_file(overlay_filename, html)
def write_audio_files(self, actual_audio, expected_audio):
self.write_output_files('.wav', actual_audio, expected_audio)
def write_image_files(self, actual_image, expected_image):
self.write_output_files('.png', actual_image, expected_image)
def write_image_diff_files(self, image_diff):
diff_filename = self.output_filename(self.FILENAME_SUFFIX_IMAGE_DIFF)
self._write_file(diff_filename, image_diff)
diffs_html_filename = self.output_filename(self.FILENAME_SUFFIX_IMAGE_DIFFS_HTML)
# FIXME: old-run-webkit-tests shows the diff percentage as the text contents of the "diff" link.
# FIXME: old-run-webkit-tests include a link to the test file.
html = """<!DOCTYPE HTML>
<html>
<head>
<title>%(title)s</title>
<style>.label{font-weight:bold}</style>
</head>
<body>
Difference between images: <a href="%(diff_filename)s">diff</a><br>
<div class=imageText></div>
<div class=imageContainer data-prefix="%(prefix)s">Loading...</div>
<script>
(function() {
var preloadedImageCount = 0;
function preloadComplete() {
++preloadedImageCount;
if (preloadedImageCount < 2)
return;
toggleImages();
setInterval(toggleImages, 2000)
}
function preloadImage(url) {
image = new Image();
image.addEventListener('load', preloadComplete);
image.src = url;
return image;
}
function toggleImages() {
if (text.textContent == 'Expected Image') {
text.textContent = 'Actual Image';
container.replaceChild(actualImage, container.firstChild);
} else {
text.textContent = 'Expected Image';
container.replaceChild(expectedImage, container.firstChild);
}
}
var text = document.querySelector('.imageText');
var container = document.querySelector('.imageContainer');
var actualImage = preloadImage(container.getAttribute('data-prefix') + '-actual.png');
var expectedImage = preloadImage(container.getAttribute('data-prefix') + '-expected.png');
})();
</script>
</body>
</html>
""" % {
'title': self._test_name,
'diff_filename': self._output_testname(self.FILENAME_SUFFIX_IMAGE_DIFF),
'prefix': self._output_testname(''),
}
self._write_file(diffs_html_filename, html)
def write_reftest(self, src_filepath):
fs = self._filesystem
dst_dir = fs.dirname(fs.join(self._root_output_dir, self._test_name))
dst_filepath = fs.join(dst_dir, fs.basename(src_filepath))
self._write_file(dst_filepath, fs.read_binary_file(src_filepath))
| bsd-3-clause |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/numpy/testing/decorators.py | 66 | 8409 | """
Decorators for labeling and modifying behavior of test objects.
Decorators that merely return a modified version of the original
function object are straightforward. Decorators that return a new
function object need to use
::
nose.tools.make_decorator(original_function)(decorator)
in returning the decorator, in order to preserve meta-data such as
function name, setup and teardown functions and so on - see
``nose.tools`` for more information.
"""
from __future__ import division, absolute_import, print_function
import warnings
import collections
def slow(t):
"""
Label a test as 'slow'.
The exact definition of a slow test is obviously both subjective and
hardware-dependent, but in general any individual test that requires more
than a second or two should be labeled as slow (the whole suite consists of
thousands of tests, so even a second is significant).
Parameters
----------
t : callable
The test to label as slow.
Returns
-------
t : callable
The decorated test `t`.
Examples
--------
The `numpy.testing` module includes ``import decorators as dec``.
A test can be decorated as slow like this::
from numpy.testing import *
@dec.slow
def test_big(self):
print 'Big, slow test'
"""
t.slow = True
return t
def setastest(tf=True):
"""
Signals to nose that this function is or is not a test.
Parameters
----------
tf : bool
If True, specifies that the decorated callable is a test.
If False, specifies that the decorated callable is not a test.
Default is True.
Notes
-----
This decorator can't use the nose namespace, because it can be
called from a non-test module. See also ``istest`` and ``nottest`` in
``nose.tools``.
Examples
--------
`setastest` can be used in the following way::
from numpy.testing.decorators import setastest
@setastest(False)
def func_with_test_in_name(arg1, arg2):
pass
"""
def set_test(t):
t.__test__ = tf
return t
return set_test
def skipif(skip_condition, msg=None):
"""
Make function raise SkipTest exception if a given condition is true.
If the condition is a callable, it is used at runtime to dynamically
make the decision. This is useful for tests that may require costly
imports, to delay the cost until the test suite is actually executed.
Parameters
----------
skip_condition : bool or callable
Flag to determine whether to skip the decorated test.
msg : str, optional
Message to give on raising a SkipTest exception. Default is None.
Returns
-------
decorator : function
Decorator which, when applied to a function, causes SkipTest
to be raised when `skip_condition` is True, and the function
to be called normally otherwise.
Notes
-----
The decorator itself is decorated with the ``nose.tools.make_decorator``
function in order to transmit function name, and various other metadata.
"""
def skip_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
# Allow for both boolean or callable skip conditions.
if isinstance(skip_condition, collections.Callable):
skip_val = lambda: skip_condition()
else:
skip_val = lambda: skip_condition
def get_msg(func,msg=None):
"""Skip message with information about function being skipped."""
if msg is None:
out = 'Test skipped due to test condition'
else:
out = msg
return "Skipping test: %s: %s" % (func.__name__, out)
# We need to define *two* skippers because Python doesn't allow both
# return with value and yield inside the same function.
def skipper_func(*args, **kwargs):
"""Skipper for normal test functions."""
if skip_val():
raise nose.SkipTest(get_msg(f, msg))
else:
return f(*args, **kwargs)
def skipper_gen(*args, **kwargs):
"""Skipper for test generators."""
if skip_val():
raise nose.SkipTest(get_msg(f, msg))
else:
for x in f(*args, **kwargs):
yield x
# Choose the right skipper to use when building the actual decorator.
if nose.util.isgenerator(f):
skipper = skipper_gen
else:
skipper = skipper_func
return nose.tools.make_decorator(f)(skipper)
return skip_decorator
def knownfailureif(fail_condition, msg=None):
"""
Make function raise KnownFailureTest exception if given condition is true.
If the condition is a callable, it is used at runtime to dynamically
make the decision. This is useful for tests that may require costly
imports, to delay the cost until the test suite is actually executed.
Parameters
----------
fail_condition : bool or callable
Flag to determine whether to mark the decorated test as a known
failure (if True) or not (if False).
msg : str, optional
Message to give on raising a KnownFailureTest exception.
Default is None.
Returns
-------
decorator : function
Decorator, which, when applied to a function, causes SkipTest
to be raised when `skip_condition` is True, and the function
to be called normally otherwise.
Notes
-----
The decorator itself is decorated with the ``nose.tools.make_decorator``
function in order to transmit function name, and various other metadata.
"""
if msg is None:
msg = 'Test skipped due to known failure'
# Allow for both boolean or callable known failure conditions.
if isinstance(fail_condition, collections.Callable):
fail_val = lambda: fail_condition()
else:
fail_val = lambda: fail_condition
def knownfail_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
from .noseclasses import KnownFailureTest
def knownfailer(*args, **kwargs):
if fail_val():
raise KnownFailureTest(msg)
else:
return f(*args, **kwargs)
return nose.tools.make_decorator(f)(knownfailer)
return knownfail_decorator
def deprecated(conditional=True):
"""
Filter deprecation warnings while running the test suite.
This decorator can be used to filter DeprecationWarning's, to avoid
printing them during the test suite run, while checking that the test
actually raises a DeprecationWarning.
Parameters
----------
conditional : bool or callable, optional
Flag to determine whether to mark test as deprecated or not. If the
condition is a callable, it is used at runtime to dynamically make the
decision. Default is True.
Returns
-------
decorator : function
The `deprecated` decorator itself.
Notes
-----
.. versionadded:: 1.4.0
"""
def deprecate_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
def _deprecated_imp(*args, **kwargs):
# Poor man's replacement for the with statement
with warnings.catch_warnings(record=True) as l:
warnings.simplefilter('always')
f(*args, **kwargs)
if not len(l) > 0:
raise AssertionError("No warning raised when calling %s"
% f.__name__)
if not l[0].category is DeprecationWarning:
raise AssertionError("First warning for %s is not a "
"DeprecationWarning( is %s)" % (f.__name__, l[0]))
if isinstance(conditional, collections.Callable):
cond = conditional()
else:
cond = conditional
if cond:
return nose.tools.make_decorator(f)(_deprecated_imp)
else:
return f
return deprecate_decorator
| mit |
alirizakeles/zato | code/zato-zmq/src/zato/zmq_/mdp/worker.py | 1 | 9531 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2016 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import logging
import time
from datetime import datetime, timedelta
# ZeroMQ
import zmq.green as zmq
# Zato
from zato.zmq_.mdp import BaseZMQConnection, const, EventWorkerDisconnect, EventWorkerHeartbeat, EventReady, EventWorkerReply
# ################################################################################################################################
logger = logging.getLogger(__name__)
# ################################################################################################################################
class Worker(BaseZMQConnection):
""" Standalone implementation of a worker for ZeroMQ Majordomo Protocol 0.1 http://rfc.zeromq.org/spec:7
"""
def __init__(self, service_name, broker_address='tcp://localhost:47047', linger=0, poll_interval=100, log_details=False,
heartbeat=3, heartbeat_mult=2, reconnect_sleep=2):
self.service_name = service_name
super(Worker, self).__init__(broker_address, linger, poll_interval, log_details)
# How often, in seconds, to send a heartbeat to the broker or expect one from the broker
self.heartbeat = heartbeat
# If self.heartbeat * self.heartbeat_mult is exceeded, we assume the broker is down
self.heartbeat_mult = heartbeat_mult
# How long, in seconds, to wait before attempting to reconnect to the broker
self.reconnect_sleep = reconnect_sleep
# When did we last hear from the broker
self.broker_last_heartbeat = None
# When did we last send our own heartbeat to the broker
self.worker_last_heartbeat = None
# Timestamp of when we started to run
self.last_connected = datetime.utcnow()
self.has_debug = logger.isEnabledFor(logging.DEBUG)
# Maps event IDs to methods that handle a given one
self.handle_event_map = {
const.v01.request_to_worker: self.on_event_request_to_worker,
const.v01.heartbeat: self.on_event_heartbeat,
const.v01.disconnect: self.on_event_disconnect,
}
# ################################################################################################################################
def connect(self):
logger.info('Connecting to broker %s', self.broker_address)
# Open ZeroMQ sockets first
# From worker to broker
self.client_socket.connect(self.broker_address)
# From broker to worker
self.worker_socket = self.ctx.socket(zmq.DEALER)
self.worker_socket.linger = self.linger
self.worker_poller = zmq.Poller()
self.worker_poller.register(self.worker_socket, zmq.POLLIN)
self.worker_socket.connect(self.broker_address)
# Ok, we are ready
self.notify_ready()
# We can assume that the broker received our message
self.last_connected = datetime.utcnow()
# ################################################################################################################################
def stop(self):
self.worker_poller.unregister(self.worker_socket)
self.worker_socket.close()
self.stop_client_socket()
self.connect_client_socket()
logger.info('Stopped worker for %s', self.broker_address)
# ################################################################################################################################
def needs_reconnect(self):
base_timestamp = self.broker_last_heartbeat if self.broker_last_heartbeat else self.last_connected
return datetime.utcnow() >= base_timestamp + timedelta(seconds=self.heartbeat * self.heartbeat_mult)
# ################################################################################################################################
def reconnect(self):
last_hb = '{} (UTC)'.format(self.broker_last_heartbeat.isoformat()) if self.broker_last_heartbeat else 'never'
logger.info('Sleeping for %ss before reconnecting to broker %s, last HB from broker: %s',
self.reconnect_sleep, self.broker_address, last_hb)
time.sleep(self.reconnect_sleep)
logger.info('Reconnecting to broker %s', self.broker_address)
self.stop()
self.connect()
# Let's give the other side a moment to reply to our ready event
time.sleep(self.reconnect_sleep)
# ################################################################################################################################
def needs_hb_to_broker(self):
return datetime.utcnow() >= self.worker_last_heartbeat + timedelta(seconds=self.heartbeat)
# ################################################################################################################################
def serve_forever(self):
# To speed up look-ups
log_details = self.log_details
# Main loop
while self.keep_running:
try:
items = self.worker_poller.poll(self.poll_interval)
except KeyboardInterrupt:
self.notify_disconnect()
break
if items:
msg = self.worker_socket.recv_multipart()
if log_details:
logger.info('Received msg at %s %s', self.broker_address, msg)
self.handle(msg)
else:
if log_details:
logger.info('No items for worker at %s', self.broker_address)
if self.needs_hb_to_broker():
self.notify_heartbeat()
if self.needs_reconnect():
self.reconnect()
# ################################################################################################################################
def on_event_request_to_worker(self, msg):
logger.info('In _handle %s', msg)
return datetime.utcnow().isoformat()
# ################################################################################################################################
def on_event_heartbeat(self, *ignored):
""" A no-op since self.handle already handles heartbeats from the broker.
"""
# ################################################################################################################################
def on_event_disconnect(self, *ignored):
""" Our broker tells us to disconnect - according to the spec we now must re-open the connection.
"""
self.reconnect()
# ################################################################################################################################
def handle(self, msg):
logger.info('Handling %s', msg)
# Since we received this message, it means the broker is up so the message,
# no matter what event it is, allows us to update the timestamp of the last HB from broker
self.broker_last_heartbeat = datetime.utcnow()
sender_id = None
body = None
command = msg[2]
if command == const.v01.request_to_worker:
sender_id = msg[3]
body = msg[4]
# Hand over the message to an actual implementation and reply if told to
response = self.handle_event_map[command](body)
if response:
self.send(EventWorkerReply(response, sender_id).serialize())
# Message handled, we are ready to handle a new one, assuming this one was a request
if command == const.v01.request_to_worker:
self.notify_ready()
# ################################################################################################################################
def send(self, data, needs_hb=True):
""" Sends data to the broker and updates an internal timer of when the last time we send a heartbeat to the broker
since sending anything in that direction should be construed by the broker as a heartbeat itself.
"""
# Send data first
self.worker_socket.send_multipart(data)
# Update the timer
if needs_hb:
self.worker_last_heartbeat = datetime.utcnow()
# ################################################################################################################################
def notify_ready(self):
""" Notify the broker that we are ready to handle a new message.
"""
self.send(EventReady(self.service_name).serialize())
# ################################################################################################################################
def notify_heartbeat(self):
""" Notify the broker that we are still around.
"""
self.send(EventWorkerHeartbeat().serialize())
# ################################################################################################################################
def notify_disconnect(self):
""" Notify the broker that we are to disconnect from it.
"""
self.send(EventWorkerDisconnect().serialize(), needs_hb=False)
# ################################################################################################################################
if __name__ == '__main__':
w = Worker(b'My service', 'tcp://localhost:47047')
w.connect()
w.serve_forever()
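# Illustrative extension sketch (hypothetical subclass) - a real service would
# override on_event_request_to_worker; whatever it returns is wrapped in an
# EventWorkerReply by handle() and sent back to the requesting client:
#
#     class EchoWorker(Worker):
#         def on_event_request_to_worker(self, msg):
#             return msg  # echo the request body back unchanged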
| gpl-3.0 |
mahinthjoe/bedrock | py3env/lib/python3.4/site-packages/pip/index.py | 237 | 47847 | """Routines related to PyPI, indexes"""
from __future__ import absolute_import
import logging
import cgi
from collections import namedtuple
import itertools
import sys
import os
import re
import mimetypes
import posixpath
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.compat import ipaddress
from pip.utils import (
Inf, cached_property, normalize_name, splitext, normalize_path,
ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS)
from pip.utils.deprecation import RemovedInPip8Warning
from pip.utils.logging import indent_log
from pip.exceptions import (
DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename,
UnsupportedWheel,
)
from pip.download import HAS_TLS, url_to_path, path_to_url
from pip.models import PyPI
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
from pip._vendor import html5lib, requests, pkg_resources, six
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.requests.exceptions import SSLError
__all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder']
# Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
SECURE_ORIGINS = [
# protocol, hostname, port
("https", "*", "*"),
("*", "localhost", "*"),
("*", "127.0.0.0/8", "*"),
("*", "::1/128", "*"),
("file", "*", None),
]
logger = logging.getLogger(__name__)
class InstallationCandidate(object):
def __init__(self, project, version, location):
self.project = project
self.version = parse_version(version)
self.location = location
self._key = (self.project, self.version, self.location)
def __repr__(self):
return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format(
self.project, self.version, self.location,
)
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, InstallationCandidate):
return NotImplemented
return method(self._key, other._key)
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links.
"""
def __init__(self, find_links, index_urls,
allow_external=(), allow_unverified=(),
allow_all_external=False, allow_all_prereleases=False,
trusted_hosts=None, process_dependency_links=False,
session=None, format_control=None):
"""Create a PackageFinder.
:param format_control: A FormatControl object or None. Used to control
the selection of source packages / binary packages when consulting
the index and links.
"""
if session is None:
raise TypeError(
"PackageFinder() missing 1 required keyword argument: "
"'session'"
)
# Build find_links. If an argument starts with ~, it may be
# a local file relative to a home directory. So try normalizing
# it and if it exists, use the normalized version.
# This is deliberately conservative - it might be fine just to
# blindly normalize anything starting with a ~...
self.find_links = []
for link in find_links:
if link.startswith('~'):
new_link = normalize_path(link)
if os.path.exists(new_link):
link = new_link
self.find_links.append(link)
self.index_urls = index_urls
self.dependency_links = []
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.format_control = format_control or FormatControl(set(), set())
# Do we allow (safe and verifiable) externally hosted files?
self.allow_external = set(normalize_name(n) for n in allow_external)
# Which names are allowed to install insecure and unverifiable files?
self.allow_unverified = set(
normalize_name(n) for n in allow_unverified
)
# Anything that is allowed unverified is also allowed external
self.allow_external |= self.allow_unverified
# Do we allow all (safe and verifiable) externally hosted files?
self.allow_all_external = allow_all_external
# Domains that we won't emit warnings for when not using HTTPS
self.secure_origins = [
("*", host, "*")
for host in (trusted_hosts if trusted_hosts else [])
]
# Stores if we ignored any external links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_external = False
# Stores if we ignored any unsafe links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_unverified = False
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
# The Session we'll use to make requests
self.session = session
# If we don't have TLS enabled, then WARN if anyplace we're looking
# relies on TLS.
if not HAS_TLS:
for link in itertools.chain(self.index_urls, self.find_links):
parsed = urllib_parse.urlparse(link)
if parsed.scheme == "https":
logger.warning(
"pip is configured with locations that require "
"TLS/SSL, however the ssl module in Python is not "
"available."
)
break
def add_dependency_links(self, links):
# # FIXME: this shouldn't be global list this, it should only
# # apply to requirements of the package that specifies the
# # dependency_links value
# # FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
warnings.warn(
"Dependency Links processing has been deprecated and will be "
"removed in a future release.",
RemovedInPip8Warning,
)
self.dependency_links.extend(links)
@staticmethod
def _sort_locations(locations, expand_dir=False):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls
def _candidate_sort_key(self, candidate):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
        with the same version would have to be considered equal
"""
support_num = len(supported_tags)
if candidate.location == INSTALLED_VERSION:
pri = 1
elif candidate.location.is_wheel:
# can raise InvalidWheelFilename
wheel = Wheel(candidate.location.filename)
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel for this platform. It "
"can't be sorted." % wheel.filename
)
pri = -(wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (candidate.version, pri)
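    # Illustrative note (not part of the original pip source), spelling out
    # the ordering implied by the docstring above: for candidates sharing a
    # version, an existing install gets priority 1, a supported wheel gets
    # -(support index) in the range 0 .. -(support_num - 1), and an sdist
    # gets -support_num, so sorting the (version, pri) keys in reverse
    # prefers installs, then wheels, then source archives.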
def _sort_versions(self, applicable_versions):
"""
Bring the latest version (and wheels) to the front, but maintain the
        existing ordering as secondary. See the docstring for
        `_candidate_sort_key` for details. This function is isolated for
        easier unit testing.
"""
return sorted(
applicable_versions,
key=self._candidate_sort_key,
reverse=True
)
def _validate_secure_origin(self, logger, location):
# Determine if this url used a secure transport mechanism
parsed = urllib_parse.urlparse(str(location))
origin = (parsed.scheme, parsed.hostname, parsed.port)
# Determine if our origin is a secure origin by looking through our
# hardcoded list of secure origins, as well as any additional ones
# configured on this PackageFinder instance.
for secure_origin in (SECURE_ORIGINS + self.secure_origins):
# Check to see if the protocol matches
if origin[0] != secure_origin[0] and secure_origin[0] != "*":
continue
try:
# We need to do this decode dance to ensure that we have a
# unicode object, even on Python 2.x.
addr = ipaddress.ip_address(
origin[1]
if (
isinstance(origin[1], six.text_type) or
origin[1] is None
)
else origin[1].decode("utf8")
)
network = ipaddress.ip_network(
secure_origin[1]
if isinstance(secure_origin[1], six.text_type)
else secure_origin[1].decode("utf8")
)
except ValueError:
# We don't have both a valid address or a valid network, so
# we'll check this origin against hostnames.
if origin[1] != secure_origin[1] and secure_origin[1] != "*":
continue
else:
# We have a valid address and network, so see if the address
# is contained within the network.
if addr not in network:
continue
            # Check to see if the port matches
if (origin[2] != secure_origin[2] and
secure_origin[2] != "*" and
secure_origin[2] is not None):
continue
# If we've gotten here, then this origin matches the current
# secure origin and we should return True
return True
# If we've gotten to this point, then the origin isn't secure and we
# will not accept it as a valid location to search. We will however
# log a warning that we are ignoring it.
logger.warning(
"The repository located at %s is not a trusted or secure host and "
"is being ignored. If this repository is available via HTTPS it "
"is recommended to use HTTPS instead, otherwise you may silence "
"this warning and allow it anyways with '--trusted-host %s'.",
parsed.hostname,
parsed.hostname,
)
return False
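    # Illustrative sketch (not part of the original pip source): with
    # trusted_hosts=["pypi.internal.lan"] passed to PackageFinder, the
    # constructor adds ("*", "pypi.internal.lan", "*") to self.secure_origins,
    # so a plain-HTTP index such as
    #     http://pypi.internal.lan/simple/
    # falls through the hostname/port wildcard checks above and
    # _validate_secure_origin returns True instead of logging the warning.
    # The host name here is only an example.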
def _get_index_urls_locations(self, project_name):
"""Returns the locations found via self.index_urls
Checks the url_name on the main (first in the list) index and
        uses this url_name to produce all locations
"""
def mkurl_pypi_url(url):
loc = posixpath.join(url, project_url_name)
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's
# behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
project_url_name = urllib_parse.quote(project_name.lower())
if self.index_urls:
# Check that we have the url_name correctly spelled:
# Only check main index if index URL is given
main_index_url = Link(
mkurl_pypi_url(self.index_urls[0]),
trusted=True,
)
page = self._get_page(main_index_url)
if page is None and PyPI.netloc not in str(main_index_url):
warnings.warn(
"Failed to find %r at %s. It is suggested to upgrade "
"your index to support normalized names as the name in "
"/simple/{name}." % (project_name, main_index_url),
RemovedInPip8Warning,
)
project_url_name = self._find_url_name(
Link(self.index_urls[0], trusted=True),
project_url_name,
) or project_url_name
if project_url_name is not None:
return [mkurl_pypi_url(url) for url in self.index_urls]
return []
def _find_all_versions(self, project_name):
"""Find all available versions for project_name
This checks index_urls, find_links and dependency_links
All versions found are returned
See _link_package_versions for details on which files are accepted
"""
index_locations = self._get_index_urls_locations(project_name)
index_file_loc, index_url_loc = self._sort_locations(index_locations)
fl_file_loc, fl_url_loc = self._sort_locations(
self.find_links, expand_dir=True)
dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links)
file_locations = (
Link(url) for url in itertools.chain(
index_file_loc, fl_file_loc, dep_file_loc)
)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
# We explicitly do not trust links that came from dependency_links
# We want to filter out any thing which does not have a secure origin.
url_locations = [
link for link in itertools.chain(
(Link(url, trusted=True) for url in index_url_loc),
(Link(url, trusted=True) for url in fl_url_loc),
(Link(url) for url in dep_url_loc),
)
if self._validate_secure_origin(logger, link)
]
logger.debug('%d location(s) to search for versions of %s:',
len(url_locations), project_name)
for location in url_locations:
logger.debug('* %s', location)
canonical_name = pkg_resources.safe_name(project_name).lower()
formats = fmt_ctl_formats(self.format_control, canonical_name)
search = Search(project_name.lower(), canonical_name, formats)
find_links_versions = self._package_versions(
# We trust every directly linked archive in find_links
(Link(url, '-f', trusted=True) for url in self.find_links),
search
)
page_versions = []
for page in self._get_pages(url_locations, project_name):
logger.debug('Analyzing links from page %s', page.url)
with indent_log():
page_versions.extend(
self._package_versions(page.links, search)
)
dependency_versions = self._package_versions(
(Link(url) for url in self.dependency_links), search
)
if dependency_versions:
logger.debug(
'dependency_links found: %s',
', '.join([
version.location.url for version in dependency_versions
])
)
file_versions = self._package_versions(file_locations, search)
if file_versions:
file_versions.sort(reverse=True)
logger.debug(
'Local files found: %s',
', '.join([
url_to_path(candidate.location.url)
for candidate in file_versions
])
)
# This is an intentional priority ordering
return (
file_versions + find_links_versions + page_versions +
dependency_versions
)
def find_requirement(self, req, upgrade):
"""Try to find an InstallationCandidate for req
Expects req, an InstallRequirement and upgrade, a boolean
Returns an InstallationCandidate or None
May raise DistributionNotFound or BestVersionAlreadyInstalled
"""
all_versions = self._find_all_versions(req.name)
# Filter out anything which doesn't match our specifier
_versions = set(
req.specifier.filter(
# We turn the version object into a str here because otherwise
# when we're debundled but setuptools isn't, Python will see
# packaging.version.Version and
# pkg_resources._vendor.packaging.version.Version as different
# types. This way we'll use a str as a common data interchange
# format. If we stop using the pkg_resources provided specifier
# and start using our own, we can drop the cast to str().
[str(x.version) for x in all_versions],
prereleases=(
self.allow_all_prereleases
if self.allow_all_prereleases else None
),
)
)
applicable_versions = [
# Again, converting to str to deal with debundling.
x for x in all_versions if str(x.version) in _versions
]
if req.satisfied_by is not None:
# Finally add our existing versions to the front of our versions.
applicable_versions.insert(
0,
InstallationCandidate(
req.name,
req.satisfied_by.version,
INSTALLED_VERSION,
)
)
existing_applicable = True
else:
existing_applicable = False
applicable_versions = self._sort_versions(applicable_versions)
if not upgrade and existing_applicable:
if applicable_versions[0].location is INSTALLED_VERSION:
logger.debug(
'Existing installed version (%s) is most up-to-date and '
'satisfies requirement',
req.satisfied_by.version,
)
else:
logger.debug(
'Existing installed version (%s) satisfies requirement '
'(most up-to-date version is %s)',
req.satisfied_by.version,
applicable_versions[0][2],
)
return None
if not applicable_versions:
logger.critical(
'Could not find a version that satisfies the requirement %s '
'(from versions: %s)',
req,
', '.join(
sorted(
set(str(i.version) for i in all_versions),
key=parse_version,
)
)
)
if self.need_warn_external:
logger.warning(
"Some externally hosted files were ignored as access to "
"them may be unreliable (use --allow-external %s to "
"allow).",
req.name,
)
if self.need_warn_unverified:
logger.warning(
"Some insecure and unverifiable files were ignored"
" (use --allow-unverified %s to allow).",
req.name,
)
raise DistributionNotFound(
'No matching distribution found for %s' % req
)
if applicable_versions[0].location is INSTALLED_VERSION:
# We have an existing version, and its the best version
logger.debug(
'Installed version (%s) is most up-to-date (past versions: '
'%s)',
req.satisfied_by.version,
', '.join(str(i.version) for i in applicable_versions[1:]) or
"none",
)
raise BestVersionAlreadyInstalled
if len(applicable_versions) > 1:
logger.debug(
'Using version %s (newest of versions: %s)',
applicable_versions[0].version,
', '.join(str(i.version) for i in applicable_versions)
)
selected_version = applicable_versions[0].location
if (selected_version.verifiable is not None and not
selected_version.verifiable):
logger.warning(
"%s is potentially insecure and unverifiable.", req.name,
)
return selected_version
def _find_url_name(self, index_url, url_name):
"""
Finds the true URL name of a package, when the given name isn't quite
correct.
This is usually used to implement case-insensitivity.
"""
if not index_url.url.endswith('/'):
# Vaguely part of the PyPI API... weird but true.
# FIXME: bad to modify this?
index_url.url += '/'
page = self._get_page(index_url)
if page is None:
logger.critical('Cannot fetch index base URL %s', index_url)
return
norm_name = normalize_name(url_name)
for link in page.links:
base = posixpath.basename(link.path.rstrip('/'))
if norm_name == normalize_name(base):
logger.debug(
'Real name of requirement %s is %s', url_name, base,
)
return base
return None
def _get_pages(self, locations, project_name):
"""
        Yields HTMLPage objects fetched from the given locations, skipping
        locations that have errors, and following each page's
        download/homepage rel links as additional locations
"""
all_locations = list(locations)
seen = set()
normalized = normalize_name(project_name)
while all_locations:
location = all_locations.pop(0)
if location in seen:
continue
seen.add(location)
page = self._get_page(location)
if page is None:
continue
yield page
for link in page.rel_links():
if (normalized not in self.allow_external and not
self.allow_all_external):
self.need_warn_external = True
logger.debug(
"Not searching %s for files because external "
"urls are disallowed.",
link,
)
continue
if (link.trusted is not None and not
link.trusted and
normalized not in self.allow_unverified):
logger.debug(
"Not searching %s for urls, it is an "
"untrusted link and cannot produce safe or "
"verifiable files.",
link,
)
self.need_warn_unverified = True
continue
all_locations.append(link)
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search):
result = []
for link in self._sort_links(links):
v = self._link_package_versions(link, search)
if v is not None:
result.append(v)
return result
def _log_skipped_link(self, link, reason):
if link not in self.logged_links:
logger.debug('Skipping link %s; %s', link, reason)
self.logged_links.add(link)
def _link_package_versions(self, link, search):
"""Return an InstallationCandidate or None"""
platform = get_platform()
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
ext = link.ext
else:
egg_info, ext = link.splitext()
if not ext:
self._log_skipped_link(link, 'not a file')
return
if ext not in SUPPORTED_EXTENSIONS:
self._log_skipped_link(
link, 'unsupported archive format: %s' % ext)
return
if "binary" not in search.formats and ext == wheel_ext:
self._log_skipped_link(
link, 'No binaries permitted for %s' % search.supplied)
return
if "macosx10" in link.path and ext == '.zip':
self._log_skipped_link(link, 'macosx10 one')
return
if ext == wheel_ext:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
self._log_skipped_link(link, 'invalid wheel filename')
return
if (pkg_resources.safe_name(wheel.name).lower() !=
search.canonical):
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
if not wheel.supported():
self._log_skipped_link(
link, 'it is not compatible with this Python')
return
# This is a dirty hack to prevent installing Binary Wheels from
# PyPI unless it is a Windows or Mac Binary Wheel. This is
# paired with a change to PyPI disabling uploads for the
# same. Once we have a mechanism for enabling support for
# binary wheels on linux that deals with the inherent problems
# of binary distribution this can be removed.
comes_from = getattr(link, "comes_from", None)
if (
(
not platform.startswith('win') and not
platform.startswith('macosx') and not
platform == 'cli'
) and
comes_from is not None and
urllib_parse.urlparse(
comes_from.url
).netloc.endswith(PyPI.netloc)):
if not wheel.supported(tags=supported_tags_noarch):
self._log_skipped_link(
link,
"it is a pypi-hosted binary "
"Wheel on an unsupported platform",
)
return
version = wheel.version
# This should be up by the search.ok_binary check, but see issue 2700.
if "source" not in search.formats and ext != wheel_ext:
self._log_skipped_link(
link, 'No sources permitted for %s' % search.supplied)
return
if not version:
version = egg_info_matches(egg_info, search.supplied, link)
if version is None:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
if (link.internal is not None and not
link.internal and not
normalize_name(search.supplied).lower()
in self.allow_external and not
self.allow_all_external):
# We have a link that we are sure is external, so we should skip
# it unless we are allowing externals
self._log_skipped_link(link, 'it is externally hosted')
self.need_warn_external = True
return
if (link.verifiable is not None and not
link.verifiable and not
(normalize_name(search.supplied).lower()
in self.allow_unverified)):
# We have a link that we are sure we cannot verify its integrity,
# so we should skip it unless we are allowing unsafe installs
# for this requirement.
self._log_skipped_link(
link, 'it is an insecure and unverifiable file')
self.need_warn_unverified = True
return
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
self._log_skipped_link(
link, 'Python version is incorrect')
return
logger.debug('Found link %s, version: %s', link, version)
return InstallationCandidate(search.supplied, version, link)
def _get_page(self, link):
return HTMLPage.get_page(link, session=self.session)
def egg_info_matches(
egg_info, search_name, link,
_egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):
"""Pull the version part out of a string.
:param egg_info: The string to parse. E.g. foo-2.1
:param search_name: The name of the package this belongs to. None to
infer the name. Note that this cannot unambiguously parse strings
like foo-2-2 which might be foo, 2-2 or foo-2, 2.
:param link: The link the string came from, for logging on failure.
"""
match = _egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s', link)
return None
if search_name is None:
full_match = match.group(0)
return full_match[full_match.index('-'):]
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
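# Illustrative sketch (not part of the original pip source); the link
# argument is only used for logging, so any Link instance would do:
#
#     egg_info_matches('foo-2.1', 'foo', link)          # -> '2.1'
#     egg_info_matches('Foo_Bar-1.0', 'foo-bar', link)  # -> '1.0'
#     egg_info_matches('foo-2.1', None, link)           # -> '-2.1'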
class HTMLPage(object):
"""Represents one page, along with its URL"""
def __init__(self, content, url, headers=None, trusted=None):
# Determine if we have any encoding information in our headers
encoding = None
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
encoding = params['charset']
self.content = content
self.parsed = html5lib.parse(
self.content,
encoding=encoding,
namespaceHTMLElements=False,
)
self.url = url
self.headers = headers
self.trusted = trusted
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, skip_archives=True, session=None):
if session is None:
raise TypeError(
"get_page() missing 1 required keyword argument: 'session'"
)
url = link.url
url = url.split('#', 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
from pip.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %s URL %s', scheme, link)
return None
try:
if skip_archives:
filename = link.filename
for bad_ext in ARCHIVE_EXTENSIONS:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(
url, session=session,
)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
logger.debug('Getting page %s', url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = \
urllib_parse.urlparse(url)
if (scheme == 'file' and
os.path.isdir(urllib_request.url2pathname(path))):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith('/'):
url += '/'
url = urllib_parse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s', url)
resp = session.get(
url,
headers={
"Accept": "text/html",
"Cache-Control": "max-age=600",
},
)
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement of an url. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
content_type = resp.headers.get('Content-Type', 'unknown')
if not content_type.lower().startswith("text/html"):
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
inst = cls(
resp.content, resp.url, resp.headers,
trusted=link.trusted,
)
except requests.HTTPError as exc:
level = 2 if exc.response.status_code == 404 else 1
cls._handle_fail(link, exc, url, level=level)
except requests.ConnectionError as exc:
cls._handle_fail(link, "connection error: %s" % exc, url)
except requests.Timeout:
cls._handle_fail(link, "timed out", url)
except SSLError as exc:
reason = ("There was a problem confirming the ssl certificate: "
"%s" % exc)
cls._handle_fail(link, reason, url, level=2, meth=logger.info)
else:
return inst
@staticmethod
def _handle_fail(link, reason, url, level=1, meth=None):
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
@staticmethod
def _get_content_type(url, session):
"""Get the Content-Type of the given url, using a HEAD request"""
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in ('http', 'https'):
# FIXME: some warning or something?
# assertion error?
return ''
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
return resp.headers.get("Content-Type", "")
@cached_property
def api_version(self):
metas = [
x for x in self.parsed.findall(".//meta")
if x.get("name", "").lower() == "api-version"
]
if metas:
try:
return int(metas[0].get("value", None))
except (TypeError, ValueError):
pass
return None
@cached_property
def base_url(self):
bases = [
x for x in self.parsed.findall(".//base")
if x.get("href") is not None
]
if bases and bases[0].get("href"):
return bases[0].get("href")
else:
return self.url
@property
def links(self):
"""Yields all links in the page"""
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(
urllib_parse.urljoin(self.base_url, href)
)
# Determine if this link is internal. If that distinction
# doesn't make sense in this context, then we don't make
# any distinction.
internal = None
if self.api_version and self.api_version >= 2:
# Only api_versions >= 2 have a distinction between
# external and internal links
internal = bool(
anchor.get("rel") and
"internal" in anchor.get("rel").split()
)
yield Link(url, self, internal=internal)
def rel_links(self, rels=('homepage', 'download')):
"""Yields all links with the given relations"""
rels = set(rels)
for anchor in self.parsed.findall(".//a"):
if anchor.get("rel") and anchor.get("href"):
found_rels = set(anchor.get("rel").split())
# Determine the intersection between what rels were found and
# what rels were being looked for
if found_rels & rels:
href = anchor.get("href")
url = self.clean_link(
urllib_parse.urljoin(self.base_url, href)
)
yield Link(url, self, trusted=False)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
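# Illustrative sketch (not part of the original pip source): given an
# HTMLPage instance `page`,
#     page.clean_link('https://host/some path')
# would return 'https://host/some%20path', since the space is the only
# character outside the allowed class in _clean_re.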
class Link(object):
def __init__(self, url, comes_from=None, internal=None, trusted=None):
# url can be a UNC windows share
if url != Inf and url.startswith('\\\\'):
url = path_to_url(url)
self.url = url
self.comes_from = comes_from
self.internal = internal
self.trusted = trusted
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url == other.url
def __ne__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url != other.url
def __lt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url < other.url
def __le__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url <= other.url
def __gt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url > other.url
def __ge__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url >= other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
name = urllib_parse.unquote(name)
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urllib_parse.urlsplit(self.url)[0]
@property
def netloc(self):
return urllib_parse.urlsplit(self.url)[1]
@property
def path(self):
return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2])
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
return self.splitext()[1]
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(
r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
)
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def verifiable(self):
"""
Returns True if this link can be verified after download, False if it
cannot, and None if we cannot determine.
"""
trusted = self.trusted or getattr(self.comes_from, "trusted", None)
if trusted is not None and trusted:
# This link came from a trusted source. It *may* be verifiable but
# first we need to see if this page is operating under the new
# API version.
try:
api_version = getattr(self.comes_from, "api_version", None)
api_version = int(api_version)
except (ValueError, TypeError):
api_version = None
if api_version is None or api_version <= 1:
                # This link is either trusted, or it came from a trusted
                # source; however, it is not operating under API version 2,
                # so we can't make any claims about whether it's safe or not
return
if self.hash:
# This link came from a trusted source and it has a hash, so we
# can consider it safe.
return True
else:
# This link came from a trusted source, using the new API
# version, and it does not have a hash. It is NOT verifiable
return False
elif trusted is not None:
# This link came from an untrusted source and we cannot trust it
return False
@property
def is_wheel(self):
return self.ext == wheel_ext
@property
def is_artifact(self):
"""
Determines if this points to an actual artifact (e.g. a tarball) or if
it points to an "abstract" thing like a path or a VCS location.
"""
from pip.vcs import vcs
if self.scheme in vcs.all_schemes:
return False
return True
# An object to represent the "link" for the installed version of a requirement.
# Using Inf as the url makes it sort higher.
INSTALLED_VERSION = Link(Inf)
FormatControl = namedtuple('FormatControl', 'no_binary only_binary')
"""This object has two fields, no_binary and only_binary.
If a field is falsy, it isn't set. If it is {':all:'}, it should match all
packages except those listed in the other field. Only one field can be set
to {':all:'} at a time. The rest of the time exact package name matches
are listed, with any given package only showing up in one field at a time.
"""
def fmt_ctl_handle_mutual_exclude(value, target, other):
new = value.split(',')
while ':all:' in new:
other.clear()
target.clear()
target.add(':all:')
del new[:new.index(':all:') + 1]
if ':none:' not in new:
# Without a none, we want to discard everything as :all: covers it
return
for name in new:
if name == ':none:':
target.clear()
continue
name = pkg_resources.safe_name(name).lower()
other.discard(name)
target.add(name)
def fmt_ctl_formats(fmt_ctl, canonical_name):
result = set(["binary", "source"])
if canonical_name in fmt_ctl.only_binary:
result.discard('source')
elif canonical_name in fmt_ctl.no_binary:
result.discard('binary')
elif ':all:' in fmt_ctl.only_binary:
result.discard('source')
elif ':all:' in fmt_ctl.no_binary:
result.discard('binary')
return frozenset(result)
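# Illustrative sketch (not part of the original pip source), showing how the
# two FormatControl fields documented above resolve for a single package:
#
#     fc = FormatControl(no_binary=set(), only_binary=set([':all:']))
#     fmt_ctl_formats(fc, 'foo')   # -> frozenset(['binary'])
#     fc = FormatControl(no_binary=set(['foo']), only_binary=set())
#     fmt_ctl_formats(fc, 'foo')   # -> frozenset(['source'])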
def fmt_ctl_no_binary(fmt_ctl):
fmt_ctl_handle_mutual_exclude(
':all:', fmt_ctl.no_binary, fmt_ctl.only_binary)
def fmt_ctl_no_use_wheel(fmt_ctl):
fmt_ctl_no_binary(fmt_ctl)
warnings.warn(
'--no-use-wheel is deprecated and will be removed in the future. '
' Please use --no-binary :all: instead.', DeprecationWarning,
stacklevel=2)
Search = namedtuple('Search', 'supplied canonical formats')
"""Capture key aspects of a search.
:attribute supplied: The user supplied package.
:attribute canonical: The canonical package name.
:attribute formats: The formats allowed for this package. Should be a set
with 'binary' or 'source' or both in it.
"""
| mpl-2.0 |
halwai/cvxpy | cvxpy/problems/iterative.py | 12 | 4961 | """
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
# Methods for SCS iterative solver.
from cvxpy.lin_ops.tree_mat import mul, tmul, sum_dicts
import numpy as np
import scipy.sparse.linalg as LA
def get_mul_funcs(sym_data):
def accAmul(x, y, is_abs=False):
# y += A*x
rows = y.shape[0]
var_dict = vec_to_dict(x, sym_data.var_offsets,
sym_data.var_sizes)
y += constr_mul(sym_data.constraints, var_dict, rows, is_abs)
def accATmul(x, y, is_abs=False):
# y += A.T*x
terms = constr_unpack(sym_data.constraints, x)
val_dict = constr_tmul(sym_data.constraints, terms, is_abs)
y += dict_to_vec(val_dict, sym_data.var_offsets,
sym_data.var_sizes, sym_data.x_length)
return (accAmul, accATmul)
def constr_unpack(constraints, vector):
"""Unpacks a vector into a list of values for constraints.
"""
values = []
offset = 0
for constr in constraints:
rows, cols = constr.size
val = np.zeros((rows, cols))
for col in range(cols):
val[:, col] = vector[offset:offset+rows]
offset += rows
values.append(val)
return values
def vec_to_dict(vector, var_offsets, var_sizes):
"""Converts a vector to a map of variable id to value.
Parameters
----------
vector : NumPy matrix
The vector of values.
var_offsets : dict
A map of variable id to offset in the vector.
var_sizes : dict
A map of variable id to variable size.
Returns
-------
dict
A map of variable id to variable value.
"""
val_dict = {}
for id_, offset in var_offsets.items():
size = var_sizes[id_]
value = np.zeros(size)
offset = var_offsets[id_]
for col in range(size[1]):
value[:, col] = vector[offset:size[0]+offset]
offset += size[0]
val_dict[id_] = value
return val_dict
def dict_to_vec(val_dict, var_offsets, var_sizes, vec_len):
"""Converts a map of variable id to value to a vector.
Parameters
----------
val_dict : dict
A map of variable id to value.
var_offsets : dict
A map of variable id to offset in the vector.
var_sizes : dict
A map of variable id to variable size.
    vec_len : int
        The length of the vector to create.
"""
# TODO take in vector.
vector = np.zeros(vec_len)
for id_, value in val_dict.items():
size = var_sizes[id_]
offset = var_offsets[id_]
for col in range(size[1]):
# Handle scalars separately.
if np.isscalar(value):
vector[offset:size[0]+offset] = value
else:
vector[offset:size[0]+offset] = np.squeeze(value[:, col])
offset += size[0]
return vector
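# Illustrative sketch (not part of the original module): vec_to_dict and
# dict_to_vec invert each other for fixed offsets/sizes. Assuming a single
# 2x2 variable with id 0 stored at offset 0:
#
#     offsets, sizes = {0: 0}, {0: (2, 2)}
#     vec = np.arange(4.0)                        # [0., 1., 2., 3.]
#     d = vec_to_dict(vec, offsets, sizes)        # {0: [[0., 2.], [1., 3.]]}
#     np.allclose(dict_to_vec(d, offsets, sizes, 4), vec)   # True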
def constr_mul(constraints, var_dict, vec_size, is_abs):
"""Multiplies a vector by the matrix implied by the constraints.
Parameters
----------
constraints : list
A list of linear constraints.
var_dict : dict
A dictionary mapping variable id to value.
vec_size : int
The length of the product vector.
is_abs : bool
Multiply by the absolute value of the matrix?
"""
product = np.zeros(vec_size)
offset = 0
for constr in constraints:
result = mul(constr.expr, var_dict, is_abs)
rows, cols = constr.size
for col in range(cols):
# Handle scalars separately.
if np.isscalar(result):
product[offset:offset+rows] = result
else:
product[offset:offset+rows] = np.squeeze(result[:, col])
offset += rows
return product
def constr_tmul(constraints, values, is_abs):
"""Multiplies a vector by the transpose of the constraints matrix.
Parameters
----------
constraints : list
A list of linear constraints.
values : list
A list of NumPy matrices.
is_abs : bool
Multiply by the absolute value of the matrix?
Returns
-------
dict
A mapping of variable id to value.
"""
products = []
for constr, val in zip(constraints, values):
products.append(tmul(constr.expr, val, is_abs))
return sum_dicts(products)
| gpl-3.0 |
jeffheaton/aifh | vol3/vol3-python-examples/examples/example_timeseries.py | 2 | 1695 | #!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 3: Deep Learning and Neural Networks
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2015 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
import os
import sys
# Find the AIFH core files
aifh_dir = os.path.dirname(os.path.abspath(__file__))
aifh_dir = os.path.abspath(aifh_dir + os.sep + ".." + os.sep + "lib" + os.sep + "aifh")
sys.path.append(aifh_dir)
import numpy as np
from window import *
# Create a simple 3-column dataset. This will hold the values:
# [1, 10, 100]
# [2, 20, 200]
# ...
# [10, 100, 1000]
raw_data = []
for i in range(1,11):
raw_data.append([i,i*10,i*100])
raw_data = np.array(raw_data)
result_x, result_y = encode_timeseries_window(raw_data, 3, 1, [True, True, True], [False, False, True])
result_x = np.array(result_x)
result_y = np.array(result_y)
for x,y in zip(result_x, result_y):
print("{} --> {}".format(x,y)) | apache-2.0 |
pcingola/server | ga4gh/cli.py | 1 | 32399 | """
Command line interface programs for the GA4GH reference implementation.
TODO: document how to use these for development and simple deployment.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import unittest
import unittest.loader
import unittest.suite
import requests
import ga4gh.client as client
import ga4gh.converters as converters
import ga4gh.frontend as frontend
import ga4gh.configtest as configtest
import ga4gh.exceptions as exceptions
# the maximum value of a long type in avro = 2**63 - 1
# (64 bit signed integer)
# http://avro.apache.org/docs/1.7.7/spec.html#schema_primitive
# AVRO_LONG_MAX = (1 << 63) - 1
# TODO in the meantime, this is the max value pysam can handle
# This should be removed once pysam input sanitisation has been
# implemented.
AVRO_LONG_MAX = 2**31 - 1
##############################################################################
# Server
##############################################################################
def addServerOptions(parser):
parser.add_argument(
"--port", "-P", default=8000, type=int,
help="The port to listen on")
parser.add_argument(
"--host", "-H", default="127.0.0.1",
help="The server host string; use 0.0.0.0 to allow all connections.")
parser.add_argument(
"--config", "-c", default='DevelopmentConfig', type=str,
help="The configuration to use")
parser.add_argument(
"--config-file", "-f", type=str, default=None,
help="The configuration file to use")
parser.add_argument(
"--tls", "-t", action="store_true", default=False,
help="Start in TLS (https) mode.")
parser.add_argument(
"--dont-use-reloader", default=False, action="store_true",
help="Don't use the flask reloader")
addDisableUrllibWarningsArgument(parser)
def server_main(parser=None):
if parser is None:
parser = argparse.ArgumentParser(
description="GA4GH reference server")
addServerOptions(parser)
args = parser.parse_args()
if args.disable_urllib_warnings:
requests.packages.urllib3.disable_warnings()
frontend.configure(
args.config_file, args.config, args.port)
sslContext = None
if args.tls or ("OIDC_PROVIDER" in frontend.app.config):
sslContext = "adhoc"
frontend.app.run(
host=args.host, port=args.port,
use_reloader=not args.dont_use_reloader, ssl_context=sslContext)
##############################################################################
# Client
##############################################################################
def verbosityToLogLevel(verbosity):
"""
    Returns the specified verbosity level interpreted as a logging level.
"""
ret = 0
if verbosity == 1:
ret = logging.INFO
elif verbosity >= 2:
ret = logging.DEBUG
return ret
class AbstractQueryRunner(object):
"""
Abstract base class for runner classes
"""
def __init__(self, args):
self._key = args.key
self._httpClient = client.HttpClient(
args.baseUrl, verbosityToLogLevel(args.verbose), self._key)
class FormattedOutputRunner(AbstractQueryRunner):
"""
Superclass of runners that support output in common formats.
"""
def __init__(self, args):
super(FormattedOutputRunner, self).__init__(args)
self._output = self._textOutput
if args.outputFormat == "json":
self._output = self._jsonOutput
def _jsonOutput(self, gaObjects):
"""
Outputs the specified protocol objects as one JSON string per
line.
"""
for gaObject in gaObjects:
print(gaObject.toJsonString())
def _textOutput(self, gaObjects):
"""
Outputs a text summary of the specified protocol objects, one
per line.
"""
for gaObject in gaObjects:
print(gaObject.id, gaObject.name, sep="\t")
class AbstractGetRunner(FormattedOutputRunner):
"""
Abstract base class for get runner classes
"""
def __init__(self, args):
super(AbstractGetRunner, self).__init__(args)
self._id = args.id
self._httpClient = client.HttpClient(
args.baseUrl, verbosityToLogLevel(args.verbose), self._key)
def run(self):
response = self._method(self._id)
self._output([response])
class AbstractSearchRunner(FormattedOutputRunner):
"""
Abstract base class for search runner classes
"""
def __init__(self, args):
super(AbstractSearchRunner, self).__init__(args)
self._pageSize = args.pageSize
self._httpClient.setPageSize(self._pageSize)
def getAllDatasets(self):
"""
Returns all datasets on the server.
"""
return self._httpClient.searchDatasets()
def getAllVariantSets(self):
"""
Returns all variant sets on the server.
"""
for dataset in self.getAllDatasets():
iterator = self._httpClient.searchVariantSets(datasetId=dataset.id)
for variantSet in iterator:
yield variantSet
def getAllReadGroupSets(self):
"""
Returns all readgroup sets on the server.
"""
for dataset in self.getAllDatasets():
iterator = self._httpClient.searchReadGroupSets(
datasetId=dataset.id)
for readGroupSet in iterator:
yield readGroupSet
def getAllReferenceSets(self):
"""
Returns all reference sets on the server.
"""
return self._httpClient.searchReferenceSets()
# Runners for the various search methods
class SearchDatasetsRunner(AbstractSearchRunner):
"""
Runner class for the datasets/search method
"""
def __init__(self, args):
super(SearchDatasetsRunner, self).__init__(args)
def run(self):
iterator = self._httpClient.searchDatasets()
self._output(iterator)
class SearchReferenceSetsRunner(AbstractSearchRunner):
"""
Runner class for the referencesets/search method.
"""
def __init__(self, args):
super(SearchReferenceSetsRunner, self).__init__(args)
self._accession = args.accession
self._md5checksum = args.md5checksum
def run(self):
iterator = self._httpClient.searchReferenceSets(
accession=self._accession, md5checksum=self._md5checksum)
self._output(iterator)
class SearchReferencesRunner(AbstractSearchRunner):
"""
Runner class for the references/search method
"""
def __init__(self, args):
super(SearchReferencesRunner, self).__init__(args)
self._referenceSetId = args.referenceSetId
self._accession = args.accession
self._md5checksum = args.md5checksum
def _run(self, referenceSetId):
iterator = self._httpClient.searchReferences(
accession=self._accession, md5checksum=self._md5checksum,
referenceSetId=referenceSetId)
self._output(iterator)
def run(self):
if self._referenceSetId is None:
for referenceSet in self.getAllReferenceSets():
self._run(referenceSet.id)
else:
self._run(self._referenceSetId)
class SearchVariantSetsRunner(AbstractSearchRunner):
"""
Runner class for the variantsets/search method.
"""
def __init__(self, args):
super(SearchVariantSetsRunner, self).__init__(args)
self._datasetId = args.datasetId
def _run(self, datasetId):
iterator = self._httpClient.searchVariantSets(datasetId=datasetId)
self._output(iterator)
def run(self):
if self._datasetId is None:
for dataset in self.getAllDatasets():
self._run(dataset.id)
else:
self._run(self._datasetId)
class SearchReadGroupSetsRunner(AbstractSearchRunner):
"""
Runner class for the readgroupsets/search method
"""
def __init__(self, args):
super(SearchReadGroupSetsRunner, self).__init__(args)
self._datasetId = args.datasetId
self._name = args.name
def _run(self, datasetId):
iterator = self._httpClient.searchReadGroupSets(
datasetId=datasetId, name=self._name)
self._output(iterator)
def run(self):
if self._datasetId is None:
for dataset in self.getAllDatasets():
self._run(dataset.id)
else:
self._run(self._datasetId)
class SearchCallSetsRunner(AbstractSearchRunner):
"""
Runner class for the callsets/search method
"""
def __init__(self, args):
super(SearchCallSetsRunner, self).__init__(args)
self._variantSetId = args.variantSetId
self._name = args.name
def _run(self, variantSetId):
iterator = self._httpClient.searchCallSets(
variantSetId=variantSetId, name=self._name)
self._output(iterator)
def run(self):
if self._variantSetId is None:
for variantSet in self.getAllVariantSets():
self._run(variantSet.id)
else:
self._run(self._variantSetId)
class VariantFormatterMixin(object):
"""
Simple mixin to format variant objects.
"""
def _textOutput(self, gaObjects):
"""
Prints out the specified Variant objects in a VCF-like form.
"""
for variant in gaObjects:
print(
variant.id, variant.variantSetId, variant.names,
variant.referenceName, variant.start, variant.end,
variant.referenceBases, variant.alternateBases,
sep="\t", end="\t")
for key, value in variant.info.items():
print(key, value, sep="=", end=";")
print("\t", end="")
for c in variant.calls:
print(
c.callSetId, c.genotype, c.genotypeLikelihood, c.info,
c.phaseset, sep=":", end="\t")
print()
class SearchVariantsRunner(VariantFormatterMixin, AbstractSearchRunner):
"""
Runner class for the variants/search method.
"""
def __init__(self, args):
super(SearchVariantsRunner, self).__init__(args)
self._referenceName = args.referenceName
self._variantSetId = args.variantSetId
self._start = args.start
self._end = args.end
if args.callSetIds == []:
self._callSetIds = []
elif args.callSetIds == '*':
self._callSetIds = None
else:
self._callSetIds = args.callSetIds.split(",")
def _run(self, variantSetId):
iterator = self._httpClient.searchVariants(
start=self._start, end=self._end,
referenceName=self._referenceName,
variantSetId=variantSetId, callSetIds=self._callSetIds)
self._output(iterator)
def run(self):
if self._variantSetId is None:
for variantSet in self.getAllVariantSets():
self._run(variantSet.id)
else:
self._run(self._variantSetId)
class SearchReadsRunner(AbstractSearchRunner):
"""
Runner class for the reads/search method
"""
def __init__(self, args):
super(SearchReadsRunner, self).__init__(args)
self._start = args.start
self._end = args.end
self._referenceId = args.referenceId
self._readGroupIds = None
if args.readGroupIds is not None:
self._readGroupIds = args.readGroupIds.split(",")
def run(self):
# TODO add support for looking up ReadGroupSets and References
# like we do with SearchVariants and others.
iterator = self._httpClient.searchReads(
readGroupIds=self._readGroupIds, referenceId=self._referenceId,
start=self._start, end=self._end)
self._output(iterator)
def _textOutput(self, gaObjects):
"""
        Prints out a simple summary of the specified Read objects, one per
        line.
"""
for read in gaObjects:
# TODO add in some more useful output here.
print(read.id)
# ListReferenceBases is an oddball, and doesn't fit either get or
# search patterns.
class ListReferenceBasesRunner(AbstractQueryRunner):
"""
Runner class for the references/{id}/bases method
"""
def __init__(self, args):
super(ListReferenceBasesRunner, self).__init__(args)
self._referenceId = args.id
self._start = args.start
self._end = args.end
def run(self):
iterator = self._httpClient.listReferenceBases(
self._referenceId, self._start, self._end)
# TODO add support for FASTA output.
for segment in iterator:
print(segment, end="")
print()
# Runners for the various GET methods.
class GetReferenceSetRunner(AbstractGetRunner):
"""
Runner class for the referencesets/{id} method
"""
def __init__(self, args):
super(GetReferenceSetRunner, self).__init__(args)
self._method = self._httpClient.getReferenceSet
class GetReferenceRunner(AbstractGetRunner):
"""
Runner class for the references/{id} method
"""
def __init__(self, args):
super(GetReferenceRunner, self).__init__(args)
self._method = self._httpClient.getReference
class GetReadGroupSetRunner(AbstractGetRunner):
"""
Runner class for the readgroupsets/{id} method
"""
def __init__(self, args):
super(GetReadGroupSetRunner, self).__init__(args)
self._method = self._httpClient.getReadGroupSet
class GetReadGroupRunner(AbstractGetRunner):
"""
    Runner class for the readgroups/{id} method
"""
def __init__(self, args):
super(GetReadGroupRunner, self).__init__(args)
self._method = self._httpClient.getReadGroup
class GetCallsetRunner(AbstractGetRunner):
"""
Runner class for the callsets/{id} method
"""
def __init__(self, args):
super(GetCallsetRunner, self).__init__(args)
self._method = self._httpClient.getCallset
class GetDatasetRunner(AbstractGetRunner):
"""
Runner class for the datasets/{id} method
"""
def __init__(self, args):
super(GetDatasetRunner, self).__init__(args)
self._method = self._httpClient.getDataset
class GetVariantRunner(VariantFormatterMixin, AbstractGetRunner):
"""
Runner class for the variants/{id} method
"""
def __init__(self, args):
super(GetVariantRunner, self).__init__(args)
self._method = self._httpClient.getVariant
def addDisableUrllibWarningsArgument(parser):
parser.add_argument(
"--disable-urllib-warnings", default=False, action="store_true",
help="Disable urllib3 warnings")
def addVariantSearchOptions(parser):
"""
    Adds common options to a variant search command line parser.
"""
addVariantSetIdArgument(parser)
addReferenceNameArgument(parser)
addCallSetIdsArgument(parser)
addStartArgument(parser)
addEndArgument(parser)
addPageSizeArgument(parser)
def addVariantSetIdArgument(parser):
parser.add_argument(
"--variantSetId", "-V", default=None,
help="The variant set id to search over")
def addReferenceNameArgument(parser):
parser.add_argument(
"--referenceName", "-r", default="1",
help="Only return variants on this reference.")
def addCallSetIdsArgument(parser):
parser.add_argument(
"--callSetIds", "-c", default=[],
help="""Return variant calls which belong to call sets
with these IDs. Pass in IDs as a comma separated list (no spaces).
Use '*' to request all call sets (the quotes are important!).
""")
def addStartArgument(parser):
parser.add_argument(
"--start", "-s", default=0, type=int,
help="The start of the search range (inclusive).")
def addEndArgument(parser, defaultValue=AVRO_LONG_MAX):
parser.add_argument(
"--end", "-e", default=defaultValue, type=int,
help="The end of the search range (exclusive).")
def addIdArgument(parser):
parser.add_argument("id", default=None, help="The id of the object")
def addGetArguments(parser):
addUrlArgument(parser)
addIdArgument(parser)
addOutputFormatArgument(parser)
def addUrlArgument(parser):
"""
Adds the URL endpoint argument to the specified parser.
"""
parser.add_argument("baseUrl", help="The URL of the API endpoint")
def addOutputFormatArgument(parser):
parser.add_argument(
"--outputFormat", "-O", choices=['text', 'json'], default="text",
help=(
"The format for object output. Currently supported are "
"'text' (default), which gives a short summary of the object and "
"'json', which outputs each object in line-delimited JSON"))
def addAccessionArgument(parser):
parser.add_argument(
"--accession", default=None,
help="The accession to search for")
def addMd5ChecksumArgument(parser):
parser.add_argument(
"--md5checksum", default=None,
help="The md5checksum to search for")
def addPageSizeArgument(parser):
parser.add_argument(
"--pageSize", "-m", default=None, type=int,
help=(
"The maximum number of results returned in one page. "
"The default is to let the server decide how many "
"results to return in a single page."))
def addDatasetIdArgument(parser):
parser.add_argument(
"--datasetId", default=None,
help="The datasetId to search over")
def addReferenceSetIdArgument(parser):
parser.add_argument(
"--referenceSetId", default=None,
help="The referenceSet to search over")
def addNameArgument(parser):
parser.add_argument(
"--name", default=None,
help="The name to search over")
def addClientGlobalOptions(parser):
parser.add_argument(
'--verbose', '-v', action='count', default=0,
help="Increase verbosity; can be supplied multiple times")
parser.add_argument(
"--key", "-k", default='invalid',
help="Auth Key. Found on server index page.")
addDisableUrllibWarningsArgument(parser)
def addHelpParser(subparsers):
parser = subparsers.add_parser(
"help", description="ga4gh_client help",
help="show this help message and exit")
return parser
def addVariantsSearchParser(subparsers):
parser = subparsers.add_parser(
"variants-search",
description="Search for variants",
help="Search for variants.")
parser.set_defaults(runner=SearchVariantsRunner)
addUrlArgument(parser)
addOutputFormatArgument(parser)
addVariantSearchOptions(parser)
return parser
def addVariantSetsSearchParser(subparsers):
parser = subparsers.add_parser(
"variantsets-search",
description="Search for variantSets",
help="Search for variantSets.")
parser.set_defaults(runner=SearchVariantSetsRunner)
addOutputFormatArgument(parser)
addUrlArgument(parser)
addPageSizeArgument(parser)
addDatasetIdArgument(parser)
return parser
def addReferenceSetsSearchParser(subparsers):
parser = subparsers.add_parser(
"referencesets-search",
description="Search for referenceSets",
help="Search for referenceSets")
parser.set_defaults(runner=SearchReferenceSetsRunner)
addUrlArgument(parser)
addOutputFormatArgument(parser)
addPageSizeArgument(parser)
addAccessionArgument(parser)
addMd5ChecksumArgument(parser)
parser.add_argument(
"--assemblyId",
help="The assembly id to search for")
return parser
def addReferencesSearchParser(subparsers):
parser = subparsers.add_parser(
"references-search",
description="Search for references",
help="Search for references")
parser.set_defaults(runner=SearchReferencesRunner)
addUrlArgument(parser)
addOutputFormatArgument(parser)
addPageSizeArgument(parser)
addAccessionArgument(parser)
addMd5ChecksumArgument(parser)
addReferenceSetIdArgument(parser)
return parser
def addReadGroupSetsSearchParser(subparsers):
parser = subparsers.add_parser(
"readgroupsets-search",
description="Search for readGroupSets",
help="Search for readGroupSets")
parser.set_defaults(runner=SearchReadGroupSetsRunner)
addUrlArgument(parser)
addOutputFormatArgument(parser)
addPageSizeArgument(parser)
addDatasetIdArgument(parser)
addNameArgument(parser)
return parser
def addCallsetsSearchParser(subparsers):
parser = subparsers.add_parser(
"callsets-search",
description="Search for callSets",
help="Search for callSets")
parser.set_defaults(runner=SearchCallSetsRunner)
addUrlArgument(parser)
addOutputFormatArgument(parser)
addPageSizeArgument(parser)
addNameArgument(parser)
addVariantSetIdArgument(parser)
return parser
def addReadsSearchParser(subparsers):
parser = subparsers.add_parser(
"reads-search",
description="Search for reads",
help="Search for reads")
parser.set_defaults(runner=SearchReadsRunner)
addOutputFormatArgument(parser)
addReadsSearchParserArguments(parser)
return parser
def addDatasetsGetParser(subparsers):
parser = subparsers.add_parser(
"datasets-get",
description="Get a dataset",
help="Get a dataset")
parser.set_defaults(runner=GetDatasetRunner)
addGetArguments(parser)
def addDatasetsSearchParser(subparsers):
parser = subparsers.add_parser(
"datasets-search",
description="Search for datasets",
help="Search for datasets")
parser.set_defaults(runner=SearchDatasetsRunner)
addUrlArgument(parser)
addPageSizeArgument(parser)
addOutputFormatArgument(parser)
return parser
def addReadsSearchParserArguments(parser):
addUrlArgument(parser)
addPageSizeArgument(parser)
addStartArgument(parser)
addEndArgument(parser)
parser.add_argument(
"--readGroupIds", default=None,
help="The readGroupIds to search over")
parser.add_argument(
"--referenceId", default=None,
help="The referenceId to search over")
def addReferenceSetsGetParser(subparsers):
parser = subparsers.add_parser(
"referencesets-get",
description="Get a referenceset",
help="Get a referenceset")
parser.set_defaults(runner=GetReferenceSetRunner)
addGetArguments(parser)
def addReferencesGetParser(subparsers):
parser = subparsers.add_parser(
"references-get",
description="Get a reference",
help="Get a reference")
parser.set_defaults(runner=GetReferenceRunner)
addGetArguments(parser)
def addReadGroupSetsGetParser(subparsers):
parser = subparsers.add_parser(
"readgroupsets-get",
description="Get a read group set",
help="Get a read group set")
parser.set_defaults(runner=GetReadGroupSetRunner)
addGetArguments(parser)
def addReadGroupsGetParser(subparsers):
parser = subparsers.add_parser(
"readgroups-get",
description="Get a read group",
help="Get a read group")
parser.set_defaults(runner=GetReadGroupRunner)
addGetArguments(parser)
def addCallsetsGetParser(subparsers):
parser = subparsers.add_parser(
"callsets-get",
description="Get a callset",
help="Get a callset")
parser.set_defaults(runner=GetCallsetRunner)
addGetArguments(parser)
def addVariantsGetParser(subparsers):
parser = subparsers.add_parser(
"variants-get",
description="Get a variant",
help="Get a variant")
parser.set_defaults(runner=GetVariantRunner)
addGetArguments(parser)
def addReferencesBasesListParser(subparsers):
parser = subparsers.add_parser(
"references-list-bases",
description="List bases of a reference",
help="List bases of a reference")
parser.set_defaults(runner=ListReferenceBasesRunner)
addUrlArgument(parser)
addIdArgument(parser)
addStartArgument(parser)
addEndArgument(parser, defaultValue=None)
def getClientParser():
parser = argparse.ArgumentParser(
description="GA4GH reference client")
addClientGlobalOptions(parser)
subparsers = parser.add_subparsers(title='subcommands',)
addHelpParser(subparsers)
addVariantsSearchParser(subparsers)
addVariantSetsSearchParser(subparsers)
addReferenceSetsSearchParser(subparsers)
addReferencesSearchParser(subparsers)
addReadGroupSetsSearchParser(subparsers)
addCallsetsSearchParser(subparsers)
addReadsSearchParser(subparsers)
addDatasetsSearchParser(subparsers)
addReferenceSetsGetParser(subparsers)
addReferencesGetParser(subparsers)
addReadGroupSetsGetParser(subparsers)
addReadGroupsGetParser(subparsers)
addCallsetsGetParser(subparsers)
addVariantsGetParser(subparsers)
addDatasetsGetParser(subparsers)
addReferencesBasesListParser(subparsers)
return parser
def client_main():
parser = getClientParser()
args = parser.parse_args()
if "runner" not in args:
parser.print_help()
else:
if args.disable_urllib_warnings:
requests.packages.urllib3.disable_warnings()
try:
runner = args.runner(args)
runner.run()
except (exceptions.BaseClientException,
requests.exceptions.RequestException) as exception:
# TODO suppress exception unless debug settings are enabled
raise exception
##############################################################################
# ga2vcf
##############################################################################
class Ga2VcfRunner(SearchVariantsRunner):
"""
Runner class for the ga2vcf
"""
def __init__(self, args):
super(Ga2VcfRunner, self).__init__(args)
self._outputFile = args.outputFile
self._binaryOutput = False
if args.outputFormat == "bcf":
self._binaryOutput = True
def run(self):
variantSet = self._httpClient.getVariantSet(self._variantSetId)
iterator = self._httpClient.searchVariants(
start=self._start, end=self._end,
referenceName=self._referenceName,
variantSetId=self._variantSetId,
callSetIds=self._callSetIds)
# do conversion
vcfConverter = converters.VcfConverter(
variantSet, iterator, self._outputFile, self._binaryOutput)
vcfConverter.convert()
def addOutputFileArgument(parser):
parser.add_argument(
"--outputFile", "-o", default=None,
help="the file to write the output to")
def getGa2VcfParser():
parser = argparse.ArgumentParser(
description=(
"GA4GH VCF conversion tool. Converts variant information "
"stored in a GA4GH repository into VCF format."))
addClientGlobalOptions(parser)
addOutputFileArgument(parser)
addUrlArgument(parser)
parser.add_argument("variantSetId", help="The variant set to convert")
parser.add_argument(
"--outputFormat", "-O", choices=['vcf', 'bcf'], default="vcf",
help=(
"The format for object output. Currently supported are "
"'vcf' (default), which is a text-based format and "
"'bcf', which is the binary equivalent"))
addReferenceNameArgument(parser)
addCallSetIdsArgument(parser)
addStartArgument(parser)
addEndArgument(parser)
addPageSizeArgument(parser)
return parser
def ga2vcf_main():
parser = getGa2VcfParser()
args = parser.parse_args()
if "baseUrl" not in args:
parser.print_help()
else:
runner = Ga2VcfRunner(args)
runner.run()
##############################################################################
# ga2sam
##############################################################################
class Ga2SamRunner(SearchReadsRunner):
"""
Runner class for the ga2sam
"""
def __init__(self, args):
args.readGroupIds = args.readGroupId
super(Ga2SamRunner, self).__init__(args)
self._outputFile = args.outputFile
self._binaryOutput = False
if args.outputFormat == "bam":
self._binaryOutput = True
def run(self):
readGroup = self._httpClient.getReadGroup(self._readGroupIds[0])
iterator = self._httpClient.searchReads(
readGroupIds=self._readGroupIds, referenceId=self._referenceId,
start=self._start, end=self._end)
# do conversion
samConverter = converters.SamConverter(
readGroup, iterator, self._outputFile, self._binaryOutput)
samConverter.convert()
def getGa2SamParser():
parser = argparse.ArgumentParser(
description="GA4GH SAM conversion tool")
addClientGlobalOptions(parser)
addUrlArgument(parser)
parser.add_argument(
"readGroupId",
help="The ReadGroup to convert to SAM/BAM format.")
addPageSizeArgument(parser)
addStartArgument(parser)
addEndArgument(parser)
parser.add_argument(
"--referenceId", default=None,
help="The referenceId to search over")
parser.add_argument(
"--outputFormat", "-O", default="sam", choices=["sam", "bam"],
help=(
"The format for object output. Currently supported are "
"'sam' (default), which is a text-based format and "
"'bam', which is the binary equivalent"))
addOutputFileArgument(parser)
return parser
def ga2sam_main():
parser = getGa2SamParser()
args = parser.parse_args()
if "baseUrl" not in args:
parser.print_help()
else:
runner = Ga2SamRunner(args)
runner.run()
##############################################################################
# Configuration testing
##############################################################################
class SimplerResult(unittest.TestResult):
"""
The TestResult class gives formatted tracebacks as error messages, which
is not what we want. Instead we just want the error message from the
err param. Hence this subclass.
"""
def addError(self, test, err):
self.errors.append((test,
"{0}: {1}".format(err[0].__name__, err[1])))
def addFailure(self, test, err):
self.failures.append((test,
"{0}: {1}".format(err[0].__name__, err[1])))
def configtest_main(parser=None):
if parser is None:
parser = argparse.ArgumentParser(
description="GA4GH server configuration validator")
parser.add_argument(
"--config", "-c", default='DevelopmentConfig', type=str,
help="The configuration to use")
parser.add_argument(
"--config-file", "-f", type=str, default=None,
help="The configuration file to use")
args = parser.parse_args()
configStr = 'ga4gh.serverconfig:{0}'.format(args.config)
configtest.TestConfig.configStr = configStr
configtest.TestConfig.configFile = args.config_file
configtest.TestConfig.configEnv = "GA4GH_CONFIGURATION"
loader = unittest.TestLoader()
tests = loader.loadTestsFromModule(configtest)
results = SimplerResult()
tests.run(results)
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
log.info('{0} Tests run. {1} errors, {2} failures, {3} skipped'.
format(results.testsRun,
len(results.errors),
len(results.failures),
len(results.skipped)))
for result in results.errors:
if result is not None:
log.critical('Error: {0}: {1}'.format(result[0].id(), result[1]))
for result in results.failures:
if result is not None:
log.critical('Failure: {0}: {1}'.format(result[0].id(), result[1]))
for result in results.skipped:
if result is not None:
log.info('Skipped: {0}: {1}'.format(result[0].id(), result[1]))
| apache-2.0 |
cgwalters/pykickstart | tests/orderedset.py | 3 | 1277 | import unittest
from pykickstart.orderedset import OrderedSet
class OrderedSet_TestCase(unittest.TestCase):
def runTest(self):
# __eq__, __len__, etc.
self.assertEqual(OrderedSet([]), OrderedSet([]))
self.assertEqual(OrderedSet([1, 2, 3]), OrderedSet([1, 2, 3]))
self.assertEqual(OrderedSet([1, 2, 3]), [1, 2, 3])
# __reversed__
self.assertEqual(reversed(OrderedSet([2, 4, 1, 3])), OrderedSet([3, 1, 4, 2]))
# discard
self.assertEqual(len(OrderedSet(["one", "two", "three"])), 3)
os = OrderedSet(["one", "two", "three"])
os.discard("two")
self.assertEqual(len(os), 2)
os = OrderedSet(["one", "two", "three"])
os.discard("four")
self.assertEqual(len(os), 3)
# pop
self.assertRaises(KeyError, OrderedSet().pop)
self.assertEqual(OrderedSet(["one", "two", "three"]).pop(), "three")
self.assertEqual(OrderedSet(["one"]).pop(), "one")
os = OrderedSet(["one"])
os.pop()
self.assertEqual(len(os), 0)
# __repr__
self.assertEqual(repr(OrderedSet()), "OrderedSet()")
self.assertEqual(repr(OrderedSet([1, 2, 3])), "OrderedSet([1, 2, 3])")
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
antonygc/liblightbase | liblightbase/lbdoc/metaclass.py | 1 | 6065 | from liblightbase import lbutils
from liblightbase.lbdoc.metadata import DocumentMetadata
def generate_metaclass(struct, base=None):
"""
Generate document metaclass. The document metaclass
is an abstraction of the document model defined by the base
structures.
@param struct: Field or Group object.
@param base: Base object or None.
"""
build_metadata = False
if base is None:
base = struct
build_metadata = True
snames = struct.content.__snames__
rnames = struct.content.__rnames__
class MetaClass(object):
"""
Document metaclass. Describes the structures defined by
the document structure model.
"""
# @property __valreq__: Flag used to validate required
# fields or not.
__valreq__ = True
# @property __slots__: reserves space for the declared
# variables and prevents the automatic creation of
# __dict__ and __weakref__ for each instance.
__slots__ = ['_' + sname for sname in snames]
if build_metadata:
__slots__.append('__metadata__')
def __init__(self, **kwargs):
""" Document MetaClass constructor
"""
if self.__valreq__:
lbutils.validate_required(rnames, kwargs)
for arg in kwargs:
setattr(self, arg, kwargs[arg])
for childstruct in struct.content:
structname, prop = generate_property(base, childstruct)
setattr(MetaClass, structname, prop)
if build_metadata:
MetaClass._metadata = build_metadata_prop()
MetaClass.__name__ = struct.metadata.name
return MetaClass
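# Illustrative usage sketch (added for clarity; not part of the original module).
# It assumes ``base`` is a liblightbase Base object whose structure defines a
# required field named ``nome`` -- both names are placeholders:
#
#   DocumentClass = generate_metaclass(base)   # base doubles as struct and base
#   doc = DocumentClass(nome='John')           # required fields are validated
#   doc.nome                                   # -> 'John' (unwrapped field value)
#   doc._metadata = DocumentMetadata(...)      # must be a DocumentMetadata object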
def generate_property(base, struct):
"""
Make python's property based on structure attributes.
@param base: Base object.
@param struct: Field or Group object.
"""
if struct.is_field:
structname = struct.name
elif struct.is_group:
structname = struct.metadata.name
attr_name = '_' + structname
def getter(self):
value = getattr(self, attr_name)
if struct.is_field:
return getattr(value, '__value__')
return value
def setter(self, value):
struct_metaclass = base.metaclass(structname)
if struct.is_field:
value = struct_metaclass(value)
elif struct.is_group:
if struct.metadata.multivalued:
msg = 'object {} should be instance of {}'.format(
struct.metadata.name, list)
assert isinstance(value, list), msg
msg = '{} list elements should be instances of {}'.format(
struct.metadata.name, struct_metaclass)
assertion = all(isinstance(element, struct_metaclass) \
for element in value)
assert assertion, msg
value = generate_multimetaclass(struct,
struct_metaclass)(value)
else:
msg = '{} object should be an instance of {}'.format(
struct.metadata.name, struct_metaclass)
assert isinstance(value, struct_metaclass), msg
setattr(self, attr_name, value)
def deleter(self):
delattr(self, attr_name)
return structname, property(getter,
setter, deleter, structname)
def build_metadata_prop():
def fget(self):
return self.__metadata__
def fset(self, value):
msg = '_metadata attribute should be a DocumentMetadata object.'
assert isinstance(value, DocumentMetadata), msg
self.__metadata__ = value
def fdel(self):
del self.__metadata__
return property(fget, fset, fdel, '_metadata')
def generate_multimetaclass(struct, struct_metaclass):
"""
Generate metaclass to use with multivalued groups.
@param struct: Field or Group object
@param struct_metaclass: The struct Metaclass
"""
class MultiGroupMetaClass(list):
"""
Multivalued Group Metaclass. Metaclass used to ensure list
elements are instances of right metaclasses.
"""
def __setitem__(self, index, element):
""" x.__setitem__(y, z) <==> x[y] = z
"""
msg = '{} list elements should be instances of {}'.format(
struct.metadata.name, struct_metaclass)
assert isinstance(element, struct_metaclass), msg
return super(MultiGroupMetaClass, self).__setitem__(index,
element)
def append(self, element):
""" L.append(object) -- append object to end
"""
msg = '{} list elements should be instances of {}'.format(
struct.metadata.name, struct_metaclass)
assert isinstance(element, struct_metaclass), msg
return super(MultiGroupMetaClass, self).append(element)
return MultiGroupMetaClass
def generate_field_metaclass(field, base):
"""
Generate field metaclass. The field metaclass
validates incoming values against the field's datatype.
@param field: Field object.
@param base: Base object.
"""
class FieldMetaClass(object):
"""
Field MetaClass. Validates incoming
values against the field's datatype.
"""
def __init__(self, value):
self.__value__ = value
def __setattr__(self, obj, value):
validator = field._datatype.__schema__(base, field, 0)
if field.multivalued is True:
msg = 'Expected type list for {}, but found {}'
assert isinstance(value, list), msg.format(
field.name, type(value))
value = [validator(element) for element in value]
else:
value = validator(value)
super(FieldMetaClass, self).__setattr__('__value__', value)
def __getattr__(self, obj):
return super(FieldMetaClass, self).__getattribute__('__value__')
FieldMetaClass.__name__ = field.name
return FieldMetaClass
| gpl-2.0 |
aio-libs/aiozmq | examples/core_dealer_router.py | 1 | 1579 | import asyncio
import aiozmq
import zmq
class ZmqDealerProtocol(aiozmq.ZmqProtocol):
transport = None
def __init__(self, queue, on_close):
self.queue = queue
self.on_close = on_close
def connection_made(self, transport):
self.transport = transport
def msg_received(self, msg):
self.queue.put_nowait(msg)
def connection_lost(self, exc):
self.on_close.set_result(exc)
class ZmqRouterProtocol(aiozmq.ZmqProtocol):
transport = None
def __init__(self, on_close):
self.on_close = on_close
def connection_made(self, transport):
self.transport = transport
def msg_received(self, msg):
self.transport.write(msg)
def connection_lost(self, exc):
self.on_close.set_result(exc)
async def go():
router_closed = asyncio.Future()
dealer_closed = asyncio.Future()
router, _ = await aiozmq.create_zmq_connection(
lambda: ZmqRouterProtocol(router_closed), zmq.ROUTER, bind="tcp://127.0.0.1:*"
)
addr = list(router.bindings())[0]
queue = asyncio.Queue()
dealer, _ = await aiozmq.create_zmq_connection(
lambda: ZmqDealerProtocol(queue, dealer_closed), zmq.DEALER, connect=addr
)
for i in range(10):
msg = (b"data", b"ask", str(i).encode("utf-8"))
dealer.write(msg)
answer = await queue.get()
print(answer)
dealer.close()
await dealer_closed
router.close()
await router_closed
def main():
asyncio.run(go())
print("DONE")
if __name__ == "__main__":
main()
| bsd-2-clause |
pyfa-org/eos | eos/item/mixin/effect_stats/remote_repair.py | 1 | 1829 | # ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos.eve_obj.effect.repairs.base import RemoteArmorRepairEffect
from eos.eve_obj.effect.repairs.base import RemoteShieldRepairEffect
from eos.item.mixin.base import BaseItemMixin
class RemoteRepairMixin(BaseItemMixin):
def __repair_effect_iter(self, effect_class):
for effect in self._type_effects.values():
if not isinstance(effect, effect_class):
continue
if effect.id not in self._running_effect_ids:
continue
yield effect
def get_armor_rps(self, reload=False):
rps = 0
for effect in self.__repair_effect_iter(RemoteArmorRepairEffect):
rps += effect.get_rps(self, reload=reload)
return rps
def get_shield_rps(self, reload=False):
rps = 0
for effect in self.__repair_effect_iter(RemoteShieldRepairEffect):
rps += effect.get_rps(self, reload=reload)
return rps
| lgpl-3.0 |
bfirsh/django-old | django/contrib/localflavor/ie/ie_counties.py | 503 | 1127 | """
Sources:
Irish Counties: http://en.wikipedia.org/wiki/Counties_of_Ireland
"""
from django.utils.translation import ugettext_lazy as _
IE_COUNTY_CHOICES = (
('antrim', _('Antrim')),
('armagh', _('Armagh')),
('carlow', _('Carlow')),
('cavan', _('Cavan')),
('clare', _('Clare')),
('cork', _('Cork')),
('derry', _('Derry')),
('donegal', _('Donegal')),
('down', _('Down')),
('dublin', _('Dublin')),
('fermanagh', _('Fermanagh')),
('galway', _('Galway')),
('kerry', _('Kerry')),
('kildare', _('Kildare')),
('kilkenny', _('Kilkenny')),
('laois', _('Laois')),
('leitrim', _('Leitrim')),
('limerick', _('Limerick')),
('longford', _('Longford')),
('louth', _('Louth')),
('mayo', _('Mayo')),
('meath', _('Meath')),
('monaghan', _('Monaghan')),
('offaly', _('Offaly')),
('roscommon', _('Roscommon')),
('sligo', _('Sligo')),
('tipperary', _('Tipperary')),
('tyrone', _('Tyrone')),
('waterford', _('Waterford')),
('westmeath', _('Westmeath')),
('wexford', _('Wexford')),
('wicklow', _('Wicklow')),
)
| bsd-3-clause |
netfirms/erpnext | erpnext/crm/doctype/newsletter_list/newsletter_list.py | 15 | 3079 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import validate_email_add
from frappe import _
from email.utils import parseaddr
class NewsletterList(Document):
def onload(self):
singles = [d.name for d in frappe.db.get_all("DocType", "name", {"issingle": 1})]
self.get("__onload").import_types = [{"value": d.parent, "label": "{0} ({1})".format(d.parent, d.label)} \
for d in frappe.db.get_all("DocField", ("parent", "label"), {"options": "Email"}) if d.parent not in singles]
def import_from(self, doctype):
"""Extract email ids from given doctype and add them to the current list"""
meta = frappe.get_meta(doctype)
email_field = [d.fieldname for d in meta.fields if d.fieldtype in ("Data", "Small Text") and d.options=="Email"][0]
unsubscribed_field = "unsubscribed" if meta.get_field("unsubscribed") else None
added = 0
for user in frappe.db.get_all(doctype, [email_field, unsubscribed_field or "name"]):
try:
email = parseaddr(user.get(email_field))[1]
if email:
frappe.get_doc({
"doctype": "Newsletter List Subscriber",
"newsletter_list": self.name,
"email": email,
"unsubscribed": user.get(unsubscribed_field) if unsubscribed_field else 0
}).insert(ignore_permissions=True)
added += 1
except Exception, e:
# already added, ignore
if e.args[0]!=1062:
raise
frappe.msgprint(_("{0} subscribers added").format(added))
return self.update_total_subscribers()
def update_total_subscribers(self):
self.total_subscribers = self.get_total_subscribers()
self.db_update()
return self.total_subscribers
def get_total_subscribers(self):
return frappe.db.sql("""select count(*) from `tabNewsletter List Subscriber`
where newsletter_list=%s""", self.name)[0][0]
def on_trash(self):
for d in frappe.get_all("Newsletter List Subscriber", "name", {"newsletter_list": self.name}):
frappe.delete_doc("Newsletter List Subscriber", d.name)
@frappe.whitelist()
def import_from(name, doctype):
nlist = frappe.get_doc("Newsletter List", name)
if nlist.has_permission("write"):
return nlist.import_from(doctype)
@frappe.whitelist()
def add_subscribers(name, email_list):
if not isinstance(email_list, (list, tuple)):
email_list = email_list.replace(",", "\n").split("\n")
count = 0
for email in email_list:
email = email.strip()
validate_email_add(email, True)
if email:
if not frappe.db.get_value("Newsletter List Subscriber",
{"newsletter_list": name, "email": email}):
frappe.get_doc({
"doctype": "Newsletter List Subscriber",
"newsletter_list": name,
"email": email
}).insert(ignore_permissions = frappe.flags.ignore_permissions)
count += 1
else:
pass
frappe.msgprint(_("{0} subscribers added").format(count))
return frappe.get_doc("Newsletter List", name).update_total_subscribers()
| agpl-3.0 |
bollu/polymage | sandbox/apps/python/img_proc/harris/init.py | 1 | 1485 | import sys
import os.path
from PIL import Image
import numpy as np
from arg_parser import parse_args
from printer import print_header, print_usage, print_line
def init_images(app_data):
print("[init.py] : initializing images...")
app_args = app_data['app_args']
# input image:
img_path = app_args.img_file
img = np.array(Image.open(img_path).convert('1'))
rows, cols = img.shape
# convert to float image
IN = np.array(img)
IN = IN.astype(np.float32).ravel()
# final output image
OUT = np.zeros((rows, cols), np.float32).ravel()
img_data = {}
img_data['IN'] = IN
img_data['OUT'] = OUT
app_data['img_data'] = img_data
app_data['rows'] = rows
app_data['cols'] = cols
return
def get_input(app_data):
# parse the command-line arguments
app_args = parse_args()
app_data['app_args'] = app_args
app_data['mode'] = app_args.mode
app_data['runs'] = int(app_args.runs)
app_data['graph_gen'] = bool(app_args.graph_gen)
app_data['timer'] = app_args.timer
# storage optimization
app_data['optimize_storage'] = bool(app_args.optimize_storage)
# early freeing of allocated arrays
app_data['early_free'] = bool(app_args.early_free)
# pool allocate option
app_data['pool_alloc'] = bool(app_args.pool_alloc)
return
def init_all(app_data):
pipe_data = {}
app_data['pipe_data'] = pipe_data
get_input(app_data)
init_images(app_data)
return
| apache-2.0 |
fanjunwei/depot_tools | testing_support/auto_stub.py | 53 | 2301 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__version__ = '1.0'
import collections
import inspect
import unittest
class AutoStubMixIn(object):
"""Automatically restores stubbed functions on unit test teardDown.
It's an extremely lightweight mocking class that doesn't require bookeeping.
"""
_saved = None
def mock(self, obj, member, mock):
self._saved = self._saved or collections.OrderedDict()
old_value = self._saved.setdefault(
obj, collections.OrderedDict()).setdefault(member, getattr(obj, member))
setattr(obj, member, mock)
return old_value
def tearDown(self):
"""Restore all the mocked members."""
if self._saved:
for obj, items in self._saved.iteritems():
for member, previous_value in items.iteritems():
setattr(obj, member, previous_value)
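# Hedged usage sketch (illustration only; not part of the original file). A test
# mixing in AutoStubMixIn -- e.g. via the TestCase class defined at the end of
# this module -- can stub a member and rely on tearDown() to restore it
# automatically (assumes ``import os`` in the test module):
#
#   class ExampleTest(TestCase):
#     def test_isdir_stub(self):
#       self.mock(os.path, 'isdir', lambda _path: True)
#       assert os.path.isdir('/nonexistent')
#     # tearDown() puts the real os.path.isdir back, no bookkeeping needed.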
class SimpleMock(object):
"""Really simple manual class mock."""
def __init__(self, unit_test):
"""Do not call __init__ if you want to use the global call list to detect
ordering across different instances.
"""
self.calls = []
self.unit_test = unit_test
self.assertEqual = unit_test.assertEqual
def pop_calls(self):
"""Returns the list of calls up to date.
Good to do self.assertEqual(expected, mock.pop_calls()).
"""
calls = self.calls
self.calls = []
return calls
def check_calls(self, expected):
self.assertEqual(expected, self.pop_calls())
def _register_call(self, *args, **kwargs):
"""Registers the name of the caller function."""
caller_name = kwargs.pop('caller_name', None) or inspect.stack()[1][3]
str_args = ', '.join(repr(arg) for arg in args)
str_kwargs = ', '.join('%s=%r' % (k, v) for k, v in kwargs.iteritems())
self.calls.append('%s(%s)' % (
caller_name, ', '.join(filter(None, [str_args, str_kwargs]))))
class TestCase(unittest.TestCase, AutoStubMixIn):
"""Adds self.mock() and self.has_failed() to a TestCase."""
def tearDown(self):
AutoStubMixIn.tearDown(self)
unittest.TestCase.tearDown(self)
def has_failed(self):
"""Returns True if the test has failed."""
return not self._resultForDoCleanups.wasSuccessful()
| bsd-3-clause |
lucalianas/openmicroscopy | components/tools/OmeroPy/test/integration/test_files.py | 9 | 2553 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2014 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test of client upload/download functionality
"""
import pytest
import library as lib
from omero.util.temp_files import create_path
def tmpfile():
file = create_path()
file.write_lines(["abc", "def", "123"])
return file
class TestFiles(lib.ITest):
def testUploadDownload(self):
uploaded = tmpfile()
downloaded = create_path()
ofile = self.client.upload(str(uploaded), type="text/plain")
self.client.download(ofile, str(downloaded))
lines = downloaded.lines()
assert "abc\n" == lines[0], lines[0]
assert "def\n" == lines[1], lines[1]
assert "123\n" == lines[2], lines[2]
sha1_upload = self.client.sha1(str(uploaded))
sha1_download = self.client.sha1(str(downloaded))
assert sha1_upload == sha1_download, "%s!=%s" % (
sha1_upload, sha1_download)
@pytest.mark.broken(ticket="11610")
def testUploadDifferentSizeTicket2337(self):
uploaded = tmpfile()
ofile = self.client.upload(str(uploaded), type="text/plain")
uploaded.write_lines(["abc", "def"]) # Shorten
ofile = self.client.upload(
str(uploaded), type="text/plain", ofile=ofile)
downloaded = create_path()
self.client.download(ofile, str(downloaded))
lines = downloaded.lines()
assert 2 == len(lines)
assert "abc\n" == lines[0], lines[0]
assert "def\n" == lines[1], lines[1]
sha1_upload = self.client.sha1(str(uploaded))
sha1_download = self.client.sha1(str(downloaded))
assert sha1_upload == sha1_download, "%s!=%s" % (
sha1_upload, sha1_download)
| gpl-2.0 |
CharlieGreenman/codeIllustrator | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/flock_tool.py | 1835 | 1748 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""These functions are executed via gyp-flock-tool when using the Makefile
generator. Used on systems that don't have a built-in flock."""
import fcntl
import os
import struct
import subprocess
import sys
def main(args):
executor = FlockTool()
executor.Dispatch(args)
class FlockTool(object):
"""This class emulates the 'flock' command."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
# Note that the stock python on SunOS has a bug
# where fcntl.flock(fd, LOCK_EX) always fails
# with EBADF, that's why we use this F_SETLK
# hack instead.
fd = os.open(lockfile, os.O_WRONLY|os.O_NOCTTY|os.O_CREAT, 0666)
if sys.platform.startswith('aix'):
# Python on AIX is compiled with LARGEFILE support, which changes the
# struct size.
op = struct.pack('hhIllqq', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
else:
op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
fcntl.fcntl(fd, fcntl.F_SETLK, op)
return subprocess.call(cmd_list)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mit |
zhanrnl/ag | webapp/models/roomassignment.py | 1 | 2751 | from google.appengine.ext import ndb
from models.grading import SITTING_ROOM_TYPES
from models.team import (
Team,
Contestant,
)
from models.sitting import Sitting
import random
SINGLE_TEST_SITTING = {
'alg' : 'alg,at',
'at' : 'at,calc',
'calc' : 'calc,geo',
'geo' : 'at,geo',
'team' : 'power,team',
'power' : 'power,team'
}
class RoomAssignment(ndb.Model):
testing_id = ndb.StringProperty(required=True)
sitting_nid = ndb.IntegerProperty(required=True)
@classmethod
def assign_team(cls, team_id):
team = Team.get_by_team_id(team_id)
contestants = Contestant.fetch_by_team(team.key)
cls.delete_team_assignments(team.key)
def get_sitting_type(tests):
if len(tests) == 1 and tests[0] != 'gen':
return SINGLE_TEST_SITTING[tests[0]]
return ','.join(sorted(list(tests)))
def select_sitting(sitting_type):
sittings = Sitting.fetch_by_exam(sitting_type)
weights = [s.capacity for s in sittings]
total = sum(weights)
index = random.randint(1, total)
counter = 0
i = 0
while (counter < index):
counter += weights[i]
i += 1
return sittings[i-1]
def assign_to_sitting(testing_id, tests, size):
sitting_type = get_sitting_type(tests)
sitting = select_sitting(sitting_type)
assignment = RoomAssignment(
testing_id=testing_id,
sitting_nid=sitting.nid,
parent=team.key,
)
assignment.put()
if len(team.team_tests) > 0:
assign_to_sitting(str(team_id), team.team_tests, len(contestants))
for c in contestants:
if len(c.tests) == 0: continue
assign_to_sitting(c.contestant_id, c.tests, 1)
@staticmethod
def get_assigned_team_ids():
team_ids = set()
all_room_assignments = RoomAssignment.query().fetch()
for ra in all_room_assignments:
try:
team_id = int(ra.testing_id)
team_ids.add(team_id)
except ValueError as e:
continue
return list(team_ids)
@staticmethod
def delete_all():
ndb.delete_multi(RoomAssignment.query().iter(keys_only=True))
@staticmethod
def delete_team_assignments(team_key):
for a in RoomAssignment.query(ancestor=team_key).fetch():
a.key.delete()
@staticmethod
def fetch_by_team(team_key):
return RoomAssignment.query(ancestor=team_key).fetch()
@staticmethod
def fetch_all():
return RoomAssignment.query().fetch()
| mit |
google/tf-quant-finance | tf_quant_finance/experimental/pricing_platform/framework/market_data/market_data_test.py | 1 | 5816 | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the market data."""
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
core = tff.experimental.pricing_platform.framework.core
market_data = tff.experimental.pricing_platform.framework.market_data
interpolation_method = tff.experimental.pricing_platform.framework.core.interpolation_method
@test_util.run_all_in_graph_and_eager_modes
class MarketDataTest(tf.test.TestCase):
def setUp(self):
valuation_date = [(2020, 6, 24)]
fixing_dates = [(2020, 2, 24), (2020, 3, 12), (2020, 4, 14), (2020, 5, 21)]
fixing_rates = [0.01, 0.02, 0.03, 0.025]
dates = [[2021, 2, 8], [2022, 2, 8], [2023, 2, 8], [2025, 2, 8],
[2027, 2, 8], [2030, 2, 8], [2050, 2, 8]]
discounts = [0.97197441, 0.94022746, 0.91074031, 0.85495089, 0.8013675,
0.72494879, 0.37602059]
vol_dates = [
[2021, 2, 8], [2022, 2, 8], [2023, 2, 8], [2025, 2, 8], [2027, 2, 8]]
strikes = [[1500, 1550, 1510],
[1500, 1550, 1510],
[1500, 1550, 1510],
[1500, 1550, 1510],
[1500, 1550, 1510]]
volatilities = [[0.1, 0.12, 0.13],
[0.15, 0.2, 0.15],
[0.1, 0.2, 0.1],
[0.1, 0.2, 0.1],
[0.1, 0.1, 0.3]]
risk_free_dates = [
[2021, 2, 8], [2022, 2, 8], [2023, 2, 8], [2025, 2, 8], [2050, 2, 8]]
risk_free_discounts = [
0.97197441, 0.94022746, 0.91074031, 0.85495089, 0.37602059]
self._market_data_dict = {
"rates": {
"USD": {
"risk_free_curve": {
"dates": risk_free_dates, "discounts": risk_free_discounts
},
"OIS": {
"dates": dates, "discounts": discounts
},
"LIBOR_3M": {
"dates": dates,
"discounts": discounts,
"fixing_dates": fixing_dates,
"fixing_rates": fixing_rates,
"fixing_daycount": "ACTUAL_365",
"config": {
"interpolation_method": interpolation_method.
InterpolationMethod.LINEAR
}
},
},
},
"equities": {
"USD": {
"GOOG": {
"spot": 1500,
"volatility_surface": {
"dates": vol_dates,
"strikes": strikes,
"implied_volatilities": volatilities
}
}
}
},
"reference_date": valuation_date,
}
self._libor_discounts = discounts
self._risk_free_discounts = risk_free_discounts
super(MarketDataTest, self).setUp()
def test_discount_curve(self):
market = market_data.MarketDataDict(
self._market_data_dict)
# Get the risk free discount curve
risk_free_curve_type = core.curve_types.RiskFreeCurve(currency="USD")
risk_free_curve = market.yield_curve(risk_free_curve_type)
# Get LIBOR 3M discount
libor_3m = core.rate_indices.RateIndex(type="LIBOR_3M")
rate_index_curve_type = core.curve_types.RateIndexCurve(
currency="USD", index=libor_3m)
libor_3m_curve = market.yield_curve(rate_index_curve_type)
with self.subTest("RiskFree"):
discount_factor_nodes = risk_free_curve.discount_factor_nodes
self.assertAllClose(discount_factor_nodes, self._risk_free_discounts)
with self.subTest("LIBOR_3M"):
discount_factor_nodes = libor_3m_curve.discount_factor_nodes
self.assertAllClose(discount_factor_nodes, self._libor_discounts)
def test_volatility(self):
market = market_data.MarketDataDict(
self._market_data_dict)
# Get volatility surface
vol_surface = market.volatility_surface(currency=["USD", "USD"],
asset=["GOOG", "GOOG"])
expiry = tff.datetime.dates_from_year_month_day(
year=[[2023], [2030]], month=[[5], [10]], day=[[10], [15]])
vols = vol_surface.volatility(expiry_dates=expiry, strike=[[1510], [1520]])
self.assertAllClose(
self.evaluate(vols), [[0.108], [0.31]], atol=1e-6)
def test_fixings(self):
market = market_data.MarketDataDict(
self._market_data_dict)
index_curve_3m = core.curve_types.RateIndexCurve(
"USD", core.rate_indices.RateIndex(type="LIBOR_3M"))
index_curve_ois = core.curve_types.RateIndexCurve(
"USD", core.rate_indices.RateIndex(type="OIS"))
dates = [(2020, 5, 24), (2020, 3, 24)]
with self.subTest("LIBOR_3M"):
fixings, fixings_daycount = market.fixings(dates, index_curve_3m)
self.assertAllClose(
self.evaluate(fixings), [0.025, 0.03], atol=1e-6)
self.assertEqual(fixings_daycount.value, "ACTUAL_365")
with self.subTest("OIS"):
fixings, _ = market.fixings(dates, index_curve_ois)
self.assertAllClose(
self.evaluate(fixings), [0.0, 0.0], atol=1e-6)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
liluo/pygments-main | external/markdown-processor.py | 42 | 2041 | # -*- coding: utf-8 -*-
"""
The Pygments Markdown Preprocessor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This fragment is a Markdown_ preprocessor that renders source code
to HTML via Pygments. To use it, invoke Markdown like so::
import markdown
html = markdown.markdown(someText, extensions=[CodeBlockExtension()])
This uses CSS classes by default, so use
``pygmentize -S <some style> -f html > pygments.css``
to create a stylesheet to be added to the website.
You can then highlight source code in your markdown markup::
[sourcecode:lexer]
some code
[/sourcecode]
.. _Markdown: https://pypi.python.org/pypi/Markdown
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Options
# ~~~~~~~
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
import re
from markdown.preprocessors import Preprocessor
from markdown.extensions import Extension
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name, TextLexer
class CodeBlockPreprocessor(Preprocessor):
pattern = re.compile(r'\[sourcecode:(.+?)\](.+?)\[/sourcecode\]', re.S)
formatter = HtmlFormatter(noclasses=INLINESTYLES)
def run(self, lines):
def repl(m):
try:
lexer = get_lexer_by_name(m.group(1))
except ValueError:
lexer = TextLexer()
code = highlight(m.group(2), lexer, self.formatter)
code = code.replace('\n\n', '\n \n').replace('\n', '<br />')
return '\n\n<div class="code">%s</div>\n\n' % code
joined_lines = "\n".join(lines)
joined_lines = self.pattern.sub(repl, joined_lines)
return joined_lines.split("\n")
class CodeBlockExtension(Extension):
def extendMarkdown(self, md, md_globals):
md.preprocessors.add('CodeBlockPreprocessor', CodeBlockPreprocessor(), '_begin')
| bsd-2-clause |
eLBati/server-tools | dbfilter_from_header/__init__.py | 44 | 1534 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>).
# This module copyright (C) 2014 ACSONE SA/NV (<http://acsone.eu>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from openerp import http
db_filter_org = http.db_filter
def db_filter(dbs, httprequest=None):
dbs = db_filter_org(dbs, httprequest)
httprequest = httprequest or http.request.httprequest
db_filter_hdr = \
httprequest.environ.get('HTTP_X_ODOO_DBFILTER') or \
httprequest.environ.get('HTTP_X_OPENERP_DBFILTER')
if db_filter_hdr:
dbs = [db for db in dbs if re.match(db_filter_hdr, db)]
return dbs
http.db_filter = db_filter
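# Illustrative note (assumption, not shipped with the original module): a reverse
# proxy can send a regular expression in the X-Odoo-dbFilter (or legacy
# X-OpenERP-dbFilter) request header, e.g. "^customer1_.*$"; db_filter() then
# only returns database names matching that pattern for the current request.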
| agpl-3.0 |
towerjoo/DjangoNotes | Django-1.5.1/tests/modeltests/model_forms/models.py | 44 | 8840 | """
XX. Generating HTML forms from models
This is mostly just a reworking of the ``form_for_model``/``form_for_instance``
tests to use ``ModelForm``. As such, the text may not make sense in all cases,
and the examples are probably a poor fit for the ``ModelForm`` syntax. In other
words, most of these tests should be rewritten.
"""
from __future__ import unicode_literals
import os
import tempfile
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
temp_storage_dir = tempfile.mkdtemp(dir=os.environ['DJANGO_TEST_TEMP_DIR'])
temp_storage = FileSystemStorage(temp_storage_dir)
ARTICLE_STATUS = (
(1, 'Draft'),
(2, 'Pending'),
(3, 'Live'),
)
ARTICLE_STATUS_CHAR = (
('d', 'Draft'),
('p', 'Pending'),
('l', 'Live'),
)
@python_2_unicode_compatible
class Category(models.Model):
name = models.CharField(max_length=20)
slug = models.SlugField(max_length=20)
url = models.CharField('The URL', max_length=40)
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
@python_2_unicode_compatible
class Writer(models.Model):
name = models.CharField(max_length=50, help_text='Use both first and last names.')
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=50)
slug = models.SlugField()
pub_date = models.DateField()
created = models.DateField(editable=False)
writer = models.ForeignKey(Writer)
article = models.TextField()
categories = models.ManyToManyField(Category, blank=True)
status = models.PositiveIntegerField(choices=ARTICLE_STATUS, blank=True, null=True)
def save(self):
import datetime
if not self.id:
self.created = datetime.date.today()
return super(Article, self).save()
def __str__(self):
return self.headline
class ImprovedArticle(models.Model):
article = models.OneToOneField(Article)
class ImprovedArticleWithParentLink(models.Model):
article = models.OneToOneField(Article, parent_link=True)
class BetterWriter(Writer):
score = models.IntegerField()
@python_2_unicode_compatible
class WriterProfile(models.Model):
writer = models.OneToOneField(Writer, primary_key=True)
age = models.PositiveIntegerField()
def __str__(self):
return "%s is %s" % (self.writer, self.age)
@python_2_unicode_compatible
class TextFile(models.Model):
description = models.CharField(max_length=20)
file = models.FileField(storage=temp_storage, upload_to='tests', max_length=15)
def __str__(self):
return self.description
try:
# If PIL is available, try testing ImageFields. Checking for the existence
# of Image is enough for CPython, but for PyPy, you need to check for the
# underlying modules If PIL is not available, ImageField tests are omitted.
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import Image, _imaging
except ImportError:
import Image, _imaging
test_images = True
@python_2_unicode_compatible
class ImageFile(models.Model):
def custom_upload_path(self, filename):
path = self.path or 'tests'
return '%s/%s' % (path, filename)
description = models.CharField(max_length=20)
# Deliberately put the image field *after* the width/height fields to
# trigger the bug in #10404 with width/height not getting assigned.
width = models.IntegerField(editable=False)
height = models.IntegerField(editable=False)
image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
width_field='width', height_field='height')
path = models.CharField(max_length=16, blank=True, default='')
def __str__(self):
return self.description
@python_2_unicode_compatible
class OptionalImageFile(models.Model):
def custom_upload_path(self, filename):
path = self.path or 'tests'
return '%s/%s' % (path, filename)
description = models.CharField(max_length=20)
image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
width_field='width', height_field='height',
blank=True, null=True)
width = models.IntegerField(editable=False, null=True)
height = models.IntegerField(editable=False, null=True)
path = models.CharField(max_length=16, blank=True, default='')
def __str__(self):
return self.description
except ImportError:
test_images = False
@python_2_unicode_compatible
class CommaSeparatedInteger(models.Model):
field = models.CommaSeparatedIntegerField(max_length=20)
def __str__(self):
return self.field
@python_2_unicode_compatible
class Product(models.Model):
slug = models.SlugField(unique=True)
def __str__(self):
return self.slug
@python_2_unicode_compatible
class Price(models.Model):
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.PositiveIntegerField()
def __str__(self):
return "%s for %s" % (self.quantity, self.price)
class Meta:
unique_together = (('price', 'quantity'),)
class ArticleStatus(models.Model):
status = models.CharField(max_length=2, choices=ARTICLE_STATUS_CHAR, blank=True, null=True)
@python_2_unicode_compatible
class Inventory(models.Model):
barcode = models.PositiveIntegerField(unique=True)
parent = models.ForeignKey('self', to_field='barcode', blank=True, null=True)
name = models.CharField(blank=False, max_length=20)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
class Book(models.Model):
title = models.CharField(max_length=40)
author = models.ForeignKey(Writer, blank=True, null=True)
special_id = models.IntegerField(blank=True, null=True, unique=True)
class Meta:
unique_together = ('title', 'author')
class BookXtra(models.Model):
isbn = models.CharField(max_length=16, unique=True)
suffix1 = models.IntegerField(blank=True, default=0)
suffix2 = models.IntegerField(blank=True, default=0)
class Meta:
unique_together = (('suffix1', 'suffix2'))
abstract = True
class DerivedBook(Book, BookXtra):
pass
@python_2_unicode_compatible
class ExplicitPK(models.Model):
key = models.CharField(max_length=20, primary_key=True)
desc = models.CharField(max_length=20, blank=True, unique=True)
class Meta:
unique_together = ('key', 'desc')
def __str__(self):
return self.key
@python_2_unicode_compatible
class Post(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField()
def __str__(self):
return self.title
class DerivedPost(Post):
pass
@python_2_unicode_compatible
class BigInt(models.Model):
biggie = models.BigIntegerField()
def __str__(self):
return six.text_type(self.biggie)
class MarkupField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs["max_length"] = 20
super(MarkupField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
# don't allow this field to be used in form (real use-case might be
# that you know the markup will always be X, but it is among an app
# that allows the user to say it could be something else)
# regressed at r10062
return None
class CustomFieldForExclusionModel(models.Model):
name = models.CharField(max_length=10)
markup = MarkupField()
class FlexibleDatePost(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField(blank=True, null=True)
@python_2_unicode_compatible
class Colour(models.Model):
name = models.CharField(max_length=50)
def __iter__(self):
for number in xrange(5):
yield number
def __str__(self):
return self.name
class ColourfulItem(models.Model):
name = models.CharField(max_length=50)
colours = models.ManyToManyField(Colour)
| mit |
syaiful6/django | django/contrib/gis/geos/prototypes/topology.py | 338 | 2145 | """
This module houses the GEOS ctypes prototype functions for the
topological operations on geometries.
"""
from ctypes import c_double, c_int
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_minus_one, check_string,
)
from django.contrib.gis.geos.prototypes.geom import geos_char_p
class Topology(GEOSFuncFactory):
"For GEOS unary topology functions."
argtypes = [GEOM_PTR]
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
# Topology Routines
geos_boundary = Topology('GEOSBoundary')
geos_buffer = Topology('GEOSBuffer', argtypes=[GEOM_PTR, c_double, c_int])
geos_centroid = Topology('GEOSGetCentroid')
geos_convexhull = Topology('GEOSConvexHull')
geos_difference = Topology('GEOSDifference', argtypes=[GEOM_PTR, GEOM_PTR])
geos_envelope = Topology('GEOSEnvelope')
geos_intersection = Topology('GEOSIntersection', argtypes=[GEOM_PTR, GEOM_PTR])
geos_linemerge = Topology('GEOSLineMerge')
geos_pointonsurface = Topology('GEOSPointOnSurface')
geos_preservesimplify = Topology('GEOSTopologyPreserveSimplify', argtypes=[GEOM_PTR, c_double])
geos_simplify = Topology('GEOSSimplify', argtypes=[GEOM_PTR, c_double])
geos_symdifference = Topology('GEOSSymDifference', argtypes=[GEOM_PTR, GEOM_PTR])
geos_union = Topology('GEOSUnion', argtypes=[GEOM_PTR, GEOM_PTR])
geos_cascaded_union = GEOSFuncFactory('GEOSUnionCascaded', argtypes=[GEOM_PTR], restype=GEOM_PTR)
# GEOSRelate returns a string, not a geometry.
geos_relate = GEOSFuncFactory(
'GEOSRelate', argtypes=[GEOM_PTR, GEOM_PTR], restype=geos_char_p, errcheck=check_string
)
# Linear referencing routines
geos_project = GEOSFuncFactory(
'GEOSProject', argtypes=[GEOM_PTR, GEOM_PTR], restype=c_double, errcheck=check_minus_one
)
geos_interpolate = Topology('GEOSInterpolate', argtypes=[GEOM_PTR, c_double])
geos_project_normalized = GEOSFuncFactory(
'GEOSProjectNormalized', argtypes=[GEOM_PTR, GEOM_PTR], restype=c_double, errcheck=check_minus_one
)
geos_interpolate_normalized = Topology('GEOSInterpolateNormalized', argtypes=[GEOM_PTR, c_double])
| bsd-3-clause |
Endika/mitmproxy | libmproxy/contentviews.py | 1 | 16688 | """
Mitmproxy Content Views
=======================
mitmproxy includes a set of content views which can be used to format/decode/highlight data.
While they are currently used for HTTP message bodies only, they may be used in other contexts
in the future, e.g. to decode protobuf messages sent as WebSocket frames.
Thus, the View API is very minimalistic. The only arguments are `data` and `**metadata`,
where `data` is the actual content (as bytes). The contents of metadata depend on the protocol in
use. For HTTP, the message headers are passed as the ``headers`` keyword argument.
"""
from __future__ import (absolute_import, print_function, division)
import cStringIO
import json
import logging
import subprocess
import sys
import lxml.html
import lxml.etree
import datetime
from PIL import Image
from PIL.ExifTags import TAGS
import html2text
import six
from netlib.odict import ODict
from netlib import encoding
from netlib.utils import clean_bin, hexdump, urldecode, multipartdecode, parse_content_type
from . import utils
from .exceptions import ContentViewException
from .contrib import jsbeautifier
from .contrib.wbxml.ASCommandResponse import ASCommandResponse
try:
import pyamf
from pyamf import remoting, flex
except ImportError: # pragma nocover
pyamf = None
try:
import cssutils
except ImportError: # pragma nocover
cssutils = None
else:
cssutils.log.setLevel(logging.CRITICAL)
cssutils.ser.prefs.keepComments = True
cssutils.ser.prefs.omitLastSemicolon = False
cssutils.ser.prefs.indentClosingBrace = False
cssutils.ser.prefs.validOnly = False
# Default view cutoff *in lines*
VIEW_CUTOFF = 512
KEY_MAX = 30
def format_dict(d):
"""
Helper function that transforms the given dictionary into a list of
("key", key )
("value", value)
tuples, where key is padded to a uniform width.
"""
max_key_len = max(len(k) for k in d.keys())
max_key_len = min(max_key_len, KEY_MAX)
for key, value in d.items():
key += ":"
key = key.ljust(max_key_len + 2)
yield [
("header", key),
("text", value)
]
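# Example (illustration only): format_dict({"Host": "example.com"}) yields a
# single line of the form [("header", "Host: "), ("text", "example.com")],
# with every key padded to a uniform width.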
def format_text(text):
"""
Helper function that transforms bytes into the view output format.
"""
for line in text.splitlines():
yield [("text", line)]
class View(object):
name = None
prompt = ()
content_types = []
def __call__(self, data, **metadata):
"""
Transform raw data into human-readable output.
Args:
data: the data to decode/format as bytes.
metadata: optional keyword-only arguments for metadata. Implementations must not
rely on a given argument being present.
Returns:
A (description, content generator) tuple.
The content generator yields lists of (style, text) tuples, where each list represents
a single line. ``text`` is an unfiltered byte string which may need to be escaped,
depending on the used output.
Caveats:
The content generator must not yield tuples of tuples,
because urwid cannot process that. You have to yield a *list* of tuples per line.
"""
raise NotImplementedError()
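# Illustrative sketch (not part of the original module): a minimal custom view
# that follows the API described above. The name, prompt and content type are
# made-up placeholders.
#
#   class ViewUpper(View):
#       name = "Upper"
#       prompt = ("upper", "U")
#       content_types = ["text/x-upper"]
#
#       def __call__(self, data, **metadata):
#           return "Uppercased text", format_text(data.upper())
#
# Registering an instance with add(ViewUpper()) (see the registry helpers near
# the end of this module) makes it available alongside the built-in views.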
class ViewAuto(View):
name = "Auto"
prompt = ("auto", "a")
content_types = []
def __call__(self, data, **metadata):
headers = metadata.get("headers", {})
ctype = headers.get("content-type")
if ctype:
ct = parse_content_type(ctype) if ctype else None
ct = "%s/%s" % (ct[0], ct[1])
if ct in content_types_map:
return content_types_map[ct][0](data, **metadata)
elif utils.isXML(data):
return get("XML")(data, **metadata)
if utils.isMostlyBin(data):
return get("Hex")(data)
return get("Raw")(data)
class ViewRaw(View):
name = "Raw"
prompt = ("raw", "r")
content_types = []
def __call__(self, data, **metadata):
return "Raw", format_text(data)
class ViewHex(View):
name = "Hex"
prompt = ("hex", "e")
content_types = []
@staticmethod
def _format(data):
for offset, hexa, s in hexdump(data):
yield [
("offset", offset + " "),
("text", hexa + " "),
("text", s)
]
def __call__(self, data, **metadata):
return "Hex", self._format(data)
class ViewXML(View):
name = "XML"
prompt = ("xml", "x")
content_types = ["text/xml"]
def __call__(self, data, **metadata):
parser = lxml.etree.XMLParser(
remove_blank_text=True,
resolve_entities=False,
strip_cdata=False,
recover=False
)
try:
document = lxml.etree.fromstring(data, parser)
except lxml.etree.XMLSyntaxError:
return None
docinfo = document.getroottree().docinfo
prev = []
p = document.getroottree().getroot().getprevious()
while p is not None:
prev.insert(
0,
lxml.etree.tostring(p)
)
p = p.getprevious()
doctype = docinfo.doctype
if prev:
doctype += "\n".join(prev).strip()
doctype = doctype.strip()
s = lxml.etree.tostring(
document,
pretty_print=True,
xml_declaration=True,
doctype=doctype or None,
encoding=docinfo.encoding
)
return "XML-like data", format_text(s)
class ViewJSON(View):
name = "JSON"
prompt = ("json", "s")
content_types = ["application/json"]
def __call__(self, data, **metadata):
pretty_json = utils.pretty_json(data)
if pretty_json:
return "JSON", format_text(pretty_json)
class ViewHTML(View):
name = "HTML"
prompt = ("html", "h")
content_types = ["text/html"]
def __call__(self, data, **metadata):
if utils.isXML(data):
parser = lxml.etree.HTMLParser(
strip_cdata=True,
remove_blank_text=True
)
d = lxml.html.fromstring(data, parser=parser)
docinfo = d.getroottree().docinfo
s = lxml.etree.tostring(
d,
pretty_print=True,
doctype=docinfo.doctype,
encoding='utf8'
)
return "HTML", format_text(s)
class ViewHTMLOutline(View):
name = "HTML Outline"
prompt = ("html outline", "o")
content_types = ["text/html"]
def __call__(self, data, **metadata):
data = data.decode("utf-8")
h = html2text.HTML2Text(baseurl="")
h.ignore_images = True
h.body_width = 0
outline = h.handle(data)
return "HTML Outline", format_text(outline)
class ViewURLEncoded(View):
name = "URL-encoded"
prompt = ("urlencoded", "u")
content_types = ["application/x-www-form-urlencoded"]
def __call__(self, data, **metadata):
d = urldecode(data)
return "URLEncoded form", format_dict(ODict(d))
class ViewMultipart(View):
name = "Multipart Form"
prompt = ("multipart", "m")
content_types = ["multipart/form-data"]
@staticmethod
def _format(v):
yield [("highlight", "Form data:\n")]
for message in format_dict(ODict(v)):
yield message
def __call__(self, data, **metadata):
headers = metadata.get("headers", {})
v = multipartdecode(headers, data)
if v:
return "Multipart form", self._format(v)
if pyamf:
class DummyObject(dict):
def __init__(self, alias):
dict.__init__(self)
def __readamf__(self, input):
data = input.readObject()
self["data"] = data
def pyamf_class_loader(s):
for i in pyamf.CLASS_LOADERS:
if i != pyamf_class_loader:
v = i(s)
if v:
return v
return DummyObject
pyamf.register_class_loader(pyamf_class_loader)
class ViewAMF(View):
name = "AMF"
prompt = ("amf", "f")
content_types = ["application/x-amf"]
def unpack(self, b, seen=set([])):
if hasattr(b, "body"):
return self.unpack(b.body, seen)
if isinstance(b, DummyObject):
if id(b) in seen:
return "<recursion>"
else:
seen.add(id(b))
for k, v in b.items():
b[k] = self.unpack(v, seen)
return b
elif isinstance(b, dict):
for k, v in b.items():
b[k] = self.unpack(v, seen)
return b
elif isinstance(b, list):
return [self.unpack(i) for i in b]
elif isinstance(b, datetime.datetime):
return str(b)
elif isinstance(b, flex.ArrayCollection):
return [self.unpack(i, seen) for i in b]
else:
return b
def _format(self, envelope):
for target, message in iter(envelope):
if isinstance(message, pyamf.remoting.Request):
yield [
("header", "Request: "),
("text", str(target)),
]
else:
yield [
("header", "Response: "),
("text", "%s, code %s" % (target, message.status)),
]
s = json.dumps(self.unpack(message), indent=4)
for msg in format_text(s):
yield msg
def __call__(self, data, **metadata):
envelope = remoting.decode(data, strict=False)
if envelope:
return "AMF v%s" % envelope.amfVersion, self._format(envelope)
class ViewJavaScript(View):
name = "JavaScript"
prompt = ("javascript", "j")
content_types = [
"application/x-javascript",
"application/javascript",
"text/javascript"
]
def __call__(self, data, **metadata):
opts = jsbeautifier.default_options()
opts.indent_size = 2
res = jsbeautifier.beautify(data, opts)
return "JavaScript", format_text(res)
class ViewCSS(View):
name = "CSS"
prompt = ("css", "c")
content_types = [
"text/css"
]
def __call__(self, data, **metadata):
if cssutils:
sheet = cssutils.parseString(data)
beautified = sheet.cssText
else:
beautified = data
return "CSS", format_text(beautified)
class ViewImage(View):
name = "Image"
prompt = ("image", "i")
content_types = [
"image/png",
"image/jpeg",
"image/gif",
"image/vnd.microsoft.icon",
"image/x-icon",
]
def __call__(self, data, **metadata):
try:
img = Image.open(cStringIO.StringIO(data))
except IOError:
return None
parts = [
("Format", str(img.format_description)),
("Size", "%s x %s px" % img.size),
("Mode", str(img.mode)),
]
for i in sorted(img.info.keys()):
if i != "exif":
parts.append(
(str(i), str(img.info[i]))
)
if hasattr(img, "_getexif"):
ex = img._getexif()
if ex:
for i in sorted(ex.keys()):
tag = TAGS.get(i, i)
parts.append(
(str(tag), str(ex[i]))
)
fmt = format_dict(ODict(parts))
return "%s image" % img.format, fmt
class ViewProtobuf(View):
"""Human friendly view of protocol buffers
The view uses the protoc compiler to decode the binary
"""
name = "Protocol Buffer"
prompt = ("protobuf", "p")
content_types = [
"application/x-protobuf",
"application/x-protobuffer",
]
@staticmethod
def is_available():
try:
p = subprocess.Popen(
["protoc", "--version"],
stdout=subprocess.PIPE
)
out, _ = p.communicate()
return out.startswith("libprotoc")
except:
return False
def decode_protobuf(self, content):
# if Popen raises OSError, it will be caught in
# get_content_view and fall back to Raw
p = subprocess.Popen(['protoc', '--decode_raw'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate(input=content)
if out:
return out
else:
return err
def __call__(self, data, **metadata):
decoded = self.decode_protobuf(data)
return "Protobuf", format_text(decoded)
class ViewWBXML(View):
name = "WBXML"
prompt = ("wbxml", "w")
content_types = [
"application/vnd.wap.wbxml",
"application/vnd.ms-sync.wbxml"
]
def __call__(self, data, **metadata):
try:
parser = ASCommandResponse(data)
parsedContent = parser.xmlString
if parsedContent:
return "WBXML", format_text(parsedContent)
except:
return None
views = []
content_types_map = {}
view_prompts = []
def get(name):
for i in views:
if i.name == name:
return i
def get_by_shortcut(c):
for i in views:
if i.prompt[1] == c:
return i
def add(view):
# TODO: auto-select a different name (append an integer?)
for i in views:
if i.name == view.name:
raise ContentViewException("Duplicate view: " + view.name)
# TODO: the UI should auto-prompt for a replacement shortcut
for prompt in view_prompts:
if prompt[1] == view.prompt[1]:
raise ContentViewException("Duplicate view shortcut: " + view.prompt[1])
views.append(view)
for ct in view.content_types:
l = content_types_map.setdefault(ct, [])
l.append(view)
view_prompts.append(view.prompt)
def remove(view):
for ct in view.content_types:
l = content_types_map.setdefault(ct, [])
l.remove(view)
if not len(l):
del content_types_map[ct]
view_prompts.remove(view.prompt)
views.remove(view)
add(ViewAuto())
add(ViewRaw())
add(ViewHex())
add(ViewJSON())
add(ViewXML())
add(ViewWBXML())
add(ViewHTML())
add(ViewHTMLOutline())
add(ViewJavaScript())
add(ViewCSS())
add(ViewURLEncoded())
add(ViewMultipart())
add(ViewImage())
if pyamf:
add(ViewAMF())
if ViewProtobuf.is_available():
add(ViewProtobuf())
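# A third-party view could be registered through the same add() call. A minimal
# illustrative sketch follows (ViewMyFormat, its prompt shortcut and content type
# are hypothetical, not part of this module):
#
#   class ViewMyFormat(View):
#       name = "My Format"
#       prompt = ("myformat", "y")
#       content_types = ["application/x-my-format"]
#
#       def __call__(self, data, **metadata):
#           return "My Format", format_text(data)
#
#   add(ViewMyFormat())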
def safe_to_print(lines, encoding="utf8"):
"""
Wraps a content generator so that each text portion is a *safe to print* unicode string.
"""
for line in lines:
clean_line = []
for (style, text) in line:
try:
text = clean_bin(text.decode(encoding, "strict"))
except UnicodeDecodeError:
text = clean_bin(text).decode(encoding, "strict")
clean_line.append((style, text))
yield clean_line
def get_content_view(viewmode, data, **metadata):
"""
Args:
viewmode: the view to use.
data, **metadata: arguments passed to View instance.
Returns:
A (description, content generator) tuple.
In contrast to calling the views directly, text is always safe-to-print unicode.
Raises:
ContentViewException, if the content view threw an error.
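    Example (illustrative; ``body`` and ``headers`` stand in for a message's raw
    content and its header dict):
        description, lines = get_content_view(get("Raw"), body, headers=headers)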
"""
if not data:
return "No content", []
msg = []
headers = metadata.get("headers", {})
enc = headers.get("content-encoding")
if enc and enc != "identity":
decoded = encoding.decode(enc, data)
if decoded:
data = decoded
msg.append("[decoded %s]" % enc)
try:
ret = viewmode(data, **metadata)
# Third-party viewers can fail in unexpected ways...
except Exception as e:
six.reraise(
ContentViewException,
ContentViewException(str(e)),
sys.exc_info()[2]
)
if not ret:
ret = get("Raw")(data, **metadata)
msg.append("Couldn't parse: falling back to Raw")
else:
msg.append(ret[0])
return " ".join(msg), safe_to_print(ret[1])
| mit |
airspeed-velocity/asv | asv/plugins/virtualenv.py | 2 | 5810 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
from distutils.version import LooseVersion
import sys
import re
import os
import six
from .. import environment
from ..console import log
from .. import util
WIN = (os.name == "nt")
class Virtualenv(environment.Environment):
"""
Manage an environment using virtualenv.
"""
tool_name = "virtualenv"
def __init__(self, conf, python, requirements, tagged_env_vars):
"""
Parameters
----------
conf : Config instance
python : str
Version of Python. Must be of the form "MAJOR.MINOR".
executable : str
Path to Python executable.
requirements : dict
Dictionary mapping a PyPI package name to a version
identifier string.
"""
executable = Virtualenv._find_python(python)
if executable is None:
raise environment.EnvironmentUnavailable(
"No executable found for python {0}".format(python))
self._executable = executable
self._python = python
self._requirements = requirements
super(Virtualenv, self).__init__(conf,
python,
requirements,
tagged_env_vars)
try:
import virtualenv
except ImportError:
raise environment.EnvironmentUnavailable(
"virtualenv package not installed")
@staticmethod
def _find_python(python):
"""Find Python executable for the given Python version"""
is_pypy = python.startswith("pypy")
# Parse python specifier
if is_pypy:
executable = python
if python == 'pypy':
python_version = '2'
else:
python_version = python[4:]
else:
python_version = python
executable = "python{0}".format(python_version)
# Find Python executable on path
try:
return util.which(executable)
except IOError:
pass
# Maybe the current one is correct?
current_is_pypy = hasattr(sys, 'pypy_version_info')
current_versions = ['{0[0]}'.format(sys.version_info),
'{0[0]}.{0[1]}'.format(sys.version_info)]
if is_pypy == current_is_pypy and python_version in current_versions:
return sys.executable
return None
@property
def name(self):
"""
Get a name to uniquely identify this environment.
"""
python = self._python
if self._python.startswith('pypy'):
# get_env_name adds py-prefix
python = python[2:]
return environment.get_env_name(self.tool_name,
python,
self._requirements,
self._tagged_env_vars)
@classmethod
def matches(self, python):
if not (re.match(r'^[0-9].*$', python) or re.match(r'^pypy[0-9.]*$', python)):
# The python name should be a version number, or pypy+number
return False
try:
import virtualenv
except ImportError:
return False
else:
if LooseVersion(virtualenv.__version__) == LooseVersion('1.11.0'):
log.warning(
"asv is not compatible with virtualenv 1.11 due to a bug in "
"setuptools.")
if LooseVersion(virtualenv.__version__) < LooseVersion('1.10'):
log.warning(
"If using virtualenv, it much be at least version 1.10")
executable = Virtualenv._find_python(python)
return executable is not None
def _setup(self):
"""
Setup the environment on disk using virtualenv.
Then, all of the requirements are installed into
it using `pip install`.
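        Roughly equivalent to (an illustrative sketch; <env-path> denotes this
        environment's directory)::
            virtualenv -p <python-executable> <env-path>
            <env-path>/bin/python -mpip install wheel 'pip>=8'
            <env-path>/bin/python -mpip install --upgrade <requirement>==<version> ...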
"""
env = dict(os.environ)
env.update(self.build_env_vars)
log.info("Creating virtualenv for {0}".format(self.name))
util.check_call([
sys.executable,
"-mvirtualenv",
"-p",
self._executable,
self._path], env=env)
log.info("Installing requirements for {0}".format(self.name))
self._install_requirements()
def _install_requirements(self):
if sys.version_info[:2] == (3, 2):
pip_args = ['install', '-v', 'wheel<0.29.0', 'pip<8']
else:
pip_args = ['install', '-v', 'wheel', 'pip>=8']
env = dict(os.environ)
env.update(self.build_env_vars)
self._run_pip(pip_args, env=env)
if self._requirements:
args = ['install', '-v', '--upgrade']
for key, val in six.iteritems(self._requirements):
pkg = key
if key.startswith('pip+'):
pkg = key[4:]
if val:
args.append("{0}=={1}".format(pkg, val))
else:
args.append(pkg)
self._run_pip(args, timeout=self._install_timeout, env=env)
def _run_pip(self, args, **kwargs):
# Run pip via python -m pip, so that it works on Windows when
# upgrading pip itself, and avoids shebang length limit on Linux
return self.run_executable('python', ['-mpip'] + list(args), **kwargs)
def run(self, args, **kwargs):
log.debug("Running '{0}' in {1}".format(' '.join(args), self.name))
return self.run_executable('python', args, **kwargs)
| bsd-3-clause |
akirk/youtube-dl | youtube_dl/extractor/pyvideo.py | 158 | 1983 | from __future__ import unicode_literals
import re
import os
from .common import InfoExtractor
class PyvideoIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?pyvideo\.org/video/(?P<id>\d+)/(.*)'
_TESTS = [
{
'url': 'http://pyvideo.org/video/1737/become-a-logging-expert-in-30-minutes',
'md5': 'de317418c8bc76b1fd8633e4f32acbc6',
'info_dict': {
'id': '24_4WWkSmNo',
'ext': 'mp4',
'title': 'Become a logging expert in 30 minutes',
'description': 'md5:9665350d466c67fb5b1598de379021f7',
'upload_date': '20130320',
'uploader': 'NextDayVideo',
'uploader_id': 'NextDayVideo',
},
'add_ie': ['Youtube'],
},
{
'url': 'http://pyvideo.org/video/2542/gloriajw-spotifywitherikbernhardsson182m4v',
'md5': '5fe1c7e0a8aa5570330784c847ff6d12',
'info_dict': {
'id': '2542',
'ext': 'm4v',
'title': 'Gloriajw-SpotifyWithErikBernhardsson182',
},
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
m_youtube = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', webpage)
if m_youtube is not None:
return self.url_result(m_youtube.group(1), 'Youtube')
title = self._html_search_regex(
r'<div class="section">\s*<h3(?:\s+class="[^"]*"[^>]*)?>([^>]+?)</h3>',
webpage, 'title', flags=re.DOTALL)
video_url = self._search_regex(
[r'<source src="(.*?)"', r'<dt>Download</dt>.*?<a href="(.+?)"'],
webpage, 'video url', flags=re.DOTALL)
return {
'id': video_id,
'title': os.path.splitext(title)[0],
'url': video_url,
}
| unlicense |
armando-migliaccio/tempest | tempest/api/compute/images/test_images_oneserver_negative.py | 1 | 6602 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import clients
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest.test import attr
from tempest.test import skip_because
LOG = logging.getLogger(__name__)
class ImagesOneServerNegativeTestJSON(base.BaseV2ComputeTest):
_interface = 'json'
def tearDown(self):
"""Terminate test instances created after a test is executed."""
for image_id in self.image_ids:
self.client.delete_image(image_id)
self.image_ids.remove(image_id)
super(ImagesOneServerNegativeTestJSON, self).tearDown()
def setUp(self):
# NOTE(afazekas): Normally we use the same server with all test cases,
# but if it has an issue, we build a new one
super(ImagesOneServerNegativeTestJSON, self).setUp()
# Check if the server is in a clean state after test
try:
self.servers_client.wait_for_server_status(self.server_id,
'ACTIVE')
except Exception as exc:
LOG.exception(exc)
            # Rebuild the server if it cannot reach the ACTIVE state;
            # usually it means the server had a serious accident
self._reset_server()
def _reset_server(self):
self.__class__.server_id = self.rebuild_server(self.server_id)
@classmethod
def setUpClass(cls):
super(ImagesOneServerNegativeTestJSON, cls).setUpClass()
cls.client = cls.images_client
if not cls.config.service_available.glance:
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
try:
resp, server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
except Exception:
cls.tearDownClass()
raise
cls.image_ids = []
if cls.multi_user:
if cls.config.compute.allow_tenant_isolation:
creds = cls.isolated_creds.get_alt_creds()
username, tenant_name, password = creds
cls.alt_manager = clients.Manager(username=username,
password=password,
tenant_name=tenant_name)
else:
# Use the alt_XXX credentials in the config file
cls.alt_manager = clients.AltManager()
cls.alt_client = cls.alt_manager.images_client
@skip_because(bug="1006725")
@attr(type=['negative', 'gate'])
def test_create_image_specify_multibyte_character_image_name(self):
# Return an error if the image name has multi-byte characters
snapshot_name = data_utils.rand_name('\xef\xbb\xbf')
self.assertRaises(exceptions.BadRequest,
self.client.create_image, self.server_id,
snapshot_name)
@attr(type=['negative', 'gate'])
def test_create_image_specify_invalid_metadata(self):
# Return an error when creating image with invalid metadata
snapshot_name = data_utils.rand_name('test-snap-')
meta = {'': ''}
self.assertRaises(exceptions.BadRequest, self.client.create_image,
self.server_id, snapshot_name, meta)
@attr(type=['negative', 'gate'])
def test_create_image_specify_metadata_over_limits(self):
# Return an error when creating image with meta data over 256 chars
snapshot_name = data_utils.rand_name('test-snap-')
meta = {'a' * 260: 'b' * 260}
self.assertRaises(exceptions.BadRequest, self.client.create_image,
self.server_id, snapshot_name, meta)
@attr(type=['negative', 'gate'])
def test_create_second_image_when_first_image_is_being_saved(self):
# Disallow creating another image when first image is being saved
# Create first snapshot
snapshot_name = data_utils.rand_name('test-snap-')
resp, body = self.client.create_image(self.server_id,
snapshot_name)
self.assertEqual(202, resp.status)
image_id = data_utils.parse_image_id(resp['location'])
self.image_ids.append(image_id)
self.addCleanup(self._reset_server)
# Create second snapshot
alt_snapshot_name = data_utils.rand_name('test-snap-')
self.assertRaises(exceptions.Conflict, self.client.create_image,
self.server_id, alt_snapshot_name)
@attr(type=['negative', 'gate'])
def test_create_image_specify_name_over_256_chars(self):
# Return an error if snapshot name over 256 characters is passed
snapshot_name = data_utils.rand_name('a' * 260)
self.assertRaises(exceptions.BadRequest, self.client.create_image,
self.server_id, snapshot_name)
@attr(type=['negative', 'gate'])
def test_delete_image_that_is_not_yet_active(self):
        # Return an error while trying to delete an image that is still being created
snapshot_name = data_utils.rand_name('test-snap-')
resp, body = self.client.create_image(self.server_id, snapshot_name)
self.assertEqual(202, resp.status)
image_id = data_utils.parse_image_id(resp['location'])
self.image_ids.append(image_id)
self.addCleanup(self._reset_server)
# Do not wait, attempt to delete the image, ensure it's successful
resp, body = self.client.delete_image(image_id)
self.assertEqual('204', resp['status'])
self.image_ids.remove(image_id)
self.assertRaises(exceptions.NotFound, self.client.get_image, image_id)
class ImagesOneServerNegativeTestXML(ImagesOneServerNegativeTestJSON):
_interface = 'xml'
| apache-2.0 |
jay-tyler/ansible | lib/ansible/plugins/lookup/flattened.py | 103 | 2506 | # (c) 2013, Serge van Ginderachter <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import *
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):
def _check_list_of_one_list(self, term):
# make sure term is not a list of one (list of one..) item
# return the final non list item if so
if isinstance(term,list) and len(term) == 1:
term = term[0]
if isinstance(term,list):
term = self._check_list_of_one_list(term)
return term
def _do_flatten(self, terms, variables):
ret = []
for term in terms:
term = self._check_list_of_one_list(term)
if term == 'None' or term == 'null':
# ignore undefined items
break
if isinstance(term, basestring):
# convert a variable to a list
term2 = listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader)
# but avoid converting a plain string to a list of one string
if term2 != [ term ]:
term = term2
if isinstance(term, list):
# if it's a list, check recursively for items that are a list
term = self._do_flatten(term, variables)
ret.extend(term)
else:
ret.append(term)
return ret
def run(self, terms, variables, **kwargs):
### FIXME: Is this needed now that listify is run on all lookup plugin terms?
if not isinstance(terms, list):
raise AnsibleError("with_flattened expects a list")
return self._do_flatten(terms, variables)
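# Illustrative playbook usage (an assumed, typical with_flattened form):
#
#   - debug: msg={{ item }}
#     with_flattened:
#       - [ 'a', 'b' ]
#       - [ [ 'c' ], 'd' ]
#
# which loops over the flattened items: 'a', 'b', 'c', 'd'.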
| gpl-3.0 |
bennojoy/ansible | v1/ansible/runner/__init__.py | 77 | 69625 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import multiprocessing
import signal
import os
import pwd
import Queue
import random
import traceback
import tempfile
import time
import collections
import socket
import base64
import sys
import pipes
import jinja2
import subprocess
import getpass
import ansible.constants as C
import ansible.inventory
from ansible import utils
from ansible.utils import template
from ansible.utils import check_conditional
from ansible.utils import string_functions
from ansible import errors
from ansible import module_common
import poller
import connection
from return_data import ReturnData
from ansible.callbacks import DefaultRunnerCallbacks, vv
from ansible.module_common import ModuleReplacer
from ansible.module_utils.splitter import split_args, unquote
from ansible.cache import FactCache
from ansible.utils import update_hash
module_replacer = ModuleReplacer(strip_comments=False)
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
HAS_ATFORK=True
try:
from Crypto.Random import atfork
except ImportError:
HAS_ATFORK=False
multiprocessing_runner = None
OUTPUT_LOCKFILE = tempfile.TemporaryFile()
PROCESS_LOCKFILE = tempfile.TemporaryFile()
################################################
def _executor_hook(job_queue, result_queue, new_stdin):
# attempt workaround of https://github.com/newsapps/beeswithmachineguns/issues/17
    # this function is also not present in CentOS 6
if HAS_ATFORK:
atfork()
signal.signal(signal.SIGINT, signal.SIG_IGN)
while not job_queue.empty():
try:
host = job_queue.get(block=False)
return_data = multiprocessing_runner._executor(host, new_stdin)
result_queue.put(return_data)
except Queue.Empty:
pass
except:
traceback.print_exc()
class HostVars(dict):
''' A special view of vars_cache that adds values from the inventory when needed. '''
def __init__(self, vars_cache, inventory, vault_password=None):
self.vars_cache = vars_cache
self.inventory = inventory
self.lookup = {}
self.update(vars_cache)
self.vault_password = vault_password
def __getitem__(self, host):
if host not in self.lookup:
result = self.inventory.get_variables(host, vault_password=self.vault_password).copy()
result.update(self.vars_cache.get(host, {}))
self.lookup[host] = template.template('.', result, self.vars_cache)
return self.lookup[host]
class Runner(object):
''' core API interface to ansible '''
# see bin/ansible for how this is used...
def __init__(self,
host_list=C.DEFAULT_HOST_LIST, # ex: /etc/ansible/hosts, legacy usage
module_path=None, # ex: /usr/share/ansible
module_name=C.DEFAULT_MODULE_NAME, # ex: copy
module_args=C.DEFAULT_MODULE_ARGS, # ex: "src=/tmp/a dest=/tmp/b"
forks=C.DEFAULT_FORKS, # parallelism level
timeout=C.DEFAULT_TIMEOUT, # SSH timeout
pattern=C.DEFAULT_PATTERN, # which hosts? ex: 'all', 'acme.example.org'
remote_user=C.DEFAULT_REMOTE_USER, # ex: 'username'
remote_pass=C.DEFAULT_REMOTE_PASS, # ex: 'password123' or None if using key
remote_port=None, # if SSH on different ports
private_key_file=C.DEFAULT_PRIVATE_KEY_FILE, # if not using keys/passwords
background=0, # async poll every X seconds, else 0 for non-async
basedir=None, # directory of playbook, if applicable
setup_cache=None, # used to share fact data w/ other tasks
vars_cache=None, # used to store variables about hosts
transport=C.DEFAULT_TRANSPORT, # 'ssh', 'paramiko', 'local'
conditional='True', # run only if this fact expression evals to true
callbacks=None, # used for output
module_vars=None, # a playbooks internals thing
play_vars=None, #
play_file_vars=None, #
role_vars=None, #
role_params=None, #
default_vars=None, #
extra_vars=None, # extra vars specified with he playbook(s)
is_playbook=False, # running from playbook or not?
inventory=None, # reference to Inventory object
subset=None, # subset pattern
check=False, # don't make any changes, just try to probe for potential changes
diff=False, # whether to show diffs for template files that change
environment=None, # environment variables (as dict) to use inside the command
complex_args=None, # structured data in addition to module_args, must be a dict
error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR, # ex. False
accelerate=False, # use accelerated connection
accelerate_ipv6=False, # accelerated connection w/ IPv6
accelerate_port=None, # port to use with accelerated connection
vault_pass=None,
run_hosts=None, # an optional list of pre-calculated hosts to run on
no_log=False, # option to enable/disable logging for a given task
run_once=False, # option to enable/disable host bypass loop for a given task
become=False, # whether to run privilege escalation or not
become_method=C.DEFAULT_BECOME_METHOD,
become_user=C.DEFAULT_BECOME_USER, # ex: 'root'
become_pass=C.DEFAULT_BECOME_PASS, # ex: 'password123' or None
become_exe=C.DEFAULT_BECOME_EXE, # ex: /usr/local/bin/sudo
):
# used to lock multiprocess inputs and outputs at various levels
self.output_lockfile = OUTPUT_LOCKFILE
self.process_lockfile = PROCESS_LOCKFILE
if not complex_args:
complex_args = {}
# storage & defaults
self.check = check
self.diff = diff
self.setup_cache = utils.default(setup_cache, lambda: ansible.cache.FactCache())
self.vars_cache = utils.default(vars_cache, lambda: collections.defaultdict(dict))
self.basedir = utils.default(basedir, lambda: os.getcwd())
self.callbacks = utils.default(callbacks, lambda: DefaultRunnerCallbacks())
self.generated_jid = str(random.randint(0, 999999999999))
self.transport = transport
self.inventory = utils.default(inventory, lambda: ansible.inventory.Inventory(host_list))
self.module_vars = utils.default(module_vars, lambda: {})
self.play_vars = utils.default(play_vars, lambda: {})
self.play_file_vars = utils.default(play_file_vars, lambda: {})
self.role_vars = utils.default(role_vars, lambda: {})
self.role_params = utils.default(role_params, lambda: {})
self.default_vars = utils.default(default_vars, lambda: {})
self.extra_vars = utils.default(extra_vars, lambda: {})
self.always_run = None
self.connector = connection.Connector(self)
self.conditional = conditional
self.delegate_to = None
self.module_name = module_name
self.forks = int(forks)
self.pattern = pattern
self.module_args = module_args
self.timeout = timeout
self.remote_user = remote_user
self.remote_pass = remote_pass
self.remote_port = remote_port
self.private_key_file = private_key_file
self.background = background
self.become = become
self.become_method = become_method
self.become_user_var = become_user
self.become_user = None
self.become_pass = become_pass
self.become_exe = become_exe
self.is_playbook = is_playbook
self.environment = environment
self.complex_args = complex_args
self.error_on_undefined_vars = error_on_undefined_vars
self.accelerate = accelerate
self.accelerate_port = accelerate_port
self.accelerate_ipv6 = accelerate_ipv6
self.callbacks.runner = self
self.omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
self.vault_pass = vault_pass
self.no_log = no_log
self.run_once = run_once
if self.transport == 'smart':
# If the transport is 'smart', check to see if certain conditions
# would prevent us from using ssh, and fallback to paramiko.
# 'smart' is the default since 1.2.1/1.3
self.transport = "ssh"
if sys.platform.startswith('darwin') and self.remote_pass:
# due to a current bug in sshpass on OSX, which can trigger
# a kernel panic even for non-privileged users, we revert to
                # paramiko on that OS when an SSH password is specified
self.transport = "paramiko"
else:
# see if SSH can support ControlPersist if not use paramiko
cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
if "Bad configuration option" in err:
self.transport = "paramiko"
# save the original transport, in case it gets
# changed later via options like accelerate
self.original_transport = self.transport
# misc housekeeping
if subset and self.inventory._subset is None:
# don't override subset when passed from playbook
self.inventory.subset(subset)
# If we get a pre-built list of hosts to run on, from say a playbook, use them.
# Also where we will store the hosts to run on once discovered
self.run_hosts = run_hosts
if self.transport == 'local':
self.remote_user = pwd.getpwuid(os.geteuid())[0]
if module_path is not None:
for i in module_path.split(os.pathsep):
utils.plugins.module_finder.add_directory(i)
utils.plugins.push_basedir(self.basedir)
# ensure we are using unique tmp paths
random.seed()
# *****************************************************
def _complex_args_hack(self, complex_args, module_args):
"""
        ansible-playbook allows specifying both key=value string arguments and complex
        (structured) arguments; however, not all modules use our python common module
        system and so cannot access the complex form. An example might be a Bash module.
        This hack allows users to still pass "args" as a hash of simple scalars to those
        modules, and is a short-term measure. We could technically just feed JSON to the
        module, but that makes it hard on Bash consumers. The way this is implemented
        does mean values in 'args' have LOWER priority than those on the key=value line,
        allowing args to provide yet another way to have pluggable defaults.
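        For example (illustrative only), complex_args of {'dest': '/tmp/foo'} combined
        with module_args of 'src=/tmp/bar' yields the key=value string
        "dest=/tmp/foo src=/tmp/bar".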
"""
if complex_args is None:
return module_args
if not isinstance(complex_args, dict):
raise errors.AnsibleError("complex arguments are not a dictionary: %s" % complex_args)
for (k,v) in complex_args.iteritems():
if isinstance(v, basestring):
module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
return module_args
# *****************************************************
def _transfer_str(self, conn, tmp, name, data):
''' transfer string to remote file '''
if type(data) == dict:
data = utils.jsonify(data)
afd, afile = tempfile.mkstemp()
afo = os.fdopen(afd, 'w')
try:
if not isinstance(data, unicode):
#ensure the data is valid UTF-8
data.decode('utf-8')
else:
data = data.encode('utf-8')
afo.write(data)
except:
raise errors.AnsibleError("failure encoding into utf-8")
afo.flush()
afo.close()
remote = conn.shell.join_path(tmp, name)
try:
conn.put_file(afile, remote)
finally:
os.unlink(afile)
return remote
# *****************************************************
def _compute_environment_string(self, conn, inject=None):
''' what environment variables to use when running the command? '''
enviro = {}
if self.environment:
enviro = template.template(self.basedir, self.environment, inject, convert_bare=True)
enviro = utils.safe_eval(enviro)
if type(enviro) != dict:
raise errors.AnsibleError("environment must be a dictionary, received %s" % enviro)
return conn.shell.env_prefix(**enviro)
# *****************************************************
def _compute_delegate(self, password, remote_inject):
""" Build a dictionary of all attributes for the delegate host """
delegate = {}
# allow delegated host to be templated
delegate['inject'] = remote_inject.copy()
# set any interpreters
interpreters = []
for i in delegate['inject']:
if i.startswith("ansible_") and i.endswith("_interpreter"):
interpreters.append(i)
for i in interpreters:
del delegate['inject'][i]
port = C.DEFAULT_REMOTE_PORT
# get the vars for the delegate by its name
try:
this_info = delegate['inject']['hostvars'][self.delegate_to]
except:
# make sure the inject is empty for non-inventory hosts
this_info = {}
# get the real ssh_address for the delegate
# and allow ansible_ssh_host to be templated
delegate['ssh_host'] = template.template(
self.basedir,
this_info.get('ansible_ssh_host', self.delegate_to),
this_info,
fail_on_undefined=True
)
delegate['port'] = this_info.get('ansible_ssh_port', port)
delegate['user'] = self._compute_delegate_user(self.delegate_to, delegate['inject'])
delegate['pass'] = this_info.get('ansible_ssh_pass', password)
delegate['private_key_file'] = this_info.get('ansible_ssh_private_key_file', self.private_key_file)
delegate['transport'] = this_info.get('ansible_connection', self.transport)
delegate['become_pass'] = this_info.get('ansible_become_pass', this_info.get('ansible_ssh_pass', self.become_pass))
# Last chance to get private_key_file from global variables.
# this is useful if delegated host is not defined in the inventory
if delegate['private_key_file'] is None:
delegate['private_key_file'] = remote_inject.get('ansible_ssh_private_key_file', None)
if delegate['private_key_file'] is not None:
delegate['private_key_file'] = os.path.expanduser(delegate['private_key_file'])
for i in this_info:
if i.startswith("ansible_") and i.endswith("_interpreter"):
delegate['inject'][i] = this_info[i]
return delegate
def _compute_delegate_user(self, host, inject):
""" Calculate the remote user based on an order of preference """
# inventory > playbook > original_host
actual_user = inject.get('ansible_ssh_user', self.remote_user)
thisuser = None
try:
if host in inject['hostvars']:
if inject['hostvars'][host].get('ansible_ssh_user'):
# user for delegate host in inventory
thisuser = inject['hostvars'][host].get('ansible_ssh_user')
else:
# look up the variables for the host directly from inventory
host_vars = self.inventory.get_variables(host, vault_password=self.vault_pass)
if 'ansible_ssh_user' in host_vars:
thisuser = host_vars['ansible_ssh_user']
except errors.AnsibleError, e:
# the hostname was not found in the inventory, so
# we just ignore this and try the next method
pass
if thisuser is None and self.remote_user:
# user defined by play/runner
thisuser = self.remote_user
if thisuser is not None:
actual_user = thisuser
else:
# fallback to the inventory user of the play host
#actual_user = inject.get('ansible_ssh_user', actual_user)
actual_user = inject.get('ansible_ssh_user', self.remote_user)
return actual_user
def _count_module_args(self, args, allow_dupes=False):
'''
Count the number of k=v pairs in the supplied module args. This is
basically a specialized version of parse_kv() from utils with a few
minor changes.
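        For example (illustrative), args of "src=/tmp/a dest=/tmp/b" count as 2,
        while a bare token without '=' contributes nothing to the count.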
'''
options = {}
if args is not None:
try:
vargs = split_args(args)
except Exception, e:
if "unbalanced jinja2 block or quotes" in str(e):
raise errors.AnsibleError("error parsing argument string '%s', try quoting the entire line." % args)
else:
raise
for x in vargs:
quoted = x.startswith('"') and x.endswith('"') or x.startswith("'") and x.endswith("'")
if "=" in x and not quoted:
k, v = x.split("=",1)
is_shell_module = self.module_name in ('command', 'shell')
is_shell_param = k in ('creates', 'removes', 'chdir', 'executable')
if k in options and not allow_dupes:
if not(is_shell_module and not is_shell_param):
raise errors.AnsibleError("a duplicate parameter was found in the argument string (%s)" % k)
if is_shell_module and is_shell_param or not is_shell_module:
options[k] = v
return len(options)
# *****************************************************
def _execute_module(self, conn, tmp, module_name, args,
async_jid=None, async_module=None, async_limit=None, inject=None, persist_files=False, complex_args=None, delete_remote_tmp=True):
''' transfer and run a module along with its arguments on the remote side'''
# hack to support fireball mode
if module_name == 'fireball':
args = "%s password=%s" % (args, base64.b64encode(str(utils.key_for_hostname(conn.host))))
if 'port' not in args:
args += " port=%s" % C.ZEROMQ_PORT
(
module_style,
shebang,
module_data
) = self._configure_module(conn, module_name, args, inject, complex_args)
# a remote tmp path may be necessary and not already created
if self._late_needs_tmp_path(conn, tmp, module_style):
tmp = self._make_tmp_path(conn)
remote_module_path = conn.shell.join_path(tmp, module_name)
if (module_style != 'new'
or async_jid is not None
or not conn.has_pipelining
or not C.ANSIBLE_SSH_PIPELINING
or C.DEFAULT_KEEP_REMOTE_FILES
or self.become_method == 'su'):
self._transfer_str(conn, tmp, module_name, module_data)
environment_string = self._compute_environment_string(conn, inject)
if "tmp" in tmp and (self.become and self.become_user != 'root'):
# deal with possible umask issues once you become another user
self._remote_chmod(conn, 'a+r', remote_module_path, tmp)
cmd = ""
in_data = None
if module_style != 'new':
if 'CHECKMODE=True' in args:
# if module isn't using AnsibleModuleCommon infrastructure we can't be certain it knows how to
# do --check mode, so to be safe we will not run it.
return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot yet run check mode against old-style modules"))
elif 'NO_LOG' in args:
return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot use no_log: with old-style modules"))
args = template.template(self.basedir, args, inject)
# decide whether we need to transfer JSON or key=value
argsfile = None
if module_style == 'non_native_want_json':
if complex_args:
complex_args.update(utils.parse_kv(args))
argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(complex_args))
else:
argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(utils.parse_kv(args)))
else:
argsfile = self._transfer_str(conn, tmp, 'arguments', args)
if self.become and self.become_user != 'root':
                # deal with possible umask issues once we become another user
self._remote_chmod(conn, 'a+r', argsfile, tmp)
if async_jid is None:
cmd = "%s %s" % (remote_module_path, argsfile)
else:
cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module, argsfile]])
else:
if async_jid is None:
if conn.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES and not self.become_method == 'su':
in_data = module_data
else:
cmd = "%s" % (remote_module_path)
else:
cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module]])
if not shebang:
raise errors.AnsibleError("module is missing interpreter line")
rm_tmp = None
if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
if not self.become or self.become_user == 'root':
# not sudoing or sudoing to root, so can cleanup files in the same step
rm_tmp = tmp
cmd = conn.shell.build_module_command(environment_string, shebang, cmd, rm_tmp)
cmd = cmd.strip()
sudoable = True
if module_name == "accelerate":
# always run the accelerate module as the user
# specified in the play, not the become_user
sudoable = False
res = self._low_level_exec_command(conn, cmd, tmp, become=self.become, sudoable=sudoable, in_data=in_data)
if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
if self.become and self.become_user != 'root':
                # not becoming root, so we may not be able to delete files as that other
                # user; we have to clean up temp files as the original user in a second step
cmd2 = conn.shell.remove(tmp, recurse=True)
self._low_level_exec_command(conn, cmd2, tmp, sudoable=False)
data = utils.parse_json(res['stdout'], from_remote=True, no_exceptions=True)
if 'parsed' in data and data['parsed'] == False:
data['msg'] += res['stderr']
return ReturnData(conn=conn, result=data)
# *****************************************************
def _executor(self, host, new_stdin):
''' handler for multiprocessing library '''
try:
fileno = sys.stdin.fileno()
except ValueError:
fileno = None
try:
self._new_stdin = new_stdin
if not new_stdin and fileno is not None:
try:
self._new_stdin = os.fdopen(os.dup(fileno))
except OSError, e:
# couldn't dupe stdin, most likely because it's
# not a valid file descriptor, so we just rely on
# using the one that was passed in
pass
exec_rc = self._executor_internal(host, new_stdin)
if type(exec_rc) != ReturnData:
raise Exception("unexpected return type: %s" % type(exec_rc))
# redundant, right?
if not exec_rc.comm_ok:
self.callbacks.on_unreachable(host, exec_rc.result)
return exec_rc
except errors.AnsibleError, ae:
msg = str(ae)
self.callbacks.on_unreachable(host, msg)
return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
except Exception:
msg = traceback.format_exc()
self.callbacks.on_unreachable(host, msg)
return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
# *****************************************************
def get_combined_cache(self):
# merge the VARS and SETUP caches for this host
combined_cache = self.setup_cache.copy()
return utils.merge_hash(combined_cache, self.vars_cache)
def get_inject_vars(self, host):
host_variables = self.inventory.get_variables(host, vault_password=self.vault_pass)
combined_cache = self.get_combined_cache()
# use combined_cache and host_variables to template the module_vars
# we update the inject variables with the data we're about to template
# since some of the variables we'll be replacing may be contained there too
module_vars_inject = utils.combine_vars(host_variables, combined_cache.get(host, {}))
module_vars_inject = utils.combine_vars(self.module_vars, module_vars_inject)
module_vars = template.template(self.basedir, self.module_vars, module_vars_inject)
# remove bad variables from the module vars, which may be in there due
# the way role declarations are specified in playbooks
if 'tags' in module_vars:
del module_vars['tags']
if 'when' in module_vars:
del module_vars['when']
# start building the dictionary of injected variables
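        # Precedence summary, lowest to highest (later combine_vars calls win):
        # defaults -> inventory host vars -> facts (setup_cache) -> play vars ->
        # play file vars -> role vars -> task/module vars -> vars_cache
        # (set_fact / include_vars results) -> role params -> extra (-e) vars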
inject = {}
# default vars are the lowest priority
inject = utils.combine_vars(inject, self.default_vars)
# next come inventory variables for the host
inject = utils.combine_vars(inject, host_variables)
# then the setup_cache which contains facts gathered
inject = utils.combine_vars(inject, self.setup_cache.get(host, {}))
# next come variables from vars and vars files
inject = utils.combine_vars(inject, self.play_vars)
inject = utils.combine_vars(inject, self.play_file_vars)
# next come variables from role vars/main.yml files
inject = utils.combine_vars(inject, self.role_vars)
# then come the module variables
inject = utils.combine_vars(inject, module_vars)
# followed by vars_cache things (set_fact, include_vars, and
# vars_files which had host-specific templating done)
inject = utils.combine_vars(inject, self.vars_cache.get(host, {}))
# role parameters next
inject = utils.combine_vars(inject, self.role_params)
# and finally -e vars are the highest priority
inject = utils.combine_vars(inject, self.extra_vars)
# and then special vars
inject.setdefault('ansible_ssh_user', self.remote_user)
inject['group_names'] = host_variables.get('group_names', [])
inject['groups'] = self.inventory.groups_list()
inject['vars'] = self.module_vars
inject['defaults'] = self.default_vars
inject['environment'] = self.environment
inject['playbook_dir'] = os.path.abspath(self.basedir)
inject['omit'] = self.omit_token
inject['combined_cache'] = combined_cache
return inject
def _executor_internal(self, host, new_stdin):
''' executes any module one or more times '''
# We build the proper injected dictionary for all future
# templating operations in this run
inject = self.get_inject_vars(host)
# Then we selectively merge some variable dictionaries down to a
# single dictionary, used to template the HostVars for this host
temp_vars = self.inventory.get_variables(host, vault_password=self.vault_pass)
temp_vars = utils.combine_vars(temp_vars, inject['combined_cache'] )
temp_vars = utils.combine_vars(temp_vars, {'groups': inject['groups']})
temp_vars = utils.combine_vars(temp_vars, self.play_vars)
temp_vars = utils.combine_vars(temp_vars, self.play_file_vars)
temp_vars = utils.combine_vars(temp_vars, self.extra_vars)
hostvars = HostVars(temp_vars, self.inventory, vault_password=self.vault_pass)
# and we save the HostVars in the injected dictionary so they
# may be referenced from playbooks/templates
inject['hostvars'] = hostvars
host_connection = inject.get('ansible_connection', self.transport)
if host_connection in [ 'paramiko', 'ssh', 'accelerate' ]:
port = hostvars.get('ansible_ssh_port', self.remote_port)
if port is None:
port = C.DEFAULT_REMOTE_PORT
else:
# fireball, local, etc
port = self.remote_port
if self.inventory.basedir() is not None:
inject['inventory_dir'] = self.inventory.basedir()
if self.inventory.src() is not None:
inject['inventory_file'] = self.inventory.src()
# could be already set by playbook code
inject.setdefault('ansible_version', utils.version_info(gitinfo=False))
# allow with_foo to work in playbooks...
items = None
items_plugin = self.module_vars.get('items_lookup_plugin', None)
if items_plugin is not None and items_plugin in utils.plugins.lookup_loader:
basedir = self.basedir
if '_original_file' in inject:
basedir = os.path.dirname(inject['_original_file'])
filesdir = os.path.join(basedir, '..', 'files')
if os.path.exists(filesdir):
basedir = filesdir
try:
items_terms = self.module_vars.get('items_lookup_terms', '')
items_terms = template.template(basedir, items_terms, inject)
items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=basedir).run(items_terms, inject=inject)
except errors.AnsibleUndefinedVariable, e:
if 'has no attribute' in str(e):
# the undefined variable was an attribute of a variable that does
# exist, so try and run this through the conditional check to see
# if the user wanted to skip something on being undefined
if utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=True):
# the conditional check passed, so we have to fail here
raise
else:
# the conditional failed, so we skip this task
result = utils.jsonify(dict(changed=False, skipped=True))
self.callbacks.on_skipped(host, None)
return ReturnData(host=host, result=result)
except errors.AnsibleError, e:
raise
except Exception, e:
raise errors.AnsibleError("Unexpected error while executing task: %s" % str(e))
# strip out any jinja2 template syntax within
# the data returned by the lookup plugin
items = utils._clean_data_struct(items, from_remote=True)
if items is None:
items = []
else:
if type(items) != list:
raise errors.AnsibleError("lookup plugins have to return a list: %r" % items)
if len(items) and utils.is_list_of_strings(items) and self.module_name in ( 'apt', 'yum', 'pkgng', 'zypper', 'dnf' ):
# hack for apt, yum, and pkgng so that with_items maps back into a single module call
use_these_items = []
for x in items:
inject['item'] = x
if not self.conditional or utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
use_these_items.append(x)
inject['item'] = ",".join(use_these_items)
items = None
def _safe_template_complex_args(args, inject):
# Ensure the complex args here are a dictionary, but
# first template them if they contain a variable
returned_args = args
if isinstance(args, basestring):
# If the complex_args were evaluated to a dictionary and there are
# more keys in the templated version than the evaled version, some
# param inserted additional keys (the template() call also runs
# safe_eval on the var if it looks like it's a datastructure). If the
# evaled_args are not a dict, it's most likely a whole variable (ie.
# args: {{var}}), in which case there's no way to detect the proper
# count of params in the dictionary.
templated_args = template.template(self.basedir, args, inject, convert_bare=True)
evaled_args = utils.safe_eval(args)
if isinstance(evaled_args, dict) and len(evaled_args) > 0 and len(evaled_args) != len(templated_args):
raise errors.AnsibleError("a variable tried to insert extra parameters into the args for this task")
# set the returned_args to the templated_args
returned_args = templated_args
# and a final check to make sure the complex args are a dict
if returned_args is not None and not isinstance(returned_args, dict):
raise errors.AnsibleError("args must be a dictionary, received %s" % returned_args)
return returned_args
# logic to decide how to run things depends on whether with_items is used
if items is None:
complex_args = _safe_template_complex_args(self.complex_args, inject)
return self._executor_internal_inner(host, self.module_name, self.module_args, inject, port, complex_args=complex_args)
elif len(items) > 0:
# executing using with_items, so make multiple calls
# TODO: refactor
if self.background > 0:
raise errors.AnsibleError("lookup plugins (with_*) cannot be used with async tasks")
all_comm_ok = True
all_changed = False
all_failed = False
results = []
for x in items:
# use a fresh inject for each item
this_inject = inject.copy()
this_inject['item'] = x
complex_args = _safe_template_complex_args(self.complex_args, this_inject)
result = self._executor_internal_inner(
host,
self.module_name,
self.module_args,
this_inject,
port,
complex_args=complex_args
)
if 'stdout' in result.result and 'stdout_lines' not in result.result:
result.result['stdout_lines'] = result.result['stdout'].splitlines()
results.append(result.result)
if result.comm_ok == False:
all_comm_ok = False
all_failed = True
break
for x in results:
if x.get('changed') == True:
all_changed = True
if (x.get('failed') == True) or ('failed_when_result' in x and [x['failed_when_result']] or [('rc' in x) and (x['rc'] != 0)])[0]:
all_failed = True
break
msg = 'All items completed'
if all_failed:
msg = "One or more items failed."
rd_result = dict(failed=all_failed, changed=all_changed, results=results, msg=msg)
if not all_failed:
del rd_result['failed']
return ReturnData(host=host, comm_ok=all_comm_ok, result=rd_result)
else:
self.callbacks.on_skipped(host, None)
return ReturnData(host=host, comm_ok=True, result=dict(changed=False, skipped=True))
# *****************************************************
def _executor_internal_inner(self, host, module_name, module_args, inject, port, is_chained=False, complex_args=None):
''' decides how to invoke a module '''
# late processing of parameterized become_user (with_items,..)
if self.become_user_var is not None:
self.become_user = template.template(self.basedir, self.become_user_var, inject)
# module_name may be dynamic (but cannot contain {{ ansible_ssh_user }})
module_name = template.template(self.basedir, module_name, inject)
if module_name in utils.plugins.action_loader:
if self.background != 0:
raise errors.AnsibleError("async mode is not supported with the %s module" % module_name)
handler = utils.plugins.action_loader.get(module_name, self)
elif self.background == 0:
handler = utils.plugins.action_loader.get('normal', self)
else:
handler = utils.plugins.action_loader.get('async', self)
if type(self.conditional) != list:
self.conditional = [ self.conditional ]
for cond in self.conditional:
if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
result = dict(changed=False, skipped=True)
if self.no_log:
result = utils.censor_unlogged_data(result)
self.callbacks.on_skipped(host, result)
else:
self.callbacks.on_skipped(host, inject.get('item',None))
return ReturnData(host=host, result=utils.jsonify(result))
if getattr(handler, 'setup', None) is not None:
handler.setup(module_name, inject)
conn = None
actual_host = inject.get('ansible_ssh_host', host)
# allow ansible_ssh_host to be templated
actual_host = template.template(self.basedir, actual_host, inject, fail_on_undefined=True)
actual_port = port
actual_user = inject.get('ansible_ssh_user', self.remote_user)
actual_pass = inject.get('ansible_ssh_pass', self.remote_pass)
actual_transport = inject.get('ansible_connection', self.transport)
actual_private_key_file = inject.get('ansible_ssh_private_key_file', self.private_key_file)
actual_private_key_file = template.template(self.basedir, actual_private_key_file, inject, fail_on_undefined=True)
self.become = utils.boolean(inject.get('ansible_become', inject.get('ansible_sudo', inject.get('ansible_su', self.become))))
self.become_user = inject.get('ansible_become_user', inject.get('ansible_sudo_user', inject.get('ansible_su_user',self.become_user)))
self.become_pass = inject.get('ansible_become_pass', inject.get('ansible_sudo_pass', inject.get('ansible_su_pass', self.become_pass)))
self.become_exe = inject.get('ansible_become_exe', inject.get('ansible_sudo_exe', self.become_exe))
self.become_method = inject.get('ansible_become_method', self.become_method)
# select default root user in case self.become requested
# but no user specified; happens e.g. in host vars when
# just ansible_become=True is specified
if self.become and self.become_user is None:
self.become_user = 'root'
if actual_private_key_file is not None:
actual_private_key_file = os.path.expanduser(actual_private_key_file)
if self.accelerate and actual_transport != 'local':
            # Fix to get the inventory name of the host for the accelerate plugin
if inject.get('ansible_ssh_host', None):
self.accelerate_inventory_host = host
else:
self.accelerate_inventory_host = None
# if we're using accelerated mode, force the
# transport to accelerate
actual_transport = "accelerate"
if not self.accelerate_port:
self.accelerate_port = C.ACCELERATE_PORT
actual_port = inject.get('ansible_ssh_port', port)
        # the delegated host may have a different SSH port configured, etc.
# and we need to transfer those, and only those, variables
self.delegate_to = inject.get('delegate_to', None)
if self.delegate_to:
self.delegate_to = template.template(self.basedir, self.delegate_to, inject)
if self.delegate_to is not None:
delegate = self._compute_delegate(actual_pass, inject)
actual_transport = delegate['transport']
actual_host = delegate['ssh_host']
actual_port = delegate['port']
actual_user = delegate['user']
actual_pass = delegate['pass']
actual_private_key_file = delegate['private_key_file']
self.become_pass = delegate.get('become_pass',delegate.get('sudo_pass'))
inject = delegate['inject']
# set resolved delegate_to into inject so modules can call _remote_checksum
inject['delegate_to'] = self.delegate_to
# user/pass may still contain variables at this stage
actual_user = template.template(self.basedir, actual_user, inject)
try:
actual_pass = template.template(self.basedir, actual_pass, inject)
self.become_pass = template.template(self.basedir, self.become_pass, inject)
except:
            # ignore password template errors; could be triggered by password characters (#10468)
pass
# make actual_user available as __magic__ ansible_ssh_user variable
inject['ansible_ssh_user'] = actual_user
try:
if actual_transport == 'accelerate':
# for accelerate, we stuff both ports into a single
# variable so that we don't have to mangle other function
# calls just to accommodate this one case
actual_port = [actual_port, self.accelerate_port]
elif actual_port is not None:
actual_port = int(template.template(self.basedir, actual_port, inject))
except ValueError, e:
result = dict(failed=True, msg="FAILED: Configured port \"%s\" is not a valid port, expected integer" % actual_port)
return ReturnData(host=host, comm_ok=False, result=result)
try:
if self.delegate_to or host != actual_host:
delegate_host = host
else:
delegate_host = None
conn = self.connector.connect(actual_host, actual_port, actual_user, actual_pass, actual_transport, actual_private_key_file, delegate_host)
default_shell = getattr(conn, 'default_shell', '')
shell_type = inject.get('ansible_shell_type')
if not shell_type:
if default_shell:
shell_type = default_shell
else:
shell_type = os.path.basename(C.DEFAULT_EXECUTABLE)
shell_plugin = utils.plugins.shell_loader.get(shell_type)
if shell_plugin is None:
shell_plugin = utils.plugins.shell_loader.get('sh')
conn.shell = shell_plugin
except errors.AnsibleConnectionFailed, e:
result = dict(failed=True, msg="FAILED: %s" % str(e))
return ReturnData(host=host, comm_ok=False, result=result)
tmp = ''
# action plugins may DECLARE via TRANSFERS_FILES = True that they need a remote tmp path working dir
if self._early_needs_tmp_path(module_name, handler):
tmp = self._make_tmp_path(conn)
# allow module args to work as a dictionary
# though it is usually a string
if isinstance(module_args, dict):
module_args = utils.serialize_args(module_args)
# render module_args and complex_args templates
try:
# When templating module_args, we need to be careful to ensure
# that no variables inadvertently (or maliciously) add params
# to the list of args. We do this by counting the number of k=v
# pairs before and after templating.
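            # Illustrative (hypothetical) case: module_args of "msg={{ payload }}"
            # where payload renders to "hi dest=/etc/passwd" would template into two
            # k=v pairs although the task author wrote only one, so it is rejected below.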
num_args_pre = self._count_module_args(module_args, allow_dupes=True)
module_args = template.template(self.basedir, module_args, inject, fail_on_undefined=self.error_on_undefined_vars)
num_args_post = self._count_module_args(module_args)
if num_args_pre != num_args_post:
raise errors.AnsibleError("A variable inserted a new parameter into the module args. " + \
"Be sure to quote variables if they contain equal signs (for example: \"{{var}}\").")
# And we also make sure nothing added in special flags for things
# like the command/shell module (ie. #USE_SHELL)
if '#USE_SHELL' in module_args:
raise errors.AnsibleError("A variable tried to add #USE_SHELL to the module arguments.")
complex_args = template.template(self.basedir, complex_args, inject, fail_on_undefined=self.error_on_undefined_vars)
except jinja2.exceptions.UndefinedError, e:
raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e))
# filter omitted arguments out from complex_args
if complex_args:
complex_args = dict(filter(lambda x: x[1] != self.omit_token, complex_args.iteritems()))
# Filter omitted arguments out from module_args.
# We do this with split_args instead of parse_kv to ensure
# that things are not unquoted/requoted incorrectly
args = split_args(module_args)
final_args = []
for arg in args:
if '=' in arg:
k,v = arg.split('=', 1)
if unquote(v) != self.omit_token:
final_args.append(arg)
else:
# not a k=v param, append it
final_args.append(arg)
module_args = ' '.join(final_args)
result = handler.run(conn, tmp, module_name, module_args, inject, complex_args)
        # Code for the do/until (retry) feature
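        # Illustrative task form that drives this loop (an assumed, typical usage):
        #   - command: /usr/bin/do_check
        #     register: result
        #     until: result.rc == 0
        #     retries: 5
        #     delay: 10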
until = self.module_vars.get('until', None)
if until is not None and result.comm_ok:
inject[self.module_vars.get('register')] = result.result
cond = template.template(self.basedir, until, inject, expand_lists=False)
if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
retries = template.template(self.basedir, self.module_vars.get('retries'), inject, expand_lists=False)
delay = self.module_vars.get('delay')
for x in range(1, int(retries) + 1):
# template the delay, cast to float and sleep
delay = template.template(self.basedir, delay, inject, expand_lists=False)
delay = float(delay)
time.sleep(delay)
tmp = ''
if self._early_needs_tmp_path(module_name, handler):
tmp = self._make_tmp_path(conn)
result = handler.run(conn, tmp, module_name, module_args, inject, complex_args)
result.result['attempts'] = x
vv("Result from run %i is: %s" % (x, result.result))
inject[self.module_vars.get('register')] = result.result
cond = template.template(self.basedir, until, inject, expand_lists=False)
if utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
break
if result.result['attempts'] == retries and not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
result.result['failed'] = True
result.result['msg'] = "Task failed as maximum retries was encountered"
else:
result.result['attempts'] = 0
conn.close()
if not result.comm_ok:
# connection or parsing errors...
self.callbacks.on_unreachable(host, result.result)
else:
data = result.result
# https://github.com/ansible/ansible/issues/4958
if hasattr(sys.stdout, "isatty"):
if "stdout" in data and sys.stdout.isatty():
if not string_functions.isprintable(data['stdout']):
data['stdout'] = ''.join(c for c in data['stdout'] if string_functions.isprintable(c))
if 'item' in inject:
result.result['item'] = inject['item']
result.result['invocation'] = dict(
module_args=module_args,
module_name=module_name
)
changed_when = self.module_vars.get('changed_when')
failed_when = self.module_vars.get('failed_when')
if (changed_when is not None or failed_when is not None) and self.background == 0:
register = self.module_vars.get('register')
if register is not None:
if 'stdout' in data:
data['stdout_lines'] = data['stdout'].splitlines()
inject[register] = data
# only run the final checks if the async_status has finished,
# or if we're not running an async_status check at all
if (module_name == 'async_status' and "finished" in data) or module_name != 'async_status':
if changed_when is not None and 'skipped' not in data:
data['changed'] = utils.check_conditional(changed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars)
if failed_when is not None and 'skipped' not in data:
data['failed_when_result'] = data['failed'] = utils.check_conditional(failed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars)
if is_chained:
# no callbacks
return result
if 'skipped' in data:
self.callbacks.on_skipped(host, inject.get('item',None))
if self.no_log:
data = utils.censor_unlogged_data(data)
if not result.is_successful():
ignore_errors = self.module_vars.get('ignore_errors', False)
self.callbacks.on_failed(host, data, ignore_errors)
else:
if self.diff:
self.callbacks.on_file_diff(conn.host, result.diff)
self.callbacks.on_ok(host, data)
return result
def _early_needs_tmp_path(self, module_name, handler):
''' detect if a tmp path should be created before the handler is called '''
if module_name in utils.plugins.action_loader:
return getattr(handler, 'TRANSFERS_FILES', False)
# other modules never need tmp path at early stage
return False
def _late_needs_tmp_path(self, conn, tmp, module_style):
if "tmp" in tmp:
# tmp has already been created
return False
if not conn.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self.become_method == 'su':
# tmp is necessary to store module source code
return True
if not conn.has_pipelining:
# tmp is necessary to store the module source code
# or we want to keep the files on the target system
return True
if module_style != "new":
# even when conn has pipelining, old style modules need tmp to store arguments
return True
return False
# *****************************************************
def _low_level_exec_command(self, conn, cmd, tmp, sudoable=False,
executable=None, become=False, in_data=None):
''' execute a command string over SSH, return the output '''
# this can be skipped with powershell modules when there is no analog to a Windows command (like chmod)
if cmd:
if executable is None:
executable = C.DEFAULT_EXECUTABLE
become_user = self.become_user
# compare connection user to (su|sudo)_user and disable if the same
# assume connection type is local if no user attribute
this_user = getattr(conn, 'user', getpass.getuser())
if (not become and this_user == become_user):
sudoable = False
become = False
rc, stdin, stdout, stderr = conn.exec_command(cmd,
tmp,
become_user=become_user,
sudoable=sudoable,
executable=executable,
in_data=in_data)
if type(stdout) not in [ str, unicode ]:
out = ''.join(stdout.readlines())
else:
out = stdout
if type(stderr) not in [ str, unicode ]:
err = ''.join(stderr.readlines())
else:
err = stderr
if rc is not None:
return dict(rc=rc, stdout=out, stderr=err)
else:
return dict(stdout=out, stderr=err)
return dict(rc=None, stdout='', stderr='')
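# Note on the shape of the return value: callers always get 'stdout' and
# 'stderr' keys, plus 'rc' when the connection reported an exit code; an
# empty cmd short-circuits to {'rc': None, 'stdout': '', 'stderr': ''}.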
# *****************************************************
def _remote_chmod(self, conn, mode, path, tmp, sudoable=False, become=False):
''' issue a remote chmod command '''
cmd = conn.shell.chmod(mode, path)
return self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, become=become)
# *****************************************************
def _remote_expand_user(self, conn, path, tmp):
''' takes a remote path and performs tilde expansion on the remote host '''
if not path.startswith('~'):
return path
split_path = path.split(os.path.sep, 1)
expand_path = split_path[0]
if expand_path == '~':
if self.become and self.become_user:
expand_path = '~%s' % self.become_user
cmd = conn.shell.expand_user(expand_path)
data = self._low_level_exec_command(conn, cmd, tmp, sudoable=False, become=False)
initial_fragment = utils.last_non_blank_line(data['stdout'])
if not initial_fragment:
# Something went wrong trying to expand the path remotely. Return
# the original string
return path
if len(split_path) > 1:
return conn.shell.join_path(initial_fragment, *split_path[1:])
else:
return initial_fragment
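# A rough illustration of the expansion (the 'deploy' become user and the
# paths below are assumed examples, not values defined by this module):
#   self._remote_expand_user(conn, '~/files', tmp)
#     -> runs conn.shell.expand_user('~deploy') remotely and joins the rest,
#        yielding something like '/home/deploy/files'
#   self._remote_expand_user(conn, '/opt/app', tmp)  -> '/opt/app' (unchanged)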
# *****************************************************
def _remote_checksum(self, conn, tmp, path, inject):
''' compute the checksum of a remote file path; the remote command prints 1 if the file does not exist '''
# Lookup the python interp from the host or delegate
# host == inven_host when there is no delegate
host = inject['inventory_hostname']
if 'delegate_to' in inject:
delegate = inject['delegate_to']
if delegate:
# host == None when the delegate is not in inventory
host = None
# delegate set, check whether the delegate has inventory vars
delegate = template.template(self.basedir, delegate, inject)
if delegate in inject['hostvars']:
# host == delegate if we need to lookup the
# python_interpreter from the delegate's inventory vars
host = delegate
if host:
python_interp = inject['hostvars'][host].get('ansible_python_interpreter', 'python')
else:
python_interp = 'python'
cmd = conn.shell.checksum(path, python_interp)
#TODO: remove this horrible hack and find way to get checksum to work with other privilege escalation methods
if self.become_method == 'sudo':
sudoable = True
else:
sudoable = False
data = self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable)
data2 = utils.last_non_blank_line(data['stdout'])
try:
if data2 == '':
# this may happen if the connection to the remote server
# failed, so just return "INVALIDCHECKSUM" to avoid errors
return "INVALIDCHECKSUM"
else:
return data2.split()[0]
except IndexError:
sys.stderr.write("warning: Calculating checksum failed unusually, please report this to the list so it can be fixed\n")
sys.stderr.write("command: %s\n" % cmd)
sys.stderr.write("----\n")
sys.stderr.write("output: %s\n" % data)
sys.stderr.write("----\n")
# this will signal that it changed and allow things to keep going
return "INVALIDCHECKSUM"
# *****************************************************
def _make_tmp_path(self, conn):
''' make and return a temporary path on a remote box '''
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
use_system_tmp = False
if self.become and self.become_user != 'root':
use_system_tmp = True
tmp_mode = None
if self.remote_user != 'root' or (self.become and self.become_user != 'root'):
tmp_mode = 'a+rx'
cmd = conn.shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
result = self._low_level_exec_command(conn, cmd, None, sudoable=False)
# error handling on this seems a little aggressive?
if result['rc'] != 0:
if result['rc'] == 5:
output = 'Authentication failure.'
elif result['rc'] == 255 and self.transport in ['ssh']:
if utils.VERBOSITY > 3:
output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr'])
else:
output = 'SSH encountered an unknown error during the connection. We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue'
elif 'No space left on device' in result['stderr']:
output = result['stderr']
else:
output = 'Authentication or permission failure. In some cases, you may have been able to authenticate and did not have permissions on the remote directory. Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp". Failed command was: %s, exited with result %d' % (cmd, result['rc'])
if 'stdout' in result and result['stdout'] != '':
output = output + ": %s" % result['stdout']
raise errors.AnsibleError(output)
rc = conn.shell.join_path(utils.last_non_blank_line(result['stdout']).strip(), '')
# Catch failure conditions, files should never be
# written to locations in /.
if rc == '/':
raise errors.AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))
return rc
# *****************************************************
def _remove_tmp_path(self, conn, tmp_path):
''' Remove a tmp_path. '''
if "-tmp-" in tmp_path:
cmd = conn.shell.remove(tmp_path, recurse=True)
self._low_level_exec_command(conn, cmd, None, sudoable=False)
# If we have gotten here we have a working ssh configuration.
# If ssh breaks we could leave tmp directories out on the remote system.
# *****************************************************
def _copy_module(self, conn, tmp, module_name, module_args, inject, complex_args=None):
''' transfer a module over SFTP, does not run it '''
(
module_style,
module_shebang,
module_data
) = self._configure_module(conn, module_name, module_args, inject, complex_args)
module_remote_path = conn.shell.join_path(tmp, module_name)
self._transfer_str(conn, tmp, module_name, module_data)
return (module_remote_path, module_style, module_shebang)
# *****************************************************
def _configure_module(self, conn, module_name, module_args, inject, complex_args=None):
''' find module and configure it '''
# Search module path(s) for named module.
module_suffixes = getattr(conn, 'default_suffixes', None)
module_path = utils.plugins.module_finder.find_plugin(module_name, module_suffixes)
if module_path is None:
module_path2 = utils.plugins.module_finder.find_plugin('ping', module_suffixes)
if module_path2 is not None:
raise errors.AnsibleFileNotFound("module %s not found in configured module paths" % (module_name))
else:
raise errors.AnsibleFileNotFound("module %s not found in configured module paths. Additionally, core modules are missing. If this is a checkout, run 'git submodule update --init --recursive' to correct this problem." % (module_name))
# insert shared code and arguments into the module
(module_data, module_style, module_shebang) = module_replacer.modify_module(
module_path, complex_args, module_args, inject
)
return (module_style, module_shebang, module_data)
# *****************************************************
def _parallel_exec(self, hosts):
''' handles multiprocessing when more than 1 fork is required '''
manager = multiprocessing.Manager()
job_queue = manager.Queue()
for host in hosts:
job_queue.put(host)
result_queue = manager.Queue()
try:
fileno = sys.stdin.fileno()
except ValueError:
fileno = None
workers = []
for i in range(self.forks):
new_stdin = None
if fileno is not None:
try:
new_stdin = os.fdopen(os.dup(fileno))
except OSError, e:
# couldn't dupe stdin, most likely because it's
# not a valid file descriptor, so we just rely on
# using the one that was passed in
pass
prc = multiprocessing.Process(target=_executor_hook,
args=(job_queue, result_queue, new_stdin))
prc.start()
workers.append(prc)
try:
for worker in workers:
worker.join()
except KeyboardInterrupt:
for worker in workers:
worker.terminate()
worker.join()
results = []
try:
while not result_queue.empty():
results.append(result_queue.get(block=False))
except socket.error:
raise errors.AnsibleError("<interrupted>")
return results
# *****************************************************
def _partition_results(self, results):
''' separate results by ones we contacted & ones we didn't '''
if results is None:
return None
results2 = dict(contacted={}, dark={})
for result in results:
host = result.host
if host is None:
raise Exception("internal error, host not set")
if result.communicated_ok():
results2["contacted"][host] = result.result
else:
results2["dark"][host] = result.result
# hosts which were contacted but never got a chance to return
for host in self.run_hosts:
if not (host in results2['dark'] or host in results2['contacted']):
results2["dark"][host] = {}
return results2
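# For illustration, the partitioned structure looks like (host names are
# assumed examples):
#   {'contacted': {'web01': {...}},
#    'dark': {'db01': {...}, 'db02': {}}}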
# *****************************************************
def run(self):
''' xfer & run module on all matched hosts '''
# find hosts that match the pattern
if not self.run_hosts:
self.run_hosts = self.inventory.list_hosts(self.pattern)
hosts = self.run_hosts
if len(hosts) == 0:
self.callbacks.on_no_hosts()
return dict(contacted={}, dark={})
global multiprocessing_runner
multiprocessing_runner = self
results = None
# Check if this is an action plugin. Some of them are designed
# to be run once per group of hosts. Example module: pause,
# run once per hostgroup, rather than pausing once per host.
p = utils.plugins.action_loader.get(self.module_name, self)
if self.forks == 0 or self.forks > len(hosts):
self.forks = len(hosts)
if (p and (getattr(p, 'BYPASS_HOST_LOOP', None)) or self.run_once):
# Expose the current hostgroup to the bypassing plugins
self.host_set = hosts
# We aren't iterating over all the hosts in this
# group. So, just choose the "delegate_to" host if that is defined and is
# one of the targeted hosts, otherwise pick the first host in our group to
# construct the conn object with.
if self.delegate_to is not None and self.delegate_to in hosts:
host = self.delegate_to
else:
host = hosts[0]
result_data = self._executor(host, None).result
# Create a ResultData item for each host in this group
# using the returned result. If we didn't do this we would
# get false reports of dark hosts.
results = [ ReturnData(host=h, result=result_data, comm_ok=True) \
for h in hosts ]
del self.host_set
elif self.forks > 1:
try:
results = self._parallel_exec(hosts)
except IOError, ie:
print ie.errno
if ie.errno == 32:
# broken pipe from Ctrl+C
raise errors.AnsibleError("interrupted")
raise
else:
results = [ self._executor(h, None) for h in hosts ]
return self._partition_results(results)
# *****************************************************
def run_async(self, time_limit):
''' Run this module asynchronously and return a poller. '''
self.background = time_limit
results = self.run()
return results, poller.AsyncPoller(results, self)
# *****************************************************
def noop_on_check(self, inject):
''' Should the runner run in check mode or not ? '''
# initialize self.always_run on first call
if self.always_run is None:
self.always_run = self.module_vars.get('always_run', False)
self.always_run = check_conditional(
self.always_run, self.basedir, inject, fail_on_undefined=True)
return (self.check and not self.always_run)
| gpl-3.0 |
lesserwhirls/scipy-cwt | scipy/odr/models.py | 57 | 4522 | """ Collection of Model instances for use with the odrpack fitting package.
"""
import numpy as np
from scipy.odr.odrpack import Model
def _lin_fcn(B, x):
a, b = B[0], B[1:]
b.shape = (b.shape[0], 1)
return a + (x*b).sum(axis=0)
def _lin_fjb(B, x):
a = np.ones(x.shape[-1], float)
res = np.concatenate((a, x.ravel()))
res.shape = (B.shape[-1], x.shape[-1])
return res
def _lin_fjd(B, x):
b = B[1:]
b = np.repeat(b, (x.shape[-1],)*b.shape[-1],axis=0)
b.shape = x.shape
return b
def _lin_est(data):
# Eh. The answer is analytical, so just return all ones.
# Don't return zeros since that will interfere with
# ODRPACK's auto-scaling procedures.
if len(data.x.shape) == 2:
m = data.x.shape[0]
else:
m = 1
return np.ones((m + 1,), float)
def _poly_fcn(B, x, powers):
a, b = B[0], B[1:]
b.shape = (b.shape[0], 1)
return a + np.sum(b * np.power(x, powers), axis=0)
def _poly_fjacb(B, x, powers):
res = np.concatenate((np.ones(x.shape[-1], float), np.power(x,
powers).flat))
res.shape = (B.shape[-1], x.shape[-1])
return res
def _poly_fjacd(B, x, powers):
b = B[1:]
b.shape = (b.shape[0], 1)
b = b * powers
return np.sum(b * np.power(x, powers-1),axis=0)
def _exp_fcn(B, x):
return B[0] + np.exp(B[1] * x)
def _exp_fjd(B, x):
return B[1] * np.exp(B[1] * x)
def _exp_fjb(B, x):
res = np.concatenate((np.ones(x.shape[-1], float), x * np.exp(B[1] * x)))
res.shape = (2, x.shape[-1])
return res
def _exp_est(data):
# Eh.
return np.array([1., 1.])
multilinear = Model(_lin_fcn, fjacb=_lin_fjb,
fjacd=_lin_fjd, estimate=_lin_est,
meta={'name': 'Arbitrary-dimensional Linear',
'equ':'y = B_0 + Sum[i=1..m, B_i * x_i]',
'TeXequ':'$y=\\beta_0 + \sum_{i=1}^m \\beta_i x_i$'})
def polynomial(order):
""" Factory function for a general polynomial model.
Parameters
----------
order : int or sequence
If an integer, it becomes the order of the polynomial to fit. If
a sequence of numbers, then these are the explicit powers in the
polynomial.
A constant term (power 0) is always included, so don't include 0.
Thus, polynomial(n) is equivalent to polynomial(range(1, n+1)).
Returns
-------
model : Model instance
"""
powers = np.asarray(order)
if powers.shape == ():
# Scalar.
powers = np.arange(1, powers + 1)
powers.shape = (len(powers), 1)
len_beta = len(powers) + 1
def _poly_est(data, len_beta=len_beta):
# Eh. Ignore data and return all ones.
return np.ones((len_beta,), float)
return Model(_poly_fcn, fjacd=_poly_fjacd, fjacb=_poly_fjacb,
estimate=_poly_est, extra_args=(powers,),
meta={'name': 'Sorta-general Polynomial',
'equ':'y = B_0 + Sum[i=1..%s, B_i * (x**i)]' % (len_beta-1),
'TeXequ':'$y=\\beta_0 + \sum_{i=1}^{%s} \\beta_i x^i$' %\
(len_beta-1)})
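# A minimal fitting sketch using this factory (the data below is made up
# purely for illustration):
#
#   import numpy as np
#   from scipy.odr import ODR, Data
#   from scipy.odr.models import polynomial
#
#   x = np.linspace(0.0, 5.0, 20)
#   y = 1.0 + 2.0*x + 0.5*x**2
#   fit = ODR(Data(x, y), polynomial(2)).run()
#   # fit.beta holds [B_0, B_1, B_2], approximately [1.0, 2.0, 0.5]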
exponential = Model(_exp_fcn, fjacd=_exp_fjd, fjacb=_exp_fjb,
estimate=_exp_est, meta={'name':'Exponential',
'equ':'y= B_0 + exp(B_1 * x)',
'TeXequ':'$y=\\beta_0 + e^{\\beta_1 x}$'})
def _unilin(B, x):
return x*B[0] + B[1]
def _unilin_fjd(B, x):
return np.ones(x.shape, float) * B[0]
def _unilin_fjb(B, x):
_ret = np.concatenate((x, np.ones(x.shape, float)))
_ret.shape = (2,) + x.shape
return _ret
def _unilin_est(data):
return (1., 1.)
def _quadratic(B, x):
return x*(x*B[0] + B[1]) + B[2]
def _quad_fjd(B, x):
return 2*x*B[0] + B[1]
def _quad_fjb(B, x):
_ret = np.concatenate((x*x, x, np.ones(x.shape, float)))
_ret.shape = (3,) + x.shape
return _ret
def _quad_est(data):
return (1.,1.,1.)
unilinear = Model(_unilin, fjacd=_unilin_fjd, fjacb=_unilin_fjb,
estimate=_unilin_est, meta={'name': 'Univariate Linear',
'equ': 'y = B_0 * x + B_1',
'TeXequ': '$y = \\beta_0 x + \\beta_1$'})
quadratic = Model(_quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb,
estimate=_quad_est, meta={'name': 'Quadratic',
'equ': 'y = B_0*x**2 + B_1*x + B_2',
'TeXequ': '$y = \\beta_0 x^2 + \\beta_1 x + \\beta_2$'})
#### EOF #######################################################################
| bsd-3-clause |
resmo/ansible | lib/ansible/modules/network/netvisor/pn_admin_syslog.py | 38 | 6606 | #!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_admin_syslog
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: "2.8"
short_description: CLI command to create/modify/delete admin-syslog
description:
- This module can be used to create the scope and other parameters of syslog event collection.
- This module can be used to modify parameters of syslog event collection.
- This module can be used to delete the scope and other parameters of syslog event collection.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: False
type: str
state:
description:
- State the action to perform. Use C(present) to create admin-syslog and
C(absent) to delete admin-syslog C(update) to modify the admin-syslog.
required: True
type: str
choices: ['present', 'absent', 'update']
pn_scope:
description:
- Scope of the system log.
required: False
type: str
choices: ['local', 'fabric']
pn_host:
description:
- Hostname to log system events.
required: False
type: str
pn_port:
description:
- Host port.
required: False
type: str
pn_transport:
description:
- Transport for log events - tcp/tls or udp.
required: False
type: str
choices: ['tcp-tls', 'udp']
default: 'udp'
pn_message_format:
description:
- message-format for log events - structured or legacy.
required: False
choices: ['structured', 'legacy']
type: str
pn_name:
description:
- name of the system log.
required: False
type: str
"""
EXAMPLES = """
- name: admin-syslog functionality
pn_admin_syslog:
pn_cliswitch: "sw01"
state: "absent"
pn_name: "foo"
pn_scope: "local"
- name: admin-syslog functionality
pn_admin_syslog:
pn_cliswitch: "sw01"
state: "present"
pn_name: "foo"
pn_scope: "local"
pn_host: "166.68.224.46"
pn_message_format: "structured"
- name: admin-syslog functionality
pn_admin_syslog:
pn_cliswitch: "sw01"
state: "update"
pn_name: "foo"
pn_host: "166.68.224.10"
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the admin-syslog command.
returned: always
type: list
stderr:
description: set of error responses from the admin-syslog command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
from ansible.module_utils.network.netvisor.netvisor import run_commands
def check_cli(module, cli):
"""
This method checks for idempotency using the admin-syslog-show command.
If a syslog entry with the given name exists, return True, else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
"""
name = module.params['pn_name']
cli += ' admin-syslog-show format name no-show-headers'
out = run_commands(module, cli)[1]
out = out.split()
return True if name in out else False
def main():
""" This section is for arguments parsing """
state_map = dict(
present='admin-syslog-create',
absent='admin-syslog-delete',
update='admin-syslog-modify'
)
module = AnsibleModule(
argument_spec=dict(
pn_cliswitch=dict(required=False, type='str'),
state=dict(required=True, type='str',
choices=state_map.keys()),
pn_scope=dict(required=False, type='str',
choices=['local', 'fabric']),
pn_host=dict(required=False, type='str'),
pn_port=dict(required=False, type='str'),
pn_transport=dict(required=False, type='str',
choices=['tcp-tls', 'udp'], default='udp'),
pn_message_format=dict(required=False, type='str',
choices=['structured', 'legacy']),
pn_name=dict(required=False, type='str'),
),
required_if=(
['state', 'present', ['pn_name', 'pn_host', 'pn_scope']],
['state', 'absent', ['pn_name']],
['state', 'update', ['pn_name']]
),
required_one_of=[['pn_port', 'pn_message_format',
'pn_host', 'pn_transport', 'pn_scope']]
)
# Accessing the arguments
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
scope = module.params['pn_scope']
host = module.params['pn_host']
port = module.params['pn_port']
transport = module.params['pn_transport']
message_format = module.params['pn_message_format']
name = module.params['pn_name']
command = state_map[state]
# Building the CLI command string
cli = pn_cli(module, cliswitch)
SYSLOG_EXISTS = check_cli(module, cli)
cli += ' %s name %s ' % (command, name)
if command == 'admin-syslog-modify':
if SYSLOG_EXISTS is False:
module.fail_json(
failed=True,
msg='admin syslog with name %s does not exist' % name
)
if command == 'admin-syslog-delete':
if SYSLOG_EXISTS is False:
module.exit_json(
skipped=True,
msg='admin syslog with name %s does not exist' % name
)
if command == 'admin-syslog-create':
if SYSLOG_EXISTS is True:
module.exit_json(
skipped=True,
msg='admin syslog with name %s already exists' % name
)
if command == 'admin-syslog-create':
if scope:
cli += ' scope ' + scope
if command != 'admin-syslog-delete':
if host:
cli += ' host ' + host
if port:
cli += ' port ' + port
if transport:
cli += ' transport ' + transport
if message_format:
cli += ' message-format ' + message_format
run_cli(module, cli, state_map)
if __name__ == '__main__':
main()
| gpl-3.0 |
koniiiik/django | django/utils/text.py | 7 | 14950 | from __future__ import unicode_literals
import re
import unicodedata
from gzip import GzipFile
from io import BytesIO
from django.utils import six
from django.utils.encoding import force_text
from django.utils.functional import (
SimpleLazyObject, keep_lazy, keep_lazy_text, lazy,
)
from django.utils.safestring import SafeText, mark_safe
from django.utils.six.moves import html_entities
from django.utils.translation import pgettext, ugettext as _, ugettext_lazy
if six.PY2:
# Import force_unicode even though this module doesn't use it, because some
# people rely on it being here.
from django.utils.encoding import force_unicode # NOQA
# Capitalizes the first letter of a string.
def capfirst(x):
return x and force_text(x)[0].upper() + force_text(x)[1:]
capfirst = keep_lazy_text(capfirst)
# Set up regular expressions
re_words = re.compile(r'<.*?>|((?:\w[-\w]*|&.*?;)+)', re.U | re.S)
re_chars = re.compile(r'<.*?>|(.)', re.U | re.S)
re_tag = re.compile(r'<(/)?([^ ]+?)(?:(\s*/)| .*?)?>', re.S)
re_newlines = re.compile(r'\r\n|\r') # Used in normalize_newlines
re_camel_case = re.compile(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
@keep_lazy_text
def wrap(text, width):
"""
A word-wrap function that preserves existing line breaks. Expects that
existing line breaks are posix newlines.
All white space is preserved except added line breaks consume the space on
which they break the line.
Long words are not wrapped, so the output text may have lines longer than
``width``.
"""
text = force_text(text)
def _generator():
for line in text.splitlines(True): # True keeps trailing linebreaks
max_width = min((line.endswith('\n') and width + 1 or width), width)
while len(line) > max_width:
space = line[:max_width + 1].rfind(' ') + 1
if space == 0:
space = line.find(' ') + 1
if space == 0:
yield line
line = ''
break
yield '%s\n' % line[:space - 1]
line = line[space:]
max_width = min((line.endswith('\n') and width + 1 or width), width)
if line:
yield line
return ''.join(_generator())
class Truncator(SimpleLazyObject):
"""
An object used to truncate text, either by characters or words.
"""
def __init__(self, text):
super(Truncator, self).__init__(lambda: force_text(text))
def add_truncation_text(self, text, truncate=None):
if truncate is None:
truncate = pgettext(
'String to return when truncating text',
'%(truncated_text)s...')
truncate = force_text(truncate)
if '%(truncated_text)s' in truncate:
return truncate % {'truncated_text': text}
# The truncation text didn't contain the %(truncated_text)s string
# replacement argument so just append it to the text.
if text.endswith(truncate):
# But don't append the truncation text if the current text already
# ends in this.
return text
return '%s%s' % (text, truncate)
def chars(self, num, truncate=None, html=False):
"""
Returns the text truncated to be no longer than the specified number
of characters.
Takes an optional argument of what should be used to notify that the
string has been truncated, defaulting to a translatable string of an
ellipsis (...).
"""
self._setup()
length = int(num)
text = unicodedata.normalize('NFC', self._wrapped)
# Calculate the length to truncate to (max length - end_text length)
truncate_len = length
for char in self.add_truncation_text('', truncate):
if not unicodedata.combining(char):
truncate_len -= 1
if truncate_len == 0:
break
if html:
return self._truncate_html(length, truncate, text, truncate_len, False)
return self._text_chars(length, truncate, text, truncate_len)
def _text_chars(self, length, truncate, text, truncate_len):
"""
Truncates a string after a certain number of chars.
"""
s_len = 0
end_index = None
for i, char in enumerate(text):
if unicodedata.combining(char):
# Don't consider combining characters
# as adding to the string length
continue
s_len += 1
if end_index is None and s_len > truncate_len:
end_index = i
if s_len > length:
# Return the truncated string
return self.add_truncation_text(text[:end_index or 0],
truncate)
# Return the original string since no truncation was necessary
return text
def words(self, num, truncate=None, html=False):
"""
Truncates a string after a certain number of words. Takes an optional
argument of what should be used to notify that the string has been
truncated, defaulting to ellipsis (...).
"""
self._setup()
length = int(num)
if html:
return self._truncate_html(length, truncate, self._wrapped, length, True)
return self._text_words(length, truncate)
def _text_words(self, length, truncate):
"""
Truncates a string after a certain number of words.
Newlines in the string will be stripped.
"""
words = self._wrapped.split()
if len(words) > length:
words = words[:length]
return self.add_truncation_text(' '.join(words), truncate)
return ' '.join(words)
def _truncate_html(self, length, truncate, text, truncate_len, words):
"""
Truncates HTML to a certain number of chars (not counting tags and
comments), or, if words is True, then to a certain number of words.
Closes opened tags if they were correctly closed in the given HTML.
Newlines in the HTML are preserved.
"""
if words and length <= 0:
return ''
html4_singlets = (
'br', 'col', 'link', 'base', 'img',
'param', 'area', 'hr', 'input'
)
# Count non-HTML chars/words and keep note of open tags
pos = 0
end_text_pos = 0
current_len = 0
open_tags = []
regex = re_words if words else re_chars
while current_len <= length:
m = regex.search(text, pos)
if not m:
# Checked through whole string
break
pos = m.end(0)
if m.group(1):
# It's an actual non-HTML word or char
current_len += 1
if current_len == truncate_len:
end_text_pos = pos
continue
# Check for tag
tag = re_tag.match(m.group(0))
if not tag or current_len >= truncate_len:
# Don't worry about non tags or tags after our truncate point
continue
closing_tag, tagname, self_closing = tag.groups()
# Element names are always case-insensitive
tagname = tagname.lower()
if self_closing or tagname in html4_singlets:
pass
elif closing_tag:
# Check for match in open tags list
try:
i = open_tags.index(tagname)
except ValueError:
pass
else:
# SGML: An end tag closes, back to the matching start tag,
# all unclosed intervening start tags with omitted end tags
open_tags = open_tags[i + 1:]
else:
# Add it to the start of the open tags list
open_tags.insert(0, tagname)
if current_len <= length:
return text
out = text[:end_text_pos]
truncate_text = self.add_truncation_text('', truncate)
if truncate_text:
out += truncate_text
# Close any tags still open
for tag in open_tags:
out += '</%s>' % tag
# Return string
return out
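# A short illustration of the truncation behaviour (example strings only):
#   Truncator('The quick brown fox').words(2)   -> 'The quick...'
#   Truncator('<p>The quick brown fox</p>').words(2, html=True)
#                                               -> '<p>The quick...</p>'
#   Truncator('The quick brown fox').chars(10)  -> 'The qui...'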
@keep_lazy_text
def get_valid_filename(s):
"""
Returns the given string converted to a string that can be used for a clean
filename. Specifically, leading and trailing spaces are removed; other
spaces are converted to underscores; and anything that is not a unicode
alphanumeric, dash, underscore, or dot, is removed.
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
"""
s = force_text(s).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
@keep_lazy_text
def get_text_list(list_, last_word=ugettext_lazy('or')):
"""
>>> get_text_list(['a', 'b', 'c', 'd'])
'a, b, c or d'
>>> get_text_list(['a', 'b', 'c'], 'and')
'a, b and c'
>>> get_text_list(['a', 'b'], 'and')
'a and b'
>>> get_text_list(['a'])
'a'
>>> get_text_list([])
''
"""
if len(list_) == 0:
return ''
if len(list_) == 1:
return force_text(list_[0])
return '%s %s %s' % (
# Translators: This string is used as a separator between list elements
_(', ').join(force_text(i) for i in list_[:-1]),
force_text(last_word), force_text(list_[-1]))
@keep_lazy_text
def normalize_newlines(text):
"""Normalizes CRLF and CR newlines to just LF."""
text = force_text(text)
return re_newlines.sub('\n', text)
@keep_lazy_text
def phone2numeric(phone):
"""Converts a phone number with letters into its numeric equivalent."""
char2number = {
'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3', 'g': '4',
'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5', 'm': '6', 'n': '6',
'o': '6', 'p': '7', 'q': '7', 'r': '7', 's': '7', 't': '8', 'u': '8',
'v': '8', 'w': '9', 'x': '9', 'y': '9', 'z': '9',
}
return ''.join(char2number.get(c, c) for c in phone.lower())
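# For instance (example number only): phone2numeric('1-800-FLOWERS') -> '1-800-3569377'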
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
zbuf = BytesIO()
with GzipFile(mode='wb', compresslevel=6, fileobj=zbuf, mtime=0) as zfile:
zfile.write(s)
return zbuf.getvalue()
class StreamingBuffer(object):
def __init__(self):
self.vals = []
def write(self, val):
self.vals.append(val)
def read(self):
if not self.vals:
return b''
ret = b''.join(self.vals)
self.vals = []
return ret
def flush(self):
return
def close(self):
return
# Like compress_string, but for iterators of strings.
def compress_sequence(sequence):
buf = StreamingBuffer()
with GzipFile(mode='wb', compresslevel=6, fileobj=buf, mtime=0) as zfile:
# Output headers...
yield buf.read()
for item in sequence:
zfile.write(item)
data = buf.read()
if data:
yield data
yield buf.read()
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
smart_split_re = re.compile(r"""
((?:
[^\s'"]*
(?:
(?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
[^\s'"]*
)+
) | \S+)
""", re.VERBOSE)
def smart_split(text):
r"""
Generator that splits a string by spaces, leaving quoted phrases together.
Supports both single and double quotes, and supports escaping quotes with
backslashes. In the output, strings will keep their initial and trailing
quote marks and escaped quotes will remain escaped (the results can then
be further processed with unescape_string_literal()).
>>> list(smart_split(r'This is "a person\'s" test.'))
['This', 'is', '"a person\\\'s"', 'test.']
>>> list(smart_split(r"Another 'person\'s' test."))
['Another', "'person\\'s'", 'test.']
>>> list(smart_split(r'A "\"funky\" style" test.'))
['A', '"\\"funky\\" style"', 'test.']
"""
text = force_text(text)
for bit in smart_split_re.finditer(text):
yield bit.group(0)
def _replace_entity(match):
text = match.group(1)
if text[0] == '#':
text = text[1:]
try:
if text[0] in 'xX':
c = int(text[1:], 16)
else:
c = int(text)
return six.unichr(c)
except ValueError:
return match.group(0)
else:
try:
return six.unichr(html_entities.name2codepoint[text])
except (ValueError, KeyError):
return match.group(0)
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
@keep_lazy_text
def unescape_entities(text):
return _entity_re.sub(_replace_entity, force_text(text))
@keep_lazy_text
def unescape_string_literal(s):
r"""
Convert quoted string literals to unquoted strings with escaped quotes and
backslashes unquoted::
>>> unescape_string_literal('"abc"')
'abc'
>>> unescape_string_literal("'abc'")
'abc'
>>> unescape_string_literal('"a \"bc\""')
'a "bc"'
>>> unescape_string_literal("'\'ab\' c'")
"'ab' c"
"""
if s[0] not in "\"'" or s[-1] != s[0]:
raise ValueError("Not a string literal: %r" % s)
quote = s[0]
return s[1:-1].replace(r'\%s' % quote, quote).replace(r'\\', '\\')
@keep_lazy(six.text_type, SafeText)
def slugify(value, allow_unicode=False):
"""
Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
Remove characters that aren't alphanumerics, underscores, or hyphens.
Convert to lowercase. Also strip leading and trailing whitespace.
"""
value = force_text(value)
if allow_unicode:
value = unicodedata.normalize('NFKC', value)
value = re.sub(r'[^\w\s-]', '', value, flags=re.U).strip().lower()
return mark_safe(re.sub(r'[-\s]+', '-', value, flags=re.U))
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value).strip().lower()
return mark_safe(re.sub(r'[-\s]+', '-', value))
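# For example: slugify('Jack & Jill like numbers 1,2,3') -> 'jack-jill-like-numbers-123'
# With allow_unicode=True, unicode letters such as 'é' are kept rather than stripped.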
def camel_case_to_spaces(value):
"""
Splits CamelCase and converts to lower case. Also strips leading and
trailing whitespace.
"""
return re_camel_case.sub(r' \1', value).strip().lower()
def _format_lazy(format_string, *args, **kwargs):
"""
Apply str.format() on 'format_string' where format_string, args,
and/or kwargs might be lazy.
"""
return format_string.format(*args, **kwargs)
format_lazy = lazy(_format_lazy, six.text_type)
| bsd-3-clause |
dropbox/changes | changes/listeners/mail.py | 1 | 8772 | from __future__ import absolute_import, print_function
from itertools import imap
import logging
import toronado
from email.utils import parseaddr
from flask import current_app, render_template
from flask_mail import Message, sanitize_address
from jinja2 import Markup
from typing import List # NOQA
from changes.config import db, mail
from changes.constants import Result, Status
from changes.db.utils import try_create
from changes.lib import build_context_lib, build_type
from changes.lib.build_context_lib import CollectionContext # NOQA
from changes.models.event import Event, EventType
from changes.models.build import Build
from changes.models.job import Job
from changes.models.jobplan import JobPlan
from changes.models.project import ProjectOption
def filter_recipients(email_list, domain_whitelist=None):
"""
Returns emails from email_list that have been white-listed by
domain_whitelist.
"""
if domain_whitelist is None:
domain_whitelist = current_app.config['MAIL_DOMAIN_WHITELIST']
if not domain_whitelist:
return email_list
return [
e for e in email_list
if parseaddr(e)[1].split('@', 1)[-1] in domain_whitelist
]
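# A small illustration (the addresses and whitelist below are assumed examples):
#   filter_recipients(['Jane Doe <[email protected]>', '[email protected]'],
#                     domain_whitelist=['dropbox.com'])
#   -> ['Jane Doe <[email protected]>']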
class MailNotificationHandler(object):
logger = logging.getLogger('mail')
def send(self, msg, build):
msg.recipients = filter_recipients(msg.recipients)
if not msg.recipients:
self.logger.info(
'Exiting for collection_id={} because its message has no '
'recipients.'.format(build.collection_id))
return
event = try_create(Event, where={
'type': EventType.email,
'item_id': build.collection_id,
'data': {
'triggering_build_id': build.id.hex,
'recipients': msg.recipients,
}
})
# If we were unable to create the Event, we must've done so (and thus sent the mail) already.
if not event:
self.logger.warning('An email has already been sent for collection_id=%s, (build_id=%s).',
build.collection_id, build.id.hex)
return
mail.send(msg)
def get_msg(self, builds):
# type: (List[Build]) -> Message
context = build_context_lib.get_collection_context(builds) # type: CollectionContext
if context.result == Result.passed:
return None
max_shown = current_app.config.get('MAX_SHOWN_ITEMS_PER_BUILD_MAIL', 3)
context_dict = context._asdict()
context_dict.update({
'MAX_SHOWN_ITEMS_PER_BUILD': max_shown,
'showing_failing_tests_count':
sum([min(b['failing_tests_count'], max_shown) for b in context.builds])
})
recipients = self.get_collection_recipients(context)
msg = Message(context.title, recipients=recipients, extra_headers={
'Reply-To': ', '.join(sanitize_address(r) for r in recipients),
})
msg.body = render_template('listeners/mail/notification.txt', **context_dict)
msg.html = Markup(toronado.from_string(
render_template('listeners/mail/notification.html', **context_dict)
))
return msg
def get_collection_recipients(self, collection_context):
# type: (CollectionContext) -> List[unicode]
"""
Returns a list of recipients for a collection context created by
get_collection_context. Only recipients for failing builds will be
returned.
"""
recipient_lists = map(
lambda build_context: self.get_build_recipients(build_context['build']),
collection_context.builds)
return list(set([r for rs in recipient_lists for r in rs]))
def get_build_recipients(self, build):
# type: (Build) -> List[unicode]
"""
Returns a list of recipients for a build.
The build author is included unless the build and all failing jobs
have turned off the mail.notify-author option.
Successful builds will return the empty list.
Recipients are also collected from each failing job's
mail.notify-addresses and mail.notify-addresses-revisions options.
Should there be no failing jobs (is that possible?), recipients are
collected from the build's own mail.notify-addresses and
mail.notify-addresses-revisions options.
"""
if build.result == Result.passed:
return []
recipients = []
options = self.get_build_options(build)
if options['mail.notify-author']:
author = build.author
if author:
recipients.append(u'%s <%s>' % (author.name, author.email))
recipients.extend(options['mail.notify-addresses'])
if build_type.is_initial_commit_build(build):
recipients.extend(options['mail.notify-addresses-revisions'])
return recipients
def get_build_options(self, build):
"""
Returns a build's mail options as a
{
'mail.notify-author': bool,
'mail.notify-addresses': set,
'mail.notify-addresses-revisions': set,
} dict.
The 'mail.notify-author' option is True unless the build and all
failing jobs have turned off the mail.notify-author option.
The mail.notify-addresses and mail.notify-addresses-revisions options
respectively are sets of email addresses constructed by merging the
corresponding options of all failing jobs. Note that the build's
options are used as defaults when constructing the options for
each job, so that the job options override the build options.
Finally, the build's own options are used if there are no failing jobs.
"""
default_options = {
'mail.notify-author': '1',
'mail.notify-addresses': '',
'mail.notify-addresses-revisions': '',
}
build_options = dict(
default_options,
**dict(db.session.query(
ProjectOption.name, ProjectOption.value
).filter(
ProjectOption.project_id == build.project_id,
ProjectOption.name.in_(default_options.keys()),
))
)
# Get options for all failing jobs.
jobs_options = []
for job in list(Job.query.filter(Job.build_id == build.id)):
if job.result != Result.passed:
jobs_options.append(dict(
build_options, **self.get_job_options(job)))
# Merge all options.
# Fallback to build options in case there are no failing jobs.
all_options = jobs_options or [build_options]
merged_options = {
# Notify the author unless all jobs and the build have turned the
# notify-author option off.
'mail.notify-author': any(
imap(
lambda options: options.get('mail.notify-author') == '1',
all_options,
),
),
'mail.notify-addresses': set(),
'mail.notify-addresses-revisions': set(),
}
recipient_keys = ['mail.notify-addresses', 'mail.notify-addresses-revisions']
for options in all_options:
for key in recipient_keys:
# XXX(dcramer): we don't have option validators so let's assume
# people enter slightly incorrect values
merged_options[key] |= set(
[x.strip() for x in options[key].split(',') if x.strip()]
)
return merged_options
def get_job_options(self, job):
jobplan = JobPlan.query.filter(
JobPlan.job_id == job.id,
).first()
options = {}
if jobplan and 'snapshot' in jobplan.data:
options = jobplan.data['snapshot']['options']
return options
def build_finished_handler(build_id, *args, **kwargs):
build = Build.query.get(build_id)
if not build:
return
if not build.collection_id:
# If there isn't a collection_id, assume the build stands alone.
# All builds should probably have collection_id set.
builds = [build]
else:
builds = list(
Build.query.filter(Build.collection_id == build.collection_id))
# Exit if there are no builds for the given build_id, or any build hasn't
# finished.
if not builds or any(map(lambda build: build.status != Status.finished, builds)):
return
notification_handler = MailNotificationHandler()
msg = notification_handler.get_msg(builds)
if msg is not None:
notification_handler.send(msg, build)
| apache-2.0 |
cecep-edu/edx-platform | lms/djangoapps/instructor/views/coupons.py | 61 | 6574 | """
E-commerce Tab Instructor Dashboard Coupons Operations views
"""
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _
from util.json_request import JsonResponse
from shoppingcart.models import Coupon, CourseRegistrationCode
from opaque_keys.edx.locations import SlashSeparatedCourseKey
import datetime
import pytz
import logging
log = logging.getLogger(__name__)
@require_POST
@login_required
def remove_coupon(request, course_id): # pylint: disable=unused-argument
"""
remove the coupon against the coupon id
set the coupon is_active flag to false
"""
coupon_id = request.POST.get('id', None)
if not coupon_id:
return JsonResponse({
'message': _('coupon id is None')
}, status=400) # status code 400: Bad Request
try:
coupon = Coupon.objects.get(id=coupon_id)
except ObjectDoesNotExist:
return JsonResponse({
'message': _('coupon with the coupon id ({coupon_id}) DoesNotExist').format(coupon_id=coupon_id)
}, status=400) # status code 400: Bad Request
if not coupon.is_active:
return JsonResponse({
'message': _('coupon with the coupon id ({coupon_id}) is already inactive').format(coupon_id=coupon_id)
}, status=400) # status code 400: Bad Request
coupon.is_active = False
coupon.save()
return JsonResponse({
'message': _('coupon with the coupon id ({coupon_id}) updated successfully').format(coupon_id=coupon_id)
}) # status code 200: OK by default
@require_POST
@login_required
def add_coupon(request, course_id):
"""
add coupon in the Coupons Table
"""
code = request.POST.get('code')
# check if the code is already in the Coupons Table and active
try:
course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)
coupon = Coupon.objects.get(is_active=True, code=code, course_id=course_id)
except Coupon.DoesNotExist:
# check if the coupon code is in the CourseRegistrationCode Table
course_registration_code = CourseRegistrationCode.objects.filter(code=code)
if course_registration_code:
return JsonResponse(
{'message': _("The code ({code}) that you have tried to define is already in use as a registration code").format(code=code)},
status=400) # status code 400: Bad Request
description = request.POST.get('description')
course_id = request.POST.get('course_id')
try:
discount = int(request.POST.get('discount'))
except ValueError:
return JsonResponse({
'message': _("Please Enter the Integer Value for Coupon Discount")
}, status=400) # status code 400: Bad Request
if discount > 100 or discount < 0:
return JsonResponse({
'message': _("Please Enter the Coupon Discount Value Less than or Equal to 100")
}, status=400) # status code 400: Bad Request
expiration_date = None
if request.POST.get('expiration_date'):
expiration_date = request.POST.get('expiration_date')
try:
expiration_date = datetime.datetime.strptime(expiration_date, "%m/%d/%Y").replace(tzinfo=pytz.UTC) + datetime.timedelta(days=1)
except ValueError:
return JsonResponse({
'message': _("Please enter the date in this format i-e month/day/year")
}, status=400) # status code 400: Bad Request
coupon = Coupon(
code=code, description=description,
course_id=course_id,
percentage_discount=discount,
created_by_id=request.user.id,
expiration_date=expiration_date
)
coupon.save()
return JsonResponse(
{'message': _("coupon with the coupon code ({code}) added successfully").format(code=code)}
)
if coupon:
return JsonResponse(
{'message': _("coupon with the coupon code ({code}) already exists for this course").format(code=code)},
status=400) # status code 400: Bad Request
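# For reference, a sketch of the form payload this view expects (all values
# below are illustrative only):
#   POST {'code': 'SAVE25', 'description': 'spring promo',
#         'course_id': 'org/course/run', 'discount': '25',
#         'expiration_date': '05/31/2015'}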
@require_POST
@login_required
def update_coupon(request, course_id): # pylint: disable=unused-argument
"""
update the coupon object in the database
"""
coupon_id = request.POST.get('coupon_id', None)
if not coupon_id:
return JsonResponse({'message': _("coupon id not found")}, status=400) # status code 400: Bad Request
try:
coupon = Coupon.objects.get(pk=coupon_id)
except ObjectDoesNotExist:
return JsonResponse(
{'message': _("coupon with the coupon id ({coupon_id}) DoesNotExist").format(coupon_id=coupon_id)},
status=400) # status code 400: Bad Request
description = request.POST.get('description')
coupon.description = description
coupon.save()
return JsonResponse(
{'message': _("coupon with the coupon id ({coupon_id}) updated Successfully").format(coupon_id=coupon_id)}
)
@require_POST
@login_required
def get_coupon_info(request, course_id): # pylint: disable=unused-argument
"""
get the coupon information to display in the pop up form
"""
coupon_id = request.POST.get('id', None)
if not coupon_id:
return JsonResponse({
'message': _("coupon id not found")
}, status=400) # status code 400: Bad Request
try:
coupon = Coupon.objects.get(id=coupon_id)
except ObjectDoesNotExist:
return JsonResponse({
'message': _("coupon with the coupon id ({coupon_id}) DoesNotExist").format(coupon_id=coupon_id)
}, status=400) # status code 400: Bad Request
if not coupon.is_active:
return JsonResponse({
'message': _("coupon with the coupon id ({coupon_id}) is already inactive").format(coupon_id=coupon_id)
}, status=400) # status code 400: Bad Request
expiry_date = coupon.display_expiry_date
return JsonResponse({
'coupon_code': coupon.code,
'coupon_description': coupon.description,
'coupon_course_id': coupon.course_id.to_deprecated_string(),
'coupon_discount': coupon.percentage_discount,
'expiry_date': expiry_date,
'message': _('coupon with the coupon id ({coupon_id}) updated successfully').format(coupon_id=coupon_id)
}) # status code 200: OK by default
| agpl-3.0 |
qz267/zerorpc-python | zerorpc/core.py | 53 | 15303 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import traceback
import gevent.pool
import gevent.queue
import gevent.event
import gevent.local
import gevent.lock
import gevent_zmq as zmq
from .exceptions import TimeoutExpired, RemoteError, LostRemote
from .channel import ChannelMultiplexer, BufferedChannel
from .socket import SocketBase
from .heartbeat import HeartBeatOnChannel
from .context import Context
from .decorators import DecoratorBase, rep
import patterns
from logging import getLogger
logger = getLogger(__name__)
class ServerBase(object):
def __init__(self, channel, methods=None, name=None, context=None,
pool_size=None, heartbeat=5):
self._multiplexer = ChannelMultiplexer(channel)
if methods is None:
methods = self
self._context = context or Context.get_instance()
self._name = name or self._extract_name(methods)
self._task_pool = gevent.pool.Pool(size=pool_size)
self._acceptor_task = None
self._methods = self._filter_methods(ServerBase, self, methods)
self._inject_builtins()
self._heartbeat_freq = heartbeat
for (k, functor) in self._methods.items():
if not isinstance(functor, DecoratorBase):
self._methods[k] = rep(functor)
@staticmethod
def _filter_methods(cls, self, methods):
if hasattr(methods, '__getitem__'):
return methods
server_methods = set(getattr(self, k) for k in dir(cls) if not
k.startswith('_'))
return dict((k, getattr(methods, k))
for k in dir(methods)
if (callable(getattr(methods, k))
and not k.startswith('_')
and getattr(methods, k) not in server_methods
))
@staticmethod
def _extract_name(methods):
return getattr(type(methods), '__name__', None) or repr(methods)
def close(self):
self.stop()
self._multiplexer.close()
def _format_args_spec(self, args_spec, r=None):
if args_spec:
r = [dict(name=name) for name in args_spec[0]]
default_values = args_spec[3]
if default_values is not None:
for arg, def_val in zip(reversed(r), reversed(default_values)):
arg['default'] = def_val
return r
def _zerorpc_inspect(self):
methods = dict((m, f) for m, f in self._methods.items()
if not m.startswith('_'))
detailled_methods = dict((m,
dict(args=self._format_args_spec(f._zerorpc_args()),
doc=f._zerorpc_doc())) for (m, f) in methods.items())
return {'name': self._name,
'methods': detailled_methods}
def _inject_builtins(self):
self._methods['_zerorpc_list'] = lambda: [m for m in self._methods
if not m.startswith('_')]
self._methods['_zerorpc_name'] = lambda: self._name
self._methods['_zerorpc_ping'] = lambda: ['pong', self._name]
self._methods['_zerorpc_help'] = lambda m: \
self._methods[m]._zerorpc_doc()
self._methods['_zerorpc_args'] = \
lambda m: self._methods[m]._zerorpc_args()
self._methods['_zerorpc_inspect'] = self._zerorpc_inspect
def __call__(self, method, *args):
if method not in self._methods:
raise NameError(method)
return self._methods[method](*args)
def _print_traceback(self, protocol_v1, exc_infos):
logger.exception('')
exc_type, exc_value, exc_traceback = exc_infos
if protocol_v1:
return (repr(exc_value),)
human_traceback = traceback.format_exc()
name = exc_type.__name__
human_msg = str(exc_value)
return (name, human_msg, human_traceback)
def _async_task(self, initial_event):
protocol_v1 = initial_event.header.get('v', 1) < 2
channel = self._multiplexer.channel(initial_event)
hbchan = HeartBeatOnChannel(channel, freq=self._heartbeat_freq,
passive=protocol_v1)
bufchan = BufferedChannel(hbchan)
exc_infos = None
event = bufchan.recv()
try:
self._context.hook_load_task_context(event.header)
functor = self._methods.get(event.name, None)
if functor is None:
raise NameError(event.name)
functor.pattern.process_call(self._context, bufchan, event, functor)
except LostRemote:
exc_infos = list(sys.exc_info())
self._print_traceback(protocol_v1, exc_infos)
except Exception:
exc_infos = list(sys.exc_info())
human_exc_infos = self._print_traceback(protocol_v1, exc_infos)
reply_event = bufchan.create_event('ERR', human_exc_infos,
self._context.hook_get_task_context())
self._context.hook_server_inspect_exception(event, reply_event, exc_infos)
bufchan.emit_event(reply_event)
finally:
del exc_infos
bufchan.close()
def _acceptor(self):
while True:
initial_event = self._multiplexer.recv()
self._task_pool.spawn(self._async_task, initial_event)
def run(self):
self._acceptor_task = gevent.spawn(self._acceptor)
try:
self._acceptor_task.get()
finally:
self.stop()
self._task_pool.join(raise_error=True)
def stop(self):
if self._acceptor_task is not None:
self._acceptor_task.kill()
self._acceptor_task = None
class ClientBase(object):
def __init__(self, channel, context=None, timeout=30, heartbeat=5,
passive_heartbeat=False):
self._multiplexer = ChannelMultiplexer(channel,
ignore_broadcast=True)
self._context = context or Context.get_instance()
self._timeout = timeout
self._heartbeat_freq = heartbeat
self._passive_heartbeat = passive_heartbeat
def close(self):
self._multiplexer.close()
def _handle_remote_error(self, event):
exception = self._context.hook_client_handle_remote_error(event)
if not exception:
if event.header.get('v', 1) >= 2:
(name, msg, traceback) = event.args
exception = RemoteError(name, msg, traceback)
else:
(msg,) = event.args
exception = RemoteError('RemoteError', msg, None)
return exception
def _select_pattern(self, event):
for pattern in patterns.patterns_list:
if pattern.accept_answer(event):
return pattern
msg = 'Unable to find a pattern for: {0}'.format(event)
raise RuntimeError(msg)
def _process_response(self, request_event, bufchan, timeout):
try:
reply_event = bufchan.recv(timeout)
pattern = self._select_pattern(reply_event)
return pattern.process_answer(self._context, bufchan, request_event,
reply_event, self._handle_remote_error)
except TimeoutExpired:
bufchan.close()
ex = TimeoutExpired(timeout,
'calling remote method {0}'.format(request_event.name))
self._context.hook_client_after_request(request_event, None, ex)
raise ex
except:
bufchan.close()
raise
def __call__(self, method, *args, **kargs):
timeout = kargs.get('timeout', self._timeout)
channel = self._multiplexer.channel()
hbchan = HeartBeatOnChannel(channel, freq=self._heartbeat_freq,
passive=self._passive_heartbeat)
bufchan = BufferedChannel(hbchan, inqueue_size=kargs.get('slots', 100))
xheader = self._context.hook_get_task_context()
request_event = bufchan.create_event(method, args, xheader)
self._context.hook_client_before_request(request_event)
bufchan.emit_event(request_event)
try:
if kargs.get('async', False) is False:
return self._process_response(request_event, bufchan, timeout)
async_result = gevent.event.AsyncResult()
gevent.spawn(self._process_response, request_event, bufchan,
timeout).link(async_result)
return async_result
except:
# XXX: This is going to be closed twice if async is false and
# _process_response raises an exception. I wonder if the above
            # async branch can raise an exception too; if not, we can just remove
# this code.
bufchan.close()
raise
def __getattr__(self, method):
return lambda *args, **kargs: self(method, *args, **kargs)
class Server(SocketBase, ServerBase):
def __init__(self, methods=None, name=None, context=None, pool_size=None,
heartbeat=5):
SocketBase.__init__(self, zmq.ROUTER, context)
if methods is None:
methods = self
name = name or ServerBase._extract_name(methods)
methods = ServerBase._filter_methods(Server, self, methods)
ServerBase.__init__(self, self._events, methods, name, context,
pool_size, heartbeat)
def close(self):
ServerBase.close(self)
SocketBase.close(self)
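# Illustrative sketch (not part of the original module): exposing an object's
# methods through a Server. The endpoint is an assumption, and bind() is
# assumed to be provided by SocketBase, which is defined elsewhere in this
# package.
def _server_usage_example():
    class Calculator(object):
        def add(self, a, b):
            return a + b
    server = Server(Calculator())
    server.bind('tcp://0.0.0.0:4242')  # hypothetical endpoint
    server.run()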
class Client(SocketBase, ClientBase):
def __init__(self, connect_to=None, context=None, timeout=30, heartbeat=5,
passive_heartbeat=False):
SocketBase.__init__(self, zmq.DEALER, context=context)
ClientBase.__init__(self, self._events, context, timeout, heartbeat,
passive_heartbeat)
if connect_to:
self.connect(connect_to)
def close(self):
ClientBase.close(self)
SocketBase.close(self)
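# Illustrative sketch (not part of the original module): calling a remote
# method synchronously and asynchronously. The endpoint and the 'add' method
# are assumptions for demonstration purposes only.
def _client_usage_example():
    client = Client(connect_to='tcp://127.0.0.1:4242')  # hypothetical endpoint
    try:
        # Synchronous call via __getattr__: blocks until reply or timeout.
        result = client.add(40, 2)
        # Asynchronous call: returns a gevent AsyncResult immediately.
        async_result = client('add', 40, 2, async=True)
        result_later = async_result.get()
        return result, result_later
    finally:
        client.close()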
class Pusher(SocketBase):
def __init__(self, context=None, zmq_socket=zmq.PUSH):
super(Pusher, self).__init__(zmq_socket, context=context)
def __call__(self, method, *args):
self._events.emit(method, args,
self._context.hook_get_task_context())
def __getattr__(self, method):
return lambda *args: self(method, *args)
class Puller(SocketBase):
def __init__(self, methods=None, context=None, zmq_socket=zmq.PULL):
super(Puller, self).__init__(zmq_socket, context=context)
if methods is None:
methods = self
self._methods = ServerBase._filter_methods(Puller, self, methods)
self._receiver_task = None
def close(self):
self.stop()
super(Puller, self).close()
def __call__(self, method, *args):
if method not in self._methods:
raise NameError(method)
return self._methods[method](*args)
def _receiver(self):
while True:
event = self._events.recv()
try:
if event.name not in self._methods:
raise NameError(event.name)
self._context.hook_load_task_context(event.header)
self._context.hook_server_before_exec(event)
self._methods[event.name](*event.args)
                # In Push/Pull there is no reply to send, hence None for the
# reply_event argument
self._context.hook_server_after_exec(event, None)
except Exception:
exc_infos = sys.exc_info()
try:
logger.exception('')
self._context.hook_server_inspect_exception(event, None, exc_infos)
finally:
del exc_infos
def run(self):
self._receiver_task = gevent.spawn(self._receiver)
try:
self._receiver_task.get()
finally:
self._receiver_task = None
def stop(self):
if self._receiver_task is not None:
self._receiver_task.kill(block=False)
class Publisher(Pusher):
def __init__(self, context=None):
super(Publisher, self).__init__(context=context, zmq_socket=zmq.PUB)
class Subscriber(Puller):
def __init__(self, methods=None, context=None):
super(Subscriber, self).__init__(methods=methods, context=context,
zmq_socket=zmq.SUB)
self._events.setsockopt(zmq.SUBSCRIBE, '')
def fork_task_context(functor, context=None):
'''Wrap a functor to transfer context.
Usage example:
gevent.spawn(zerorpc.fork_task_context(myfunction), args...)
    The goal is to permit context "inheritance" from one task to another.
    Consider the following example:
        zerorpc.Server receives a new event
          - task1 is created to handle this event; this task will be linked
            to the initial event context. zerorpc.Server does that for you.
          - task1 makes use of some zerorpc.Client instances; the initial
            event context is transferred on every call.
          - task1 spawns a new task2.
          - task2 makes use of some zerorpc.Client instances; it gets a fresh
            context. Thus there is no link to the initial context that
            spawned task1.
          - task1 spawns a new fork_task_context(task3).
          - task3 makes use of some zerorpc.Client instances; the initial
            event context is transferred on every call.
    A real use case is a distributed tracer. Each time a new event is
    created, a trace_id is injected into it or copied from the current task
    context. This permits passing the trace_id from one zerorpc.Server to
    another via zerorpc.Client.
    The simple rule to know whether a task needs to be wrapped is:
      - if the new task will make any zerorpc call, it should be wrapped.
'''
context = context or Context.get_instance()
header = context.hook_get_task_context()
def wrapped(*args, **kargs):
context.hook_load_task_context(header)
return functor(*args, **kargs)
return wrapped
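# Illustrative sketch (not part of the original module): spawning a sub-task
# that inherits the current task context, e.g. so a tracer's trace_id keeps
# propagating. `some_client` and `ping` are hypothetical names.
def _fork_task_context_example(some_client):
    def background_work():
        # Calls made here carry the context captured at fork time.
        some_client.ping()  # hypothetical remote method
    gevent.spawn(fork_task_context(background_work))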
| mit |
KyleJamesWalker/ansible | lib/ansible/utils/module_docs_fragments/avi.py | 48 | 1616 | #
# Created on December 12, 2016
# @author: Gaurav Rastogi ([email protected])
# Avi Version: 16.3.4
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
class ModuleDocFragment(object):
# Avi common documentation fragment
DOCUMENTATION = """
options:
controller:
description:
- IP address or hostname of the controller. The default value is the environment variable C(AVI_CONTROLLER).
username:
description:
- Username used for accessing Avi controller. The default value is the environment variable C(AVI_USERNAME).
password:
description:
- Password of Avi user in Avi controller. The default value is the environment variable C(AVI_PASSWORD).
tenant:
description:
- Name of tenant used for all Avi API calls and context of object.
default: admin
tenant_uuid:
description:
- UUID of tenant used for all Avi API calls and context of object.
default: ''
"""
| gpl-3.0 |
noiselabs/box-linux-sync | src/noiselabs/box/pms/apt.py | 1 | 1248 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of box-linux-sync.
#
# Copyright (C) 2013 Vítor Brandão <[email protected]>
#
# box-linux-sync is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# box-linux-sync is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with box-linux-sync; if not, see
# <http://www.gnu.org/licenses/>.
from noiselabs.box.pms.pms import BasePMS
class APT(BasePMS):
"""The Advanced Packaging Tool used in the Debian family of Linux operating
systems (Ubuntu included)."""
def __str__(self):
return 'APT'
def search(self, pkg):
return "apt-cache search %s" % pkg
def install(self, pkg):
return "apt-get install %s" % pkg
def remove(self, pkg):
return "apt-get remove %s" % pkg
| lgpl-3.0 |
rhyolight/nupic.research | projects/l2_pooling/convergence_activity.py | 10 | 9793 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file plots activity of single vs multiple columns as they converge.
"""
import random
import os
import pprint
import plotly
import plotly.graph_objs as go
from htmresearch.frameworks.layers.l2_l4_inference import L4L2Experiment
from htmresearch.frameworks.layers.object_machine_factory import (
createObjectMachine
)
plotlyUser = os.environ['PLOTLY_USERNAME']
plotlyAPIKey = os.environ['PLOTLY_API_KEY']
plotly.plotly.sign_in(plotlyUser, plotlyAPIKey)
def plotActivity(l2ActiveCellsMultiColumn):
maxTouches = 15
numTouches = min(maxTouches, len(l2ActiveCellsMultiColumn))
numColumns = len(l2ActiveCellsMultiColumn[0])
fig = plotly.tools.make_subplots(
rows=1, cols=numColumns, shared_yaxes=True,
subplot_titles=('Column 1', 'Column 2', 'Column 3')[0:numColumns]
)
# pprint.pprint(fig)
data = go.Scatter(x=[], y=[])
shapes = []
for t,sdrs in enumerate(l2ActiveCellsMultiColumn):
if t <= numTouches:
for c, activeCells in enumerate(sdrs):
# print t, c, len(activeCells)
for cell in activeCells:
shapes.append(
{
'type': 'rect',
'xref': 'x'+str((c+1)),
'yref': 'y1',
'x0': t,
'x1': t + 0.6,
'y0': cell,
'y1': cell + 1,
'line': {
# 'color': 'rgba(128, 0, 128, 1)',
'width': 2,
},
# 'fillcolor': 'rgba(128, 0, 128, 0.7)',
},
)
# Add red rectangle
if numColumns==1:
shapes.append(
{
'type': 'rect',
'x0': 6,
'x1': 6.6,
'y0': -95,
'y1': 4100,
'line': {
'color': 'rgba(255, 0, 0, 0.5)',
'width': 3,
},
},
)
else:
shapes.append(
{
'type': 'rect',
'x0': 3,
'x1': 3.6,
'y0': -95,
'y1': 4100,
'line': {
'color': 'rgba(255, 0, 0, 0.5)',
'width': 3,
},
},
)
# Legend for x-axis and appropriate title
fig['layout']['annotations'].append({
'font': {'size': 20},
'xanchor': 'center',
'yanchor': 'bottom',
'text': 'Number of touches',
'xref': 'paper',
'yref': 'paper',
'x': 0.5,
'y': -0.15,
'showarrow': False,
})
fig['layout']['annotations'].append({
'font': {'size': 24},
'xanchor': 'center',
'yanchor': 'bottom',
'text': ['','<b>One cortical column</b>','',
'<b>Three cortical columns</b>'][numColumns],
'xref': 'paper',
'yref': 'paper',
'x': 0.5,
'y': 1.1,
'showarrow': False,
})
layout = {
'height': 600,
'font': {'size': 18},
'yaxis': {
'title': "Neuron #",
'range': [-100, 4201],
'showgrid': False,
},
'shapes': shapes,
}
if numColumns == 1: layout.update(width=320)
else: layout.update(width=700)
for c in range(numColumns):
fig.append_trace(data, 1, c+1)
fig['layout']['xaxis'+str(c+1)].update({
'title': "",
'range': [0, numTouches],
'showgrid': False,
'showticklabels': True,
}),
fig['layout'].update(layout)
# Save plots as HTM and/or PDF
basename='plots/activity_c'+str(numColumns)
plotly.offline.plot(fig, filename=basename+'.html', auto_open=True)
# Can't save image files in offline mode
plotly.plotly.image.save_as(fig, filename=basename+'.pdf', scale=4)
def plotL2ObjectRepresentations(exp1):
shapes = []
numObjects = len(exp1.objectL2Representations)
for obj in range(numObjects):
activeCells = exp1.objectL2Representations[obj][0]
for cell in activeCells:
shapes.append(
{
'type': 'rect',
'x0': obj,
'x1': obj + 0.75,
'y0': cell,
'y1': cell + 2,
'line': {
# 'color': 'rgba(128, 0, 128, 1)',
'width': 2,
},
# 'fillcolor': 'rgba(128, 0, 128, 0.7)',
},
)
# Add red rectangle
shapes.append(
{
'type': 'rect',
'x0': 0,
'x1': 0.9,
'y0': -95,
'y1': 4100,
'line': {
'color': 'rgba(255, 0, 0, 0.5)',
'width': 3,
},
},
)
data = [go.Scatter(x=[], y=[])]
layout = {
'width': 320,
'height': 600,
'font': {'size': 20},
'xaxis': {
'title': "Object #",
'range': [0, 10],
'showgrid': False,
'showticklabels': True,
},
'yaxis': {
'title': "Neuron #",
'range': [-100, 4201],
'showgrid': False,
},
'shapes': shapes,
'annotations': [ {
'xanchor': 'middle',
'yanchor': 'bottom',
'text': 'Target object',
'x': 1,
'y': 4100,
'ax': 10,
'ay': -25,
'arrowcolor': 'rgba(255, 0, 0, 1)',
},
{
'font': {'size': 24},
'xanchor': 'center',
'yanchor': 'bottom',
'text': '<b>Object representations</b>',
'xref': 'paper',
'yref': 'paper',
'x': 0.5,
'y': 1.1,
'showarrow': False,
}
]
}
fig = {
'data': data,
'layout': layout,
}
plotPath = plotly.offline.plot(fig, filename='plots/shapes-rectangle.html',
auto_open=True)
print "url=", plotPath
# Can't save image files in offline mode
plotly.plotly.image.save_as(
fig, filename='plots/target_object_representations.pdf', scale=4)
if __name__ == "__main__":
numColumns = 3
numFeatures = 3
numPoints = 10
numLocations = 10
numObjects = 10
numRptsPerSensation = 2
objectMachine = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=1024,
externalInputSize=1024,
numCorticalColumns=3,
seed=40,
)
objectMachine.createRandomObjects(numObjects, numPoints=numPoints,
numLocations=numLocations,
numFeatures=numFeatures)
objects = objectMachine.provideObjectsToLearn()
# single-out the inputs to the column #1
objectsSingleColumn = {}
for i in range(numObjects):
featureLocations = []
for j in range(numLocations):
featureLocations.append({0: objects[i][j][0]})
objectsSingleColumn[i] = featureLocations
  # we will run two experiments side by side, with either a single column
  # or three columns
exp3 = L4L2Experiment(
'three_column',
numCorticalColumns=3,
seed=1
)
exp1 = L4L2Experiment(
'single_column',
numCorticalColumns=1,
seed=1
)
print "train single column "
exp1.learnObjects(objectsSingleColumn)
print "train multi-column "
exp3.learnObjects(objects)
# test on the first object
objectId = 0
obj = objectMachine[objectId]
# Create sequence of sensations for this object for all columns
# We need to set the seed to get specific convergence points for the red
# rectangle in the graph.
objectSensations = {}
random.seed(12)
for c in range(numColumns):
objectCopy = [pair for pair in obj]
random.shuffle(objectCopy)
# stay multiple steps on each sensation
sensations = []
for pair in objectCopy:
for _ in xrange(numRptsPerSensation):
sensations.append(pair)
objectSensations[c] = sensations
sensationStepsSingleColumn = []
sensationStepsMultiColumn = []
for step in xrange(len(objectSensations[0])):
pairs = [
objectSensations[col][step] for col in xrange(numColumns)
]
sdrs = objectMachine._getSDRPairs(pairs)
sensationStepsMultiColumn.append(sdrs)
sensationStepsSingleColumn.append({0: sdrs[0]})
print "inference: multi-columns "
exp3.sendReset()
l2ActiveCellsMultiColumn = []
L2ActiveCellNVsTimeMultiColumn = []
for sensation in sensationStepsMultiColumn:
exp3.infer([sensation], objectName=objectId, reset=False)
l2ActiveCellsMultiColumn.append(exp3.getL2Representations())
activeCellNum = 0
for c in range(numColumns):
activeCellNum += len(exp3.getL2Representations()[c])
L2ActiveCellNVsTimeMultiColumn.append(activeCellNum/numColumns)
print "inference: single column "
exp1.sendReset()
l2ActiveCellsSingleColumn = []
L2ActiveCellNVsTimeSingleColumn = []
for sensation in sensationStepsSingleColumn:
exp1.infer([sensation], objectName=objectId, reset=False)
l2ActiveCellsSingleColumn.append(exp1.getL2Representations())
L2ActiveCellNVsTimeSingleColumn.append(len(exp1.getL2Representations()[0]))
# Used to figure out where to put the red rectangle!
print numFeatures
for i,sdrs in enumerate(l2ActiveCellsSingleColumn):
print i,len(l2ActiveCellsSingleColumn[i][0]),len(l2ActiveCellsMultiColumn[i][0])
plotActivity(l2ActiveCellsMultiColumn)
plotActivity(l2ActiveCellsSingleColumn)
plotL2ObjectRepresentations(exp1)
| gpl-3.0 |
pwollstadt/trentoolxl | dev/search_GPU/test_neighbour_search_cuda.py | 2 | 28060 | """Provide unit tests for neighbour searches using CUDA GPU-code.
Tests are based on unit tests by Pedro Mediano
https://github.com/pmediano/jidt/tree/master/java/source/infodynamics/
measures/continuous/kraskov/cuda
"""
import pytest
import numpy as np
from idtxl.neighbour_search_cuda import cudaFindKnnSetGPU, knn_search
# TODO pass 'float64' to high-level functions
def test_knn_one_dim():
"""Test kNN search in 1D."""
theiler_t = 0
n_points = 4
n_dims = 1
knn_k = 1
n_chunks = 1
pointset = np.array([-1, -1.2, 1, 1.1]).astype('float32')
gpu_id = 0
# Return arrays.
indexes = np.zeros((knn_k, n_points), dtype=np.int32)
distances = np.zeros((knn_k, n_points), dtype=np.float32)
# Call low-level function.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, n_points, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert indexes[0][0] == 1, 'Index 0 not correct.'
assert indexes[0][1] == 0, 'Index 1 not correct.'
assert indexes[0][2] == 3, 'Index 2 not correct.'
assert indexes[0][3] == 2, 'Index 3 not correct.'
assert np.isclose(distances[0][0], 0.2), 'Distance 0 not correct.'
assert np.isclose(distances[0][1], 0.2), 'Distance 1 not correct.'
assert np.isclose(distances[0][2], 0.1), 'Distance 2 not correct.'
assert np.isclose(distances[0][3], 0.1), 'Distance 3 not correct.'
# Call high-level function.
(indexes2, distances2) = knn_search(np.expand_dims(pointset, axis=1),
np.expand_dims(pointset, axis=1),
knn_k, theiler_t, n_chunks, gpu_id)
assert indexes2[0][0] == 1, 'Index 0 not correct.'
assert indexes2[0][1] == 0, 'Index 1 not correct.'
assert indexes2[0][2] == 3, 'Index 2 not correct.'
assert indexes2[0][3] == 2, 'Index 3 not correct.'
assert np.isclose(distances2[0][0], 0.2), 'Distance 0 not correct.'
assert np.isclose(distances2[0][1], 0.2), 'Distance 1 not correct.'
assert np.isclose(distances2[0][2], 0.1), 'Distance 2 not correct.'
assert np.isclose(distances2[0][3], 0.1), 'Distance 3 not correct.'
def test_knn_two_dim():
"""Test kNN search in 2D."""
theiler_t = 0
n_points = 4
n_dims = 2
knn_k = 1
n_chunks = 1
pointset = np.array([-1, 0.5, 1.1, 2,
-1, 0.5, 1.1, 2]).astype('float32')
gpu_id = 0
# Return arrays.
indexes = np.zeros((knn_k, n_points), dtype=np.int32)
distances = np.zeros((knn_k, n_points), dtype=np.float32)
# Call low-level function.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, n_points, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert indexes[0][0] == 1, 'Index 0 not correct.'
assert indexes[0][1] == 2, 'Index 1 not correct.'
assert indexes[0][2] == 1, 'Index 2 not correct.'
assert indexes[0][3] == 2, 'Index 3 not correct.'
assert np.isclose(distances[0][0], 1.5), 'Distances 0 not correct.'
assert np.isclose(distances[0][1], 0.6), 'Distances 1 not correct.'
assert np.isclose(distances[0][2], 0.6), 'Distances 2 not correct.'
assert np.isclose(distances[0][3], 0.9), 'Distances 3 not correct.'
# Call high-level function.
pointset2 = pointset.reshape((n_points, n_dims))
(indexes2, distances2) = knn_search(pointset2, pointset2, knn_k, theiler_t,
n_chunks, gpu_id)
assert indexes2[0][0] == 1, 'Index 0 not correct.'
assert indexes2[0][1] == 2, 'Index 1 not correct.'
assert indexes2[0][2] == 1, 'Index 2 not correct.'
assert indexes2[0][3] == 2, 'Index 3 not correct.'
assert np.isclose(distances2[0][0], 1.5), 'Distances 0 not correct.'
assert np.isclose(distances2[0][1], 0.6), 'Distances 1 not correct.'
assert np.isclose(distances2[0][2], 0.6), 'Distances 2 not correct.'
assert np.isclose(distances2[0][3], 0.9), 'Distances 3 not correct.'
def test_one_dim_longer_sequence():
"""Test kNN search in 1D."""
theiler_t = 0
n_points = 4
n_dims = 1
knn_k = 1
n_chunks = 1
pointset = np.array([-1, -1.2, 1, 1.1, 10, 11, 10.5, -100, -50, 666]).astype('float32')
gpu_id = 0
# Return arrays.
indexes = np.zeros((knn_k, n_points), dtype=np.int32)
distances = np.zeros((knn_k, n_points), dtype=np.float32)
# Call low-level function.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, n_points, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert indexes[0][0] == 1, 'Index 0 not correct.'
assert indexes[0][1] == 0, 'Index 1 not correct.'
assert indexes[0][2] == 3, 'Index 2 not correct.'
assert indexes[0][3] == 2, 'Index 3 not correct.'
assert np.isclose(distances[0][0], 0.2), 'Distance 0 not correct.'
assert np.isclose(distances[0][1], 0.2), 'Distance 1 not correct.'
assert np.isclose(distances[0][2], 0.1), 'Distance 2 not correct.'
assert np.isclose(distances[0][3], 0.1), 'Distance 3 not correct.'
# Call high-level function.
(indexes2, distances2) = knn_search(np.expand_dims(pointset, axis=1),
np.expand_dims(pointset, axis=1),
knn_k, theiler_t, n_chunks, gpu_id)
    assert indexes2[0][0] == 1, 'Index 0 not correct.'
    assert indexes2[0][1] == 0, 'Index 1 not correct.'
    assert indexes2[0][2] == 3, 'Index 2 not correct.'
    assert indexes2[0][3] == 2, 'Index 3 not correct.'
    assert np.isclose(distances2[0][0], 0.2), 'Distance 0 not correct.'
    assert np.isclose(distances2[0][1], 0.2), 'Distance 1 not correct.'
    assert np.isclose(distances2[0][2], 0.1), 'Distance 2 not correct.'
    assert np.isclose(distances2[0][3], 0.1), 'Distance 3 not correct.'
def test_two_dim_longer_sequence():
"""Test kNN with longer sequences.
Note:
The expected results differ from the C++ unit tests because we use the
maximum norm when searching for neighbours.
"""
theiler_t = 0
n_points = 10
n_dims = 2
knn_k = 1
n_chunks = 1
gpu_id = 0
# This is the same sequence as in the previous test case, padded with a
# bunch of points very far away.
pointset = np.array([-1, 0.5, 1.1, 2, 10, 11, 10.5, -100, -50, 666,
-1, 0.5, 1.1, 2, 98, -9, -200, 45.3, -53, 0.1]).astype('float32')
# Return arrays.
indexes = np.zeros((knn_k, n_points), dtype=np.int32)
distances = np.zeros((knn_k, n_points), dtype=np.float32)
# Call low-level function.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, n_points, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert indexes[0][0] == 1, 'Index 0 not correct.'
assert indexes[0][1] == 2, 'Index 1 not correct.'
assert indexes[0][2] == 1, 'Index 2 not correct.'
assert indexes[0][3] == 2, 'Index 3 not correct.'
assert np.isclose(distances[0][0], 1.5), 'Distances 0 not correct.'
assert np.isclose(distances[0][1], 0.6), 'Distances 1 not correct.'
assert np.isclose(distances[0][2], 0.6), 'Distances 2 not correct.'
assert np.isclose(distances[0][3], 0.9), 'Distances 3 not correct.'
# Call high-level function.
pointset2 = pointset.reshape((n_points, n_dims))
(indexes2, distances2) = knn_search(pointset2, pointset2, knn_k, theiler_t,
n_chunks, gpu_id)
    assert indexes2[0][0] == 1, 'Index 0 not correct.'
    assert indexes2[0][1] == 2, 'Index 1 not correct.'
    assert indexes2[0][2] == 1, 'Index 2 not correct.'
    assert indexes2[0][3] == 2, 'Index 3 not correct.'
    assert np.isclose(distances2[0][0], 1.5), 'Distances 0 not correct.'
    assert np.isclose(distances2[0][1], 0.6), 'Distances 1 not correct.'
    assert np.isclose(distances2[0][2], 0.6), 'Distances 2 not correct.'
    assert np.isclose(distances2[0][3], 0.9), 'Distances 3 not correct.'
def test_random_data():
"""Smoke kNN test with big random dataset"""
theiler_t = 0
n_points = 1000
n_dims = 5
knn_k = 4
n_chunks = 1
gpu_id = 0
# Return arrays.
indexes = np.zeros((knn_k, n_points), dtype=np.int32)
distances = np.zeros((knn_k, n_points), dtype=np.float32)
pointset = np.random.randn(n_points, n_dims).astype('float32')
# Call low-level function.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, n_points, gpu_id)
# Call high-level function.
pointset2 = pointset.reshape((n_points, n_dims))
(indexes2, distances2) = knn_search(pointset2, pointset2, knn_k, theiler_t,
n_chunks, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert np.all(indexes == indexes2), ('High- and low-level calls returned '
'different indices.')
assert np.all(distances == distances2), ('High- and low-level calls '
'returned different distances.')
def test_two_chunks():
"""Run knn search for two chunks."""
theiler_t = 0
n_points = 4
n_dims = 1
knn_k = 1
n_chunks = 2
signal_length = n_points * n_chunks
gpu_id = 0
# Return arrays.
indexes = np.zeros((knn_k, signal_length), dtype=np.int32)
distances = np.zeros((knn_k, signal_length), dtype=np.float32)
pointset = np.array([5, 6, -5, -7,
50, -50, 60, -70]).astype('float32')
# Call low-level function.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, signal_length, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert indexes[0][0] == 1, 'Index 0 not correct.'
assert indexes[0][1] == 0, 'Index 1 not correct.'
assert indexes[0][2] == 3, 'Index 2 not correct.'
assert indexes[0][3] == 2, 'Index 3 not correct.'
assert indexes[0][4] == 2, 'Index 4 not correct.'
assert indexes[0][5] == 3, 'Index 5 not correct.'
assert indexes[0][6] == 0, 'Index 6 not correct.'
assert indexes[0][7] == 1, 'Index 7 not correct.'
assert np.isclose(distances[0][0], 1), 'Distance 0 not correct.'
assert np.isclose(distances[0][1], 1), 'Distance 1 not correct.'
assert np.isclose(distances[0][2], 2), 'Distance 2 not correct.'
assert np.isclose(distances[0][3], 2), 'Distance 3 not correct.'
assert np.isclose(distances[0][4], 10), 'Distance 4 not correct.'
assert np.isclose(distances[0][5], 20), 'Distance 5 not correct.'
assert np.isclose(distances[0][6], 10), 'Distance 6 not correct.'
assert np.isclose(distances[0][7], 20), 'Distance 7 not correct.'
# Call high-level function.
pointset2 = np.expand_dims(pointset, axis=1)
(indexes2, distances2) = knn_search(pointset2, pointset2, knn_k, theiler_t,
n_chunks, gpu_id)
assert indexes2[0][0] == 1, 'Index 0 not correct.'
assert indexes2[0][1] == 0, 'Index 1 not correct.'
assert indexes2[0][2] == 3, 'Index 2 not correct.'
assert indexes2[0][3] == 2, 'Index 3 not correct.'
assert indexes2[0][4] == 2, 'Index 4 not correct.'
assert indexes2[0][5] == 3, 'Index 5 not correct.'
assert indexes2[0][6] == 0, 'Index 6 not correct.'
assert indexes2[0][7] == 1, 'Index 7 not correct.'
assert np.isclose(distances2[0][0], 1), 'Distance 0 not correct.'
assert np.isclose(distances2[0][1], 1), 'Distance 1 not correct.'
assert np.isclose(distances2[0][2], 2), 'Distance 2 not correct.'
assert np.isclose(distances2[0][3], 2), 'Distance 3 not correct.'
assert np.isclose(distances2[0][4], 10), 'Distance 4 not correct.'
assert np.isclose(distances2[0][5], 20), 'Distance 5 not correct.'
assert np.isclose(distances2[0][6], 10), 'Distance 6 not correct.'
assert np.isclose(distances2[0][7], 20), 'Distance 7 not correct.'
def test_three_chunks():
"""Run knn search for three chunks."""
theiler_t = 0
n_points = 4
n_dims = 1
knn_k = 1
n_chunks = 3
signal_length = n_points*n_chunks
gpu_id = 0
# Return arrays.
indexes = np.zeros((knn_k, signal_length), dtype=np.int32)
distances = np.zeros((knn_k, signal_length), dtype=np.float32)
pointset = np.array([5, 6, -5, -7,
50, -50, 60, -70,
500, -500, 600, -700]).astype('float32')
# Call low-level function.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, signal_length, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
    assert indexes[0][0] == 1, 'Index 0 not correct.'
    assert indexes[0][1] == 0, 'Index 1 not correct.'
    assert indexes[0][2] == 3, 'Index 2 not correct.'
    assert indexes[0][3] == 2, 'Index 3 not correct.'
    assert indexes[0][4] == 2, 'Index 4 not correct.'
    assert indexes[0][5] == 3, 'Index 5 not correct.'
    assert indexes[0][6] == 0, 'Index 6 not correct.'
    assert indexes[0][7] == 1, 'Index 7 not correct.'
    assert indexes[0][8] == 2, 'Index 8 not correct.'
    assert indexes[0][9] == 3, 'Index 9 not correct.'
    assert indexes[0][10] == 0, 'Index 10 not correct.'
    assert indexes[0][11] == 1, 'Index 11 not correct.'
assert np.isclose(distances[0][0], 1), 'Distance 0 is not correct.'
assert np.isclose(distances[0][1], 1), 'Distance 1 is not correct.'
assert np.isclose(distances[0][2], 2), 'Distance 2 is not correct.'
assert np.isclose(distances[0][3], 2), 'Distance 3 is not correct.'
assert np.isclose(distances[0][4], 10), 'Distance 4 is not correct.'
assert np.isclose(distances[0][5], 20), 'Distance 5 is not correct.'
assert np.isclose(distances[0][6], 10), 'Distance 6 is not correct.'
assert np.isclose(distances[0][7], 20), 'Distance 7 is not correct.'
assert np.isclose(distances[0][8], 100), 'Distance 8 is not correct.'
assert np.isclose(distances[0][9], 200), 'Distance 9 is not correct.'
assert np.isclose(distances[0][10], 100), 'Distance 10 is not correct.'
assert np.isclose(distances[0][11], 200), 'Distance 11 is not correct.'
# Call high-level function.
pointset2 = np.expand_dims(pointset, axis=1)
(indexes2, distances2) = knn_search(pointset2, pointset2, knn_k, theiler_t,
n_chunks, gpu_id)
    assert indexes2[0][0] == 1, 'Index 0 not correct.'
    assert indexes2[0][1] == 0, 'Index 1 not correct.'
    assert indexes2[0][2] == 3, 'Index 2 not correct.'
    assert indexes2[0][3] == 2, 'Index 3 not correct.'
    assert indexes2[0][4] == 2, 'Index 4 not correct.'
    assert indexes2[0][5] == 3, 'Index 5 not correct.'
    assert indexes2[0][6] == 0, 'Index 6 not correct.'
    assert indexes2[0][7] == 1, 'Index 7 not correct.'
    assert indexes2[0][8] == 2, 'Index 8 not correct.'
    assert indexes2[0][9] == 3, 'Index 9 not correct.'
    assert indexes2[0][10] == 0, 'Index 10 not correct.'
    assert indexes2[0][11] == 1, 'Index 11 not correct.'
assert np.isclose(distances2[0][0], 1), 'Distance 0 is not correct.'
assert np.isclose(distances2[0][1], 1), 'Distance 1 is not correct.'
assert np.isclose(distances2[0][2], 2), 'Distance 2 is not correct.'
assert np.isclose(distances2[0][3], 2), 'Distance 3 is not correct.'
assert np.isclose(distances2[0][4], 10), 'Distance 4 is not correct.'
assert np.isclose(distances2[0][5], 20), 'Distance 5 is not correct.'
assert np.isclose(distances2[0][6], 10), 'Distance 6 is not correct.'
assert np.isclose(distances2[0][7], 20), 'Distance 7 is not correct.'
assert np.isclose(distances2[0][8], 100), 'Distance 8 is not correct.'
assert np.isclose(distances2[0][9], 200), 'Distance 9 is not correct.'
assert np.isclose(distances2[0][10], 100), 'Distance 10 is not correct.'
assert np.isclose(distances2[0][11], 200), 'Distance 11 is not correct.'
def test_two_chunks_two_dim():
"""Test kNN with two chunks of 2D data in the same call."""
theiler_t = 0
n_points = 4
n_dims = 2
knn_k = 1
n_chunks = 2
gpu_id = 0
signal_length = n_points * n_chunks
# Return arrays.
indexes = np.zeros((knn_k, signal_length), dtype=np.int32)
distances = np.zeros((knn_k, signal_length), dtype=np.float32)
# Points: X Y y
# 1 1 | o o
# 1.1 1 |
# -1 -1 ----+----x
# -1.2 -1 |
# o o |
pointset = np.array([1, 1.1, -1, -1.2, 1, 1.1, -1, -1.2,
1, 1, -1, -1, 1, 1, -1, -1]).astype('float32')
# Call low-level function.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, signal_length, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert indexes[0][0] == 1, 'Index 0 not correct.'
assert indexes[0][1] == 0, 'Index 1 not correct.'
assert indexes[0][2] == 3, 'Index 2 not correct.'
assert indexes[0][3] == 2, 'Index 3 not correct.'
assert indexes[0][4] == 1, 'Index 4 not correct.'
assert indexes[0][5] == 0, 'Index 5 not correct.'
assert indexes[0][6] == 3, 'Index 6 not correct.'
assert indexes[0][7] == 2, 'Index 7 not correct.'
assert np.isclose(distances[0][0], 0.1), 'Distance 0 not correct.'
assert np.isclose(distances[0][1], 0.1), 'Distance 1 not correct.'
assert np.isclose(distances[0][2], 0.2), 'Distance 2 not correct.'
assert np.isclose(distances[0][3], 0.2), 'Distance 3 not correct.'
assert np.isclose(distances[0][4], 0.1), 'Distance 4 not correct.'
assert np.isclose(distances[0][5], 0.1), 'Distance 5 not correct.'
assert np.isclose(distances[0][6], 0.2), 'Distance 6 not correct.'
assert np.isclose(distances[0][7], 0.2), 'Distance 7 not correct.'
# Call high-level function.
pointset2 = pointset.reshape((signal_length, n_dims))
(indexes2, distances2) = knn_search(pointset2, pointset2, knn_k, theiler_t,
n_chunks, gpu_id)
assert indexes2[0][0] == 1, 'Index 0 not correct.'
assert indexes2[0][1] == 0, 'Index 1 not correct.'
assert indexes2[0][2] == 3, 'Index 2 not correct.'
assert indexes2[0][3] == 2, 'Index 3 not correct.'
assert indexes2[0][4] == 1, 'Index 4 not correct.'
assert indexes2[0][5] == 0, 'Index 5 not correct.'
assert indexes2[0][6] == 3, 'Index 6 not correct.'
assert indexes2[0][7] == 2, 'Index 7 not correct.'
assert np.isclose(distances2[0][0], 0.1), 'Distance 0 not correct.'
assert np.isclose(distances2[0][1], 0.1), 'Distance 1 not correct.'
assert np.isclose(distances2[0][2], 0.2), 'Distance 2 not correct.'
assert np.isclose(distances2[0][3], 0.2), 'Distance 3 not correct.'
assert np.isclose(distances2[0][4], 0.1), 'Distance 4 not correct.'
assert np.isclose(distances2[0][5], 0.1), 'Distance 5 not correct.'
assert np.isclose(distances2[0][6], 0.2), 'Distance 6 not correct.'
assert np.isclose(distances2[0][7], 0.2), 'Distance 7 not correct.'
def test_two_chunks_odd_dim():
"""Test kNN with two chunks of data with odd dimension."""
theiler_t = 0
n_points = 4
n_dims = 3
knn_k = 1
n_chunks = 2
gpu_id = 0
signal_length = n_points * n_chunks
# Return arrays.
indexes = np.zeros((knn_k, signal_length), dtype=np.int32)
distances = np.zeros((knn_k, signal_length), dtype=np.float32)
# Points: X Y Z y
# 1 1 1.02 | o o
# 1.1 1 1.03 |
# -1 -1 -1.04 ----+----x
# -1.2 -1 -1.05 |
# o o |
pointset = np.array([1, 1.1, -1, -1.2, 1, 1.1, -1, -1.2,
1, 1, -1, -1, 1, 1, -1, -1,
1.02, 1.03, 1.04, 1.05, 1.02, 1.03, 1.04, 1.05]).astype('float32')
# Call low-level function.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, signal_length, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert indexes[0][0] == 1, 'Index 0 is not correct.'
assert indexes[0][1] == 0, 'Index 1 is not correct.'
assert indexes[0][2] == 3, 'Index 2 is not correct.'
assert indexes[0][3] == 2, 'Index 3 is not correct.'
assert indexes[0][4] == 1, 'Index 4 is not correct.'
assert indexes[0][5] == 0, 'Index 5 is not correct.'
assert indexes[0][6] == 3, 'Index 6 is not correct.'
assert indexes[0][7] == 2, 'Index 7 is not correct.'
    assert np.isclose(distances[0][0], 0.1), 'Distance 0 is not correct.'
    assert np.isclose(distances[0][1], 0.1), 'Distance 1 is not correct.'
    assert np.isclose(distances[0][2], 0.2), 'Distance 2 is not correct.'
    assert np.isclose(distances[0][3], 0.2), 'Distance 3 is not correct.'
    assert np.isclose(distances[0][4], 0.1), 'Distance 4 is not correct.'
    assert np.isclose(distances[0][5], 0.1), 'Distance 5 is not correct.'
    assert np.isclose(distances[0][6], 0.2), 'Distance 6 is not correct.'
    assert np.isclose(distances[0][7], 0.2), 'Distance 7 is not correct.'
# Call high-level function.
pointset2 = pointset.reshape((signal_length, n_dims))
(indexes2, distances2) = knn_search(pointset2, pointset2, knn_k, theiler_t,
n_chunks, gpu_id)
assert indexes2[0][0] == 1, 'Index 0 is not correct.'
assert indexes2[0][1] == 0, 'Index 1 is not correct.'
assert indexes2[0][2] == 3, 'Index 2 is not correct.'
assert indexes2[0][3] == 2, 'Index 3 is not correct.'
assert indexes2[0][4] == 1, 'Index 4 is not correct.'
assert indexes2[0][5] == 0, 'Index 5 is not correct.'
assert indexes2[0][6] == 3, 'Index 6 is not correct.'
assert indexes2[0][7] == 2, 'Index 7 is not correct.'
    assert np.isclose(distances2[0][0], 0.1), 'Distance 0 is not correct.'
    assert np.isclose(distances2[0][1], 0.1), 'Distance 1 is not correct.'
    assert np.isclose(distances2[0][2], 0.2), 'Distance 2 is not correct.'
    assert np.isclose(distances2[0][3], 0.2), 'Distance 3 is not correct.'
    assert np.isclose(distances2[0][4], 0.1), 'Distance 4 is not correct.'
    assert np.isclose(distances2[0][5], 0.1), 'Distance 5 is not correct.'
    assert np.isclose(distances2[0][6], 0.2), 'Distance 6 is not correct.'
    assert np.isclose(distances2[0][7], 0.2), 'Distance 7 is not correct.'
def test_one_dim_two_dim_arg():
"""Test kNN with two chunks of data with odd dimension."""
theiler_t = 0
n_points = 4
n_dims = 3
knn_k = 1
n_chunks = 2
gpu_id = 0
signal_length = n_points * n_chunks
# Return arrays.
indexes = np.zeros((knn_k, signal_length), dtype=np.int32)
distances = np.zeros((knn_k, signal_length), dtype=np.float32)
# Points: X Y Z y
# 1 1 1.02 | o o
# 1.1 1 1.03 |
# -1 -1 -1.04 ----+----x
# -1.2 -1 -1.05 |
# o o |
pointset = np.array([1, 1.1, -1, -1.2, 1, 1.1, -1, -1.2,
1, 1, -1, -1, 1, 1, -1, -1,
1.02, 1.03, 1.04, 1.05, 1.02, 1.03, 1.04, 1.05]).astype('float32')
# Call low-level function with 1D numpy array. Numpy arranges data in
# C-order (row major) by default. This is what's expected by CUDA/pyopencl.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, signal_length, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert indexes[0][0] == 1, 'Index 0 is not correct.'
assert indexes[0][1] == 0, 'Index 1 is not correct.'
assert indexes[0][2] == 3, 'Index 2 is not correct.'
assert indexes[0][3] == 2, 'Index 3 is not correct.'
assert indexes[0][4] == 1, 'Index 4 is not correct.'
assert indexes[0][5] == 0, 'Index 5 is not correct.'
assert indexes[0][6] == 3, 'Index 6 is not correct.'
assert indexes[0][7] == 2, 'Index 7 is not correct.'
    assert np.isclose(distances[0][0], 0.1), 'Distance 0 is not correct.'
    assert np.isclose(distances[0][1], 0.1), 'Distance 1 is not correct.'
    assert np.isclose(distances[0][2], 0.2), 'Distance 2 is not correct.'
    assert np.isclose(distances[0][3], 0.2), 'Distance 3 is not correct.'
    assert np.isclose(distances[0][4], 0.1), 'Distance 4 is not correct.'
    assert np.isclose(distances[0][5], 0.1), 'Distance 5 is not correct.'
    assert np.isclose(distances[0][6], 0.2), 'Distance 6 is not correct.'
    assert np.isclose(distances[0][7], 0.2), 'Distance 7 is not correct.'
# Call low-level function with 2D numpy array. Transposing doesn't change
# anything about the memory layout.
indexes2 = np.zeros((knn_k, signal_length), dtype=np.int32)
distances2 = np.zeros((knn_k, signal_length), dtype=np.float32)
pointset2 = pointset.reshape((signal_length, n_dims)).copy()
err = cudaFindKnnSetGPU(indexes2, distances2, pointset2, pointset2, knn_k,
theiler_t, n_chunks, n_dims, signal_length, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert indexes2[0][0] == 1, 'Index 0 is not correct.'
assert indexes2[0][1] == 0, 'Index 1 is not correct.'
assert indexes2[0][2] == 3, 'Index 2 is not correct.'
assert indexes2[0][3] == 2, 'Index 3 is not correct.'
assert indexes2[0][4] == 1, 'Index 4 is not correct.'
assert indexes2[0][5] == 0, 'Index 5 is not correct.'
assert indexes2[0][6] == 3, 'Index 6 is not correct.'
assert indexes2[0][7] == 2, 'Index 7 is not correct.'
    assert np.isclose(distances2[0][0], 0.1), 'Distance 0 is not correct.'
    assert np.isclose(distances2[0][1], 0.1), 'Distance 1 is not correct.'
    assert np.isclose(distances2[0][2], 0.2), 'Distance 2 is not correct.'
    assert np.isclose(distances2[0][3], 0.2), 'Distance 3 is not correct.'
    assert np.isclose(distances2[0][4], 0.1), 'Distance 4 is not correct.'
    assert np.isclose(distances2[0][5], 0.1), 'Distance 5 is not correct.'
    assert np.isclose(distances2[0][6], 0.2), 'Distance 6 is not correct.'
    assert np.isclose(distances2[0][7], 0.2), 'Distance 7 is not correct.'
# Call low-level function with 2D numpy array in Fortran order.
indexes3 = np.zeros((knn_k, signal_length), dtype=np.int32)
distances3 = np.zeros((knn_k, signal_length), dtype=np.float32)
pointset3 = np.asfortranarray(pointset2)
print(pointset3.flags['C_CONTIGUOUS'])
with pytest.raises(AssertionError):
cudaFindKnnSetGPU(indexes3, distances3, pointset3, pointset3, knn_k,
theiler_t, n_chunks, n_dims, signal_length, gpu_id)
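def _as_c_contiguous_float32(a):
    """Illustrative helper (not part of the original tests): the GPU call
    expects C-ordered (row-major) float32 input, so a Fortran-ordered array
    would have to be converted before calling cudaFindKnnSetGPU, e.g.:
        pointset_ok = _as_c_contiguous_float32(pointset_fortran)
    """
    return np.ascontiguousarray(a, dtype=np.float32)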
if __name__ == '__main__':
test_one_dim_two_dim_arg()
test_one_dim_two_dim_arg()
test_two_chunks_odd_dim()
test_two_chunks_odd_dim()
test_two_chunks_two_dim()
test_two_chunks()
test_three_chunks()
test_random_data()
    test_one_dim_longer_sequence()
test_two_dim_longer_sequence()
test_knn_one_dim()
test_knn_two_dim()
| gpl-3.0 |
pidydx/grr | grr/lib/flows/general/audit.py | 1 | 2003 | #!/usr/bin/env python
"""This implements the auditing system.
How does it work?
Noteworthy events within the GRR system (such as approval granting, flow
execution etc) generate events to notify listeners about the event.
The audit system consists of a group of event listeners which receive these
events and act upon them.
"""
from grr.lib import aff4
from grr.lib import events
from grr.lib import flow
from grr.lib import queues
from grr.lib import rdfvalue
from grr.lib import sequential_collection
AUDIT_EVENT = "Audit"
class AuditEventCollection(sequential_collection.IndexedSequentialCollection):
RDF_TYPE = events.AuditEvent
def AllAuditLogs(token=None):
# TODO(user): This is not great, we should store this differently.
for log in aff4.FACTORY.Open("aff4:/audit/logs", token=token).ListChildren():
yield AuditEventCollection(log, token=token)
def AuditLogsForTimespan(start_time, end_time, token=None):
# TODO(user): This is not great, we should store this differently.
for log in aff4.FACTORY.Open(
"aff4:/audit/logs", token=token).ListChildren(age=(start_time, end_time)):
yield AuditEventCollection(log, token=token)
class AuditEventListener(flow.EventListener):
"""Receive the audit events."""
well_known_session_id = rdfvalue.SessionID(
base="aff4:/audit", queue=queues.FLOWS, flow_name="listener")
EVENTS = [AUDIT_EVENT]
created_logs = set()
def EnsureLogIsIndexed(self, log_urn):
if log_urn not in self.created_logs:
# Just write any type to the aff4 space so we can determine
# which audit logs exist easily.
aff4.FACTORY.Create(
log_urn, aff4.AFF4Volume, mode="w", token=self.token).Close()
self.created_logs.add(log_urn)
return log_urn
@flow.EventHandler(auth_required=False)
def ProcessMessage(self, message=None, event=None):
_ = message
log_urn = aff4.CurrentAuditLog()
self.EnsureLogIsIndexed(log_urn)
AuditEventCollection.StaticAdd(log_urn, self.token, event)
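# Illustrative sketch (not part of the original module): reading stored audit
# events back. Assumes a valid ACL token and that AuditEventCollection can be
# iterated directly (an assumption made for illustration only).
def PrintAllAuditEvents(token=None):
  for log_collection in AllAuditLogs(token=token):
    for audit_event in log_collection:
      print audit_event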
| apache-2.0 |
jmmease/pandas | pandas/tests/tseries/test_timezones.py | 2 | 69288 | # pylint: disable-msg=E1101,W0612
import pytest
import pytz
import dateutil
import numpy as np
from dateutil.parser import parse
from pytz import NonExistentTimeError
from distutils.version import LooseVersion
from dateutil.tz import tzlocal, tzoffset
from datetime import datetime, timedelta, tzinfo, date
import pandas.util.testing as tm
import pandas.tseries.offsets as offsets
from pandas.compat import lrange, zip
from pandas.core.indexes.datetimes import bdate_range, date_range
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas._libs import tslib
from pandas._libs.tslibs import timezones
from pandas import (Index, Series, DataFrame, isna, Timestamp, NaT,
DatetimeIndex, to_datetime)
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
set_timezone)
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
fixed_off = FixedOffset(-420, '-07:00')
fixed_off_no_name = FixedOffset(-330, None)
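# Illustrative sketch (not part of the original tests): the fixed offsets
# above behave like any other tzinfo, e.g. (values follow from the class):
#
#     ts = Timestamp('3/11/2012 04:00', tz=fixed_off)
#     ts.utcoffset() == timedelta(minutes=-420)  # i.e. -07:00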
class TestTimeZoneSupportPytz(object):
def tz(self, tz):
# Construct a timezone object from a string. Overridden in subclass to
# parameterize tests.
return pytz.timezone(tz)
def tzstr(self, tz):
# Construct a timezone string from a string. Overridden in subclass to
# parameterize tests.
return tz
def localize(self, tz, x):
return tz.localize(x)
def cmptz(self, tz1, tz2):
# Compare two timezones. Overridden in subclass to parameterize
# tests.
return tz1.zone == tz2.zone
def test_utc_to_local_no_modify(self):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert(self.tzstr('US/Eastern'))
# Values are unmodified
assert np.array_equal(rng.asi8, rng_eastern.asi8)
assert self.cmptz(rng_eastern.tz, self.tz('US/Eastern'))
def test_utc_to_local_no_modify_explicit(self):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert(self.tz('US/Eastern'))
# Values are unmodified
tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)
assert rng_eastern.tz == self.tz('US/Eastern')
def test_localize_utc_conversion(self):
# Localizing to time zone should:
# 1) check for DST ambiguities
# 2) convert to UTC
rng = date_range('3/10/2012', '3/11/2012', freq='30T')
converted = rng.tz_localize(self.tzstr('US/Eastern'))
expected_naive = rng + offsets.Hour(5)
tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8)
# DST ambiguity, this should fail
rng = date_range('3/11/2012', '3/12/2012', freq='30T')
# Is this really how it should fail??
pytest.raises(NonExistentTimeError, rng.tz_localize,
self.tzstr('US/Eastern'))
def test_localize_utc_conversion_explicit(self):
# Localizing to time zone should:
# 1) check for DST ambiguities
# 2) convert to UTC
rng = date_range('3/10/2012', '3/11/2012', freq='30T')
converted = rng.tz_localize(self.tz('US/Eastern'))
expected_naive = rng + offsets.Hour(5)
assert np.array_equal(converted.asi8, expected_naive.asi8)
# DST ambiguity, this should fail
rng = date_range('3/11/2012', '3/12/2012', freq='30T')
# Is this really how it should fail??
pytest.raises(NonExistentTimeError, rng.tz_localize,
self.tz('US/Eastern'))
def test_timestamp_tz_localize(self):
stamp = Timestamp('3/11/2012 04:00')
result = stamp.tz_localize(self.tzstr('US/Eastern'))
expected = Timestamp('3/11/2012 04:00', tz=self.tzstr('US/Eastern'))
assert result.hour == expected.hour
assert result == expected
def test_timestamp_tz_localize_explicit(self):
stamp = Timestamp('3/11/2012 04:00')
result = stamp.tz_localize(self.tz('US/Eastern'))
expected = Timestamp('3/11/2012 04:00', tz=self.tz('US/Eastern'))
assert result.hour == expected.hour
assert result == expected
def test_timestamp_constructed_by_date_and_tz(self):
# Fix Issue 2993, Timestamp cannot be constructed by datetime.date
# and tz correctly
result = Timestamp(date(2012, 3, 11), tz=self.tzstr('US/Eastern'))
expected = Timestamp('3/11/2012', tz=self.tzstr('US/Eastern'))
assert result.hour == expected.hour
assert result == expected
def test_timestamp_constructed_by_date_and_tz_explicit(self):
# Fix Issue 2993, Timestamp cannot be constructed by datetime.date
# and tz correctly
result = Timestamp(date(2012, 3, 11), tz=self.tz('US/Eastern'))
expected = Timestamp('3/11/2012', tz=self.tz('US/Eastern'))
assert result.hour == expected.hour
assert result == expected
def test_timestamp_constructor_near_dst_boundary(self):
# GH 11481 & 15777
# Naive string timestamps were being localized incorrectly
# with tz_convert_single instead of tz_localize_to_utc
for tz in ['Europe/Brussels', 'Europe/Prague']:
result = Timestamp('2015-10-25 01:00', tz=tz)
expected = Timestamp('2015-10-25 01:00').tz_localize(tz)
assert result == expected
with pytest.raises(pytz.AmbiguousTimeError):
Timestamp('2015-10-25 02:00', tz=tz)
result = Timestamp('2017-03-26 01:00', tz='Europe/Paris')
expected = Timestamp('2017-03-26 01:00').tz_localize('Europe/Paris')
assert result == expected
with pytest.raises(pytz.NonExistentTimeError):
Timestamp('2017-03-26 02:00', tz='Europe/Paris')
# GH 11708
result = to_datetime("2015-11-18 15:30:00+05:30").tz_localize(
'UTC').tz_convert('Asia/Kolkata')
expected = Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')
assert result == expected
# GH 15823
result = Timestamp('2017-03-26 00:00', tz='Europe/Paris')
expected = Timestamp('2017-03-26 00:00:00+0100', tz='Europe/Paris')
assert result == expected
result = Timestamp('2017-03-26 01:00', tz='Europe/Paris')
expected = Timestamp('2017-03-26 01:00:00+0100', tz='Europe/Paris')
assert result == expected
with pytest.raises(pytz.NonExistentTimeError):
Timestamp('2017-03-26 02:00', tz='Europe/Paris')
result = Timestamp('2017-03-26 02:00:00+0100', tz='Europe/Paris')
expected = Timestamp(result.value).tz_localize(
'UTC').tz_convert('Europe/Paris')
assert result == expected
result = Timestamp('2017-03-26 03:00', tz='Europe/Paris')
expected = Timestamp('2017-03-26 03:00:00+0200', tz='Europe/Paris')
assert result == expected
def test_timestamp_to_datetime_tzoffset(self):
tzinfo = tzoffset(None, 7200)
expected = Timestamp('3/11/2012 04:00', tz=tzinfo)
result = Timestamp(expected.to_pydatetime())
assert expected == result
def test_timedelta_push_over_dst_boundary(self):
# #1389
# 4 hours before DST transition
stamp = Timestamp('3/10/2012 22:00', tz=self.tzstr('US/Eastern'))
result = stamp + timedelta(hours=6)
# spring forward, + "7" hours
expected = Timestamp('3/11/2012 05:00', tz=self.tzstr('US/Eastern'))
assert result == expected
def test_timedelta_push_over_dst_boundary_explicit(self):
# #1389
# 4 hours before DST transition
stamp = Timestamp('3/10/2012 22:00', tz=self.tz('US/Eastern'))
result = stamp + timedelta(hours=6)
# spring forward, + "7" hours
expected = Timestamp('3/11/2012 05:00', tz=self.tz('US/Eastern'))
assert result == expected
def test_tz_localize_dti(self):
dti = DatetimeIndex(start='1/1/2005', end='1/1/2005 0:00:30.256',
freq='L')
dti2 = dti.tz_localize(self.tzstr('US/Eastern'))
dti_utc = DatetimeIndex(start='1/1/2005 05:00',
end='1/1/2005 5:00:30.256', freq='L', tz='utc')
tm.assert_numpy_array_equal(dti2.values, dti_utc.values)
dti3 = dti2.tz_convert(self.tzstr('US/Pacific'))
tm.assert_numpy_array_equal(dti3.values, dti_utc.values)
dti = DatetimeIndex(start='11/6/2011 1:59', end='11/6/2011 2:00',
freq='L')
pytest.raises(pytz.AmbiguousTimeError, dti.tz_localize,
self.tzstr('US/Eastern'))
dti = DatetimeIndex(start='3/13/2011 1:59', end='3/13/2011 2:00',
freq='L')
pytest.raises(pytz.NonExistentTimeError, dti.tz_localize,
self.tzstr('US/Eastern'))
def test_tz_localize_empty_series(self):
# #2248
ts = Series()
ts2 = ts.tz_localize('utc')
assert ts2.index.tz == pytz.utc
ts2 = ts.tz_localize(self.tzstr('US/Eastern'))
assert self.cmptz(ts2.index.tz, self.tz('US/Eastern'))
def test_astimezone(self):
utc = Timestamp('3/11/2012 22:00', tz='UTC')
expected = utc.tz_convert(self.tzstr('US/Eastern'))
result = utc.astimezone(self.tzstr('US/Eastern'))
assert expected == result
assert isinstance(result, Timestamp)
def test_create_with_tz(self):
stamp = Timestamp('3/11/2012 05:00', tz=self.tzstr('US/Eastern'))
assert stamp.hour == 5
rng = date_range('3/11/2012 04:00', periods=10, freq='H',
tz=self.tzstr('US/Eastern'))
assert stamp == rng[1]
utc_stamp = Timestamp('3/11/2012 05:00', tz='utc')
assert utc_stamp.tzinfo is pytz.utc
assert utc_stamp.hour == 5
utc_stamp = Timestamp('3/11/2012 05:00').tz_localize('utc')
assert utc_stamp.hour == 5
def test_create_with_fixed_tz(self):
off = FixedOffset(420, '+07:00')
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
assert off == rng.tz
rng2 = date_range(start, periods=len(rng), tz=off)
tm.assert_index_equal(rng, rng2)
rng3 = date_range('3/11/2012 05:00:00+07:00',
'6/11/2012 05:00:00+07:00')
assert (rng.values == rng3.values).all()
def test_create_with_fixedoffset_noname(self):
off = fixed_off_no_name
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
assert off == rng.tz
idx = Index([start, end])
assert off == idx.tz
def test_date_range_localize(self):
rng = date_range('3/11/2012 03:00', periods=15, freq='H',
tz='US/Eastern')
rng2 = DatetimeIndex(['3/11/2012 03:00', '3/11/2012 04:00'],
tz='US/Eastern')
rng3 = date_range('3/11/2012 03:00', periods=15, freq='H')
rng3 = rng3.tz_localize('US/Eastern')
tm.assert_index_equal(rng, rng3)
# DST transition time
val = rng[0]
exp = Timestamp('3/11/2012 03:00', tz='US/Eastern')
assert val.hour == 3
assert exp.hour == 3
assert val == exp # same UTC value
tm.assert_index_equal(rng[:2], rng2)
# Right before the DST transition
rng = date_range('3/11/2012 00:00', periods=2, freq='H',
tz='US/Eastern')
rng2 = DatetimeIndex(['3/11/2012 00:00', '3/11/2012 01:00'],
tz='US/Eastern')
tm.assert_index_equal(rng, rng2)
exp = Timestamp('3/11/2012 00:00', tz='US/Eastern')
assert exp.hour == 0
assert rng[0] == exp
exp = Timestamp('3/11/2012 01:00', tz='US/Eastern')
assert exp.hour == 1
assert rng[1] == exp
rng = date_range('3/11/2012 00:00', periods=10, freq='H',
tz='US/Eastern')
assert rng[2].hour == 3
def test_utc_box_timestamp_and_localize(self):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert(self.tzstr('US/Eastern'))
tz = self.tz('US/Eastern')
expected = rng[-1].astimezone(tz)
stamp = rng_eastern[-1]
assert stamp == expected
assert stamp.tzinfo == expected.tzinfo
# right tzinfo
rng = date_range('3/13/2012', '3/14/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert(self.tzstr('US/Eastern'))
# test not valid for dateutil timezones.
# assert 'EDT' in repr(rng_eastern[0].tzinfo)
assert ('EDT' in repr(rng_eastern[0].tzinfo) or
'tzfile' in repr(rng_eastern[0].tzinfo))
def test_timestamp_tz_convert(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
idx = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern'))
conv = idx[0].tz_convert(self.tzstr('US/Pacific'))
expected = idx.tz_convert(self.tzstr('US/Pacific'))[0]
assert conv == expected
def test_pass_dates_localize_to_utc(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
idx = DatetimeIndex(strdates)
conv = idx.tz_localize(self.tzstr('US/Eastern'))
fromdates = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern'))
assert conv.tz == fromdates.tz
tm.assert_numpy_array_equal(conv.values, fromdates.values)
def test_field_access_localize(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
rng = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern'))
assert (rng.hour == 0).all()
# a more unusual time zone, #1946
dr = date_range('2011-10-02 00:00', freq='h', periods=10,
tz=self.tzstr('America/Atikokan'))
expected = Index(np.arange(10, dtype=np.int64))
tm.assert_index_equal(dr.hour, expected)
def test_with_tz(self):
tz = self.tz('US/Central')
# just want it to work
start = datetime(2011, 3, 12, tzinfo=pytz.utc)
dr = bdate_range(start, periods=50, freq=offsets.Hour())
assert dr.tz is pytz.utc
# DateRange with naive datetimes
dr = bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc)
dr = bdate_range('1/1/2005', '1/1/2009', tz=tz)
# normalized
central = dr.tz_convert(tz)
assert central.tz is tz
comp = self.localize(tz, central[0].to_pydatetime().replace(
tzinfo=None)).tzinfo
assert central[0].tz is comp
# compare vs a localized tz
comp = self.localize(tz,
dr[0].to_pydatetime().replace(tzinfo=None)).tzinfo
assert central[0].tz is comp
# datetimes with tzinfo set
dr = bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc),
'1/1/2009', tz=pytz.utc)
pytest.raises(Exception, bdate_range,
datetime(2005, 1, 1, tzinfo=pytz.utc), '1/1/2009',
tz=tz)
def test_tz_localize(self):
dr = bdate_range('1/1/2009', '1/1/2010')
dr_utc = bdate_range('1/1/2009', '1/1/2010', tz=pytz.utc)
localized = dr.tz_localize(pytz.utc)
tm.assert_index_equal(dr_utc, localized)
def test_with_tz_ambiguous_times(self):
tz = self.tz('US/Eastern')
# March 13, 2011, spring forward, skip from 2 AM to 3 AM
dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3,
freq=offsets.Hour())
pytest.raises(pytz.NonExistentTimeError, dr.tz_localize, tz)
# after dst transition, it works
dr = date_range(datetime(2011, 3, 13, 3, 30), periods=3,
freq=offsets.Hour(), tz=tz)
# November 6, 2011, fall back, repeat 2 AM hour
dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3,
freq=offsets.Hour())
pytest.raises(pytz.AmbiguousTimeError, dr.tz_localize, tz)
# UTC is OK
dr = date_range(datetime(2011, 3, 13), periods=48,
freq=offsets.Minute(30), tz=pytz.utc)
def test_ambiguous_infer(self):
# November 6, 2011, fall back, repeat 2 AM hour
# With no repeated hours, we cannot infer the transition
tz = self.tz('US/Eastern')
dr = date_range(datetime(2011, 11, 6, 0), periods=5,
freq=offsets.Hour())
pytest.raises(pytz.AmbiguousTimeError, dr.tz_localize, tz)
# With repeated hours, we can infer the transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5,
freq=offsets.Hour(), tz=tz)
times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00',
'11/06/2011 02:00', '11/06/2011 03:00']
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous='infer')
tm.assert_index_equal(dr, localized)
with tm.assert_produces_warning(FutureWarning):
localized_old = di.tz_localize(tz, infer_dst=True)
tm.assert_index_equal(dr, localized_old)
tm.assert_index_equal(dr, DatetimeIndex(times, tz=tz,
ambiguous='infer'))
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10,
freq=offsets.Hour())
localized = dr.tz_localize(tz)
localized_infer = dr.tz_localize(tz, ambiguous='infer')
tm.assert_index_equal(localized, localized_infer)
with tm.assert_produces_warning(FutureWarning):
localized_infer_old = dr.tz_localize(tz, infer_dst=True)
tm.assert_index_equal(localized, localized_infer_old)
def test_ambiguous_flags(self):
# November 6, 2011, fall back, repeat 2 AM hour
tz = self.tz('US/Eastern')
# Pass in flags to determine right dst transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5,
freq=offsets.Hour(), tz=tz)
times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00',
'11/06/2011 02:00', '11/06/2011 03:00']
# Test tz_localize
di = DatetimeIndex(times)
is_dst = [1, 1, 0, 0, 0]
localized = di.tz_localize(tz, ambiguous=is_dst)
tm.assert_index_equal(dr, localized)
tm.assert_index_equal(dr, DatetimeIndex(times, tz=tz,
ambiguous=is_dst))
localized = di.tz_localize(tz, ambiguous=np.array(is_dst))
tm.assert_index_equal(dr, localized)
localized = di.tz_localize(tz,
ambiguous=np.array(is_dst).astype('bool'))
tm.assert_index_equal(dr, localized)
# Test constructor
localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst)
tm.assert_index_equal(dr, localized)
# Test duplicate times where infer_dst fails
times += times
di = DatetimeIndex(times)
# When the sizes are incompatible, make sure error is raised
pytest.raises(Exception, di.tz_localize, tz, ambiguous=is_dst)
# When sizes are compatible and there are repeats ('infer' won't work)
is_dst = np.hstack((is_dst, is_dst))
localized = di.tz_localize(tz, ambiguous=is_dst)
dr = dr.append(dr)
tm.assert_index_equal(dr, localized)
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10,
freq=offsets.Hour())
is_dst = np.array([1] * 10)
localized = dr.tz_localize(tz)
localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst)
tm.assert_index_equal(localized, localized_is_dst)
# construction with an ambiguous end-point
# GH 11626
tz = self.tzstr("Europe/London")
def f():
date_range("2013-10-26 23:00", "2013-10-27 01:00",
tz="Europe/London", freq="H")
pytest.raises(pytz.AmbiguousTimeError, f)
times = date_range("2013-10-26 23:00", "2013-10-27 01:00", freq="H",
tz=tz, ambiguous='infer')
assert times[0] == Timestamp('2013-10-26 23:00', tz=tz, freq="H")
if str(tz).startswith('dateutil'):
if dateutil.__version__ < LooseVersion('2.6.0'):
# see gh-14621
assert times[-1] == Timestamp('2013-10-27 01:00:00+0000',
tz=tz, freq="H")
elif dateutil.__version__ > LooseVersion('2.6.0'):
# fixed ambiguous behavior
assert times[-1] == Timestamp('2013-10-27 01:00:00+0100',
tz=tz, freq="H")
else:
assert times[-1] == Timestamp('2013-10-27 01:00:00+0000',
tz=tz, freq="H")
def test_ambiguous_nat(self):
tz = self.tz('US/Eastern')
times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00',
'11/06/2011 02:00', '11/06/2011 03:00']
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous='NaT')
times = ['11/06/2011 00:00', np.NaN, np.NaN, '11/06/2011 02:00',
'11/06/2011 03:00']
di_test = DatetimeIndex(times, tz='US/Eastern')
# left dtype is datetime64[ns, US/Eastern]
# right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')]
tm.assert_numpy_array_equal(di_test.values, localized.values)
def test_ambiguous_bool(self):
# make sure that we are correctly accepting bool values as ambiguous
# gh-14402
t = Timestamp('2015-11-01 01:00:03')
expected0 = Timestamp('2015-11-01 01:00:03-0500', tz='US/Central')
expected1 = Timestamp('2015-11-01 01:00:03-0600', tz='US/Central')
def f():
t.tz_localize('US/Central')
pytest.raises(pytz.AmbiguousTimeError, f)
result = t.tz_localize('US/Central', ambiguous=True)
assert result == expected0
result = t.tz_localize('US/Central', ambiguous=False)
assert result == expected1
s = Series([t])
expected0 = Series([expected0])
expected1 = Series([expected1])
def f():
s.dt.tz_localize('US/Central')
pytest.raises(pytz.AmbiguousTimeError, f)
result = s.dt.tz_localize('US/Central', ambiguous=True)
assert_series_equal(result, expected0)
result = s.dt.tz_localize('US/Central', ambiguous=[True])
assert_series_equal(result, expected0)
result = s.dt.tz_localize('US/Central', ambiguous=False)
assert_series_equal(result, expected1)
result = s.dt.tz_localize('US/Central', ambiguous=[False])
assert_series_equal(result, expected1)
def test_nonexistent_raise_coerce(self):
# See issue 13057
from pytz.exceptions import NonExistentTimeError
times = ['2015-03-08 01:00', '2015-03-08 02:00', '2015-03-08 03:00']
index = DatetimeIndex(times)
tz = 'US/Eastern'
pytest.raises(NonExistentTimeError,
index.tz_localize, tz=tz)
pytest.raises(NonExistentTimeError,
index.tz_localize, tz=tz, errors='raise')
result = index.tz_localize(tz=tz, errors='coerce')
test_times = ['2015-03-08 01:00-05:00', 'NaT',
'2015-03-08 03:00-04:00']
expected = DatetimeIndex(test_times)\
.tz_localize('UTC').tz_convert('US/Eastern')
tm.assert_index_equal(result, expected)
# test utility methods
def test_infer_tz(self):
eastern = self.tz('US/Eastern')
utc = pytz.utc
_start = datetime(2001, 1, 1)
_end = datetime(2009, 1, 1)
start = self.localize(eastern, _start)
end = self.localize(eastern, _end)
assert (timezones.infer_tzinfo(start, end) is
self.localize(eastern, _start).tzinfo)
assert (timezones.infer_tzinfo(start, None) is
self.localize(eastern, _start).tzinfo)
assert (timezones.infer_tzinfo(None, end) is
self.localize(eastern, _end).tzinfo)
start = utc.localize(_start)
end = utc.localize(_end)
assert (timezones.infer_tzinfo(start, end) is utc)
end = self.localize(eastern, _end)
pytest.raises(Exception, timezones.infer_tzinfo, start, end)
pytest.raises(Exception, timezones.infer_tzinfo, end, start)
def test_tz_string(self):
result = date_range('1/1/2000', periods=10,
tz=self.tzstr('US/Eastern'))
expected = date_range('1/1/2000', periods=10, tz=self.tz('US/Eastern'))
tm.assert_index_equal(result, expected)
def test_take_dont_lose_meta(self):
rng = date_range('1/1/2000', periods=20, tz=self.tzstr('US/Eastern'))
result = rng.take(lrange(5))
assert result.tz == rng.tz
assert result.freq == rng.freq
def test_index_with_timezone_repr(self):
rng = date_range('4/13/2010', '5/6/2010')
rng_eastern = rng.tz_localize(self.tzstr('US/Eastern'))
rng_repr = repr(rng_eastern)
assert '2010-04-13 00:00:00' in rng_repr
def test_index_astype_asobject_tzinfos(self):
# #1345
# dates around a dst transition
rng = date_range('2/13/2010', '5/6/2010', tz=self.tzstr('US/Eastern'))
objs = rng.asobject
for i, x in enumerate(objs):
exval = rng[i]
assert x == exval
assert x.tzinfo == exval.tzinfo
objs = rng.astype(object)
for i, x in enumerate(objs):
exval = rng[i]
assert x == exval
assert x.tzinfo == exval.tzinfo
def test_localized_at_time_between_time(self):
from datetime import time
rng = date_range('4/16/2012', '5/1/2012', freq='H')
ts = Series(np.random.randn(len(rng)), index=rng)
ts_local = ts.tz_localize(self.tzstr('US/Eastern'))
result = ts_local.at_time(time(10, 0))
expected = ts.at_time(time(10, 0)).tz_localize(self.tzstr(
'US/Eastern'))
assert_series_equal(result, expected)
assert self.cmptz(result.index.tz, self.tz('US/Eastern'))
t1, t2 = time(10, 0), time(11, 0)
result = ts_local.between_time(t1, t2)
expected = ts.between_time(t1,
t2).tz_localize(self.tzstr('US/Eastern'))
assert_series_equal(result, expected)
assert self.cmptz(result.index.tz, self.tz('US/Eastern'))
def test_string_index_alias_tz_aware(self):
rng = date_range('1/1/2000', periods=10, tz=self.tzstr('US/Eastern'))
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts['1/3/2000']
tm.assert_almost_equal(result, ts[2])
def test_fixed_offset(self):
dates = [datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off)]
result = to_datetime(dates)
assert result.tz == fixed_off
def test_fixedtz_topydatetime(self):
dates = np.array([datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off)])
result = to_datetime(dates).to_pydatetime()
tm.assert_numpy_array_equal(dates, result)
result = to_datetime(dates)._mpl_repr()
tm.assert_numpy_array_equal(dates, result)
def test_convert_tz_aware_datetime_datetime(self):
# #1581
tz = self.tz('US/Eastern')
dates = [datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)]
dates_aware = [self.localize(tz, x) for x in dates]
result = to_datetime(dates_aware)
assert self.cmptz(result.tz, self.tz('US/Eastern'))
converted = to_datetime(dates_aware, utc=True)
ex_vals = np.array([Timestamp(x).value for x in dates_aware])
tm.assert_numpy_array_equal(converted.asi8, ex_vals)
assert converted.tz is pytz.utc
def test_to_datetime_utc(self):
arr = np.array([parse('2012-06-13T01:39:00Z')], dtype=object)
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
def test_to_datetime_tzlocal(self):
dt = parse('2012-06-13T01:39:00Z')
dt = dt.replace(tzinfo=tzlocal())
arr = np.array([dt], dtype=object)
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
rng = date_range('2012-11-03 03:00', '2012-11-05 03:00', tz=tzlocal())
arr = rng.to_pydatetime()
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
def test_frame_no_datetime64_dtype(self):
# after 7822
# these retain the timezones on dict construction
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
dr_tz = dr.tz_localize(self.tzstr('US/Eastern'))
e = DataFrame({'A': 'foo', 'B': dr_tz}, index=dr)
tz_expected = DatetimeTZDtype('ns', dr_tz.tzinfo)
assert e['B'].dtype == tz_expected
# GH 2810 (with timezones)
datetimes_naive = [ts.to_pydatetime() for ts in dr]
datetimes_with_tz = [ts.to_pydatetime() for ts in dr_tz]
df = DataFrame({'dr': dr,
'dr_tz': dr_tz,
'datetimes_naive': datetimes_naive,
'datetimes_with_tz': datetimes_with_tz})
result = df.get_dtype_counts().sort_index()
expected = Series({'datetime64[ns]': 2,
str(tz_expected): 2}).sort_index()
assert_series_equal(result, expected)
def test_hongkong_tz_convert(self):
# #1673
dr = date_range('2012-01-01', '2012-01-10', freq='D', tz='Hongkong')
# it works!
dr.hour
def test_tz_convert_unsorted(self):
dr = date_range('2012-03-09', freq='H', periods=100, tz='utc')
dr = dr.tz_convert(self.tzstr('US/Eastern'))
result = dr[::-1].hour
exp = dr.hour[::-1]
tm.assert_almost_equal(result, exp)
def test_shift_localized(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
dr_tz = dr.tz_localize(self.tzstr('US/Eastern'))
result = dr_tz.shift(1, '10T')
assert result.tz == dr_tz.tz
def test_tz_aware_asfreq(self):
dr = date_range('2011-12-01', '2012-07-20', freq='D',
tz=self.tzstr('US/Eastern'))
s = Series(np.random.randn(len(dr)), index=dr)
# it works!
s.asfreq('T')
def test_static_tzinfo(self):
# it works!
index = DatetimeIndex([datetime(2012, 1, 1)], tz=self.tzstr('EST'))
index.hour
index[0]
def test_tzaware_datetime_to_index(self):
d = [datetime(2012, 8, 19, tzinfo=self.tz('US/Eastern'))]
index = DatetimeIndex(d)
assert self.cmptz(index.tz, self.tz('US/Eastern'))
def test_date_range_span_dst_transition(self):
# #1778
# Standard -> Daylight Savings Time
dr = date_range('03/06/2012 00:00', periods=200, freq='W-FRI',
tz='US/Eastern')
assert (dr.hour == 0).all()
dr = date_range('2012-11-02', periods=10, tz=self.tzstr('US/Eastern'))
assert (dr.hour == 0).all()
def test_convert_datetime_list(self):
dr = date_range('2012-06-02', periods=10,
tz=self.tzstr('US/Eastern'), name='foo')
dr2 = DatetimeIndex(list(dr), name='foo')
tm.assert_index_equal(dr, dr2)
assert dr.tz == dr2.tz
assert dr2.name == 'foo'
def test_frame_from_records_utc(self):
rec = {'datum': 1.5,
'begin_time': datetime(2006, 4, 27, tzinfo=pytz.utc)}
# it works
DataFrame.from_records([rec], index='begin_time')
def test_frame_reset_index(self):
dr = date_range('2012-06-02', periods=10, tz=self.tzstr('US/Eastern'))
df = DataFrame(np.random.randn(len(dr)), dr)
roundtripped = df.reset_index().set_index('index')
xp = df.index.tz
rs = roundtripped.index.tz
assert xp == rs
def test_dateutil_tzoffset_support(self):
values = [188.5, 328.25]
tzinfo = tzoffset(None, 7200)
index = [datetime(2012, 5, 11, 11, tzinfo=tzinfo),
datetime(2012, 5, 11, 12, tzinfo=tzinfo)]
series = Series(data=values, index=index)
assert series.index.tz == tzinfo
# it works! #2443
repr(series.index[0])
def test_getitem_pydatetime_tz(self):
index = date_range(start='2012-12-24 16:00', end='2012-12-24 18:00',
freq='H', tz=self.tzstr('Europe/Berlin'))
ts = Series(index=index, data=index.hour)
time_pandas = Timestamp('2012-12-24 17:00',
tz=self.tzstr('Europe/Berlin'))
time_datetime = self.localize(
self.tz('Europe/Berlin'), datetime(2012, 12, 24, 17, 0))
assert ts[time_pandas] == ts[time_datetime]
def test_index_drop_dont_lose_tz(self):
# #2621
ind = date_range("2012-12-01", periods=10, tz="utc")
ind = ind.drop(ind[-1])
assert ind.tz is not None
def test_datetimeindex_tz(self):
""" Test different DatetimeIndex constructions with timezone
Follow-up of #4229
"""
arr = ['11/10/2005 08:00:00', '11/10/2005 09:00:00']
idx1 = to_datetime(arr).tz_localize(self.tzstr('US/Eastern'))
idx2 = DatetimeIndex(start="2005-11-10 08:00:00", freq='H', periods=2,
tz=self.tzstr('US/Eastern'))
idx3 = DatetimeIndex(arr, tz=self.tzstr('US/Eastern'))
idx4 = DatetimeIndex(np.array(arr), tz=self.tzstr('US/Eastern'))
for other in [idx2, idx3, idx4]:
tm.assert_index_equal(idx1, other)
def test_datetimeindex_tz_nat(self):
idx = to_datetime([Timestamp("2013-1-1", tz=self.tzstr('US/Eastern')),
NaT])
assert isna(idx[1])
assert idx[0].tzinfo is not None
class TestTimeZoneSupportDateutil(TestTimeZoneSupportPytz):
def tz(self, tz):
"""
Construct a dateutil timezone.
Use tslib.maybe_get_tz so that we get the filename on the tz right
on windows. See #7337.
"""
return timezones.maybe_get_tz('dateutil/' + tz)
def tzstr(self, tz):
""" Construct a timezone string from a string. Overridden in subclass
to parameterize tests. """
return 'dateutil/' + tz
def cmptz(self, tz1, tz2):
""" Compare two timezones. Overridden in subclass to parameterize
tests. """
return tz1 == tz2
def localize(self, tz, x):
return x.replace(tzinfo=tz)
def test_utc_with_system_utc(self):
# Skipped on win32 due to dateutil bug
tm._skip_if_windows()
from pandas._libs.tslibs.timezones import maybe_get_tz
# from system utc to real utc
ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC'))
# check that the time hasn't changed.
assert ts == ts.tz_convert(dateutil.tz.tzutc())
# from system utc to real utc
ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC'))
# check that the time hasn't changed.
assert ts == ts.tz_convert(dateutil.tz.tzutc())
def test_tz_convert_hour_overflow_dst(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
# sorted case US/Eastern -> UTC
ts = ['2008-05-12 09:50:00',
'2008-12-12 09:50:35',
'2009-05-12 09:50:32']
tt = to_datetime(ts).tz_localize('US/Eastern')
ut = tt.tz_convert('UTC')
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = ['2008-05-12 13:50:00',
'2008-12-12 14:50:35',
'2009-05-12 13:50:32']
tt = to_datetime(ts).tz_localize('UTC')
ut = tt.tz_convert('US/Eastern')
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = ['2008-05-12 09:50:00',
'2008-12-12 09:50:35',
'2008-05-12 09:50:32']
tt = to_datetime(ts).tz_localize('US/Eastern')
ut = tt.tz_convert('UTC')
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = ['2008-05-12 13:50:00',
'2008-12-12 14:50:35',
'2008-05-12 13:50:32']
tt = to_datetime(ts).tz_localize('UTC')
ut = tt.tz_convert('US/Eastern')
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
def test_tz_convert_hour_overflow_dst_timestamps(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
tz = self.tzstr('US/Eastern')
# sorted case US/Eastern -> UTC
ts = [Timestamp('2008-05-12 09:50:00', tz=tz),
Timestamp('2008-12-12 09:50:35', tz=tz),
Timestamp('2009-05-12 09:50:32', tz=tz)]
tt = to_datetime(ts)
ut = tt.tz_convert('UTC')
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = [Timestamp('2008-05-12 13:50:00', tz='UTC'),
Timestamp('2008-12-12 14:50:35', tz='UTC'),
Timestamp('2009-05-12 13:50:32', tz='UTC')]
tt = to_datetime(ts)
ut = tt.tz_convert('US/Eastern')
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = [Timestamp('2008-05-12 09:50:00', tz=tz),
Timestamp('2008-12-12 09:50:35', tz=tz),
Timestamp('2008-05-12 09:50:32', tz=tz)]
tt = to_datetime(ts)
ut = tt.tz_convert('UTC')
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = [Timestamp('2008-05-12 13:50:00', tz='UTC'),
Timestamp('2008-12-12 14:50:35', tz='UTC'),
Timestamp('2008-05-12 13:50:32', tz='UTC')]
tt = to_datetime(ts)
ut = tt.tz_convert('US/Eastern')
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
def test_tslib_tz_convert_trans_pos_plus_1__bug(self):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
# See https://github.com/pandas-dev/pandas/issues/4496 for details.
for freq, n in [('H', 1), ('T', 60), ('S', 3600)]:
idx = date_range(datetime(2011, 3, 26, 23),
datetime(2011, 3, 27, 1), freq=freq)
idx = idx.tz_localize('UTC')
idx = idx.tz_convert('Europe/Moscow')
expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
def test_tslib_tz_convert_dst(self):
for freq, n in [('H', 1), ('T', 60), ('S', 3600)]:
# Start DST
idx = date_range('2014-03-08 23:00', '2014-03-09 09:00', freq=freq,
tz='UTC')
idx = idx.tz_convert('US/Eastern')
expected = np.repeat(np.array([18, 19, 20, 21, 22, 23,
0, 1, 3, 4, 5]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range('2014-03-08 18:00', '2014-03-09 05:00', freq=freq,
tz='US/Eastern')
idx = idx.tz_convert('UTC')
expected = np.repeat(np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
# End DST
idx = date_range('2014-11-01 23:00', '2014-11-02 09:00', freq=freq,
tz='UTC')
idx = idx.tz_convert('US/Eastern')
expected = np.repeat(np.array([19, 20, 21, 22, 23,
0, 1, 1, 2, 3, 4]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range('2014-11-01 18:00', '2014-11-02 05:00', freq=freq,
tz='US/Eastern')
idx = idx.tz_convert('UTC')
expected = np.repeat(np.array([22, 23, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10]),
np.array([n, n, n, n, n, n, n, n, n,
n, n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
# daily
# Start DST
idx = date_range('2014-03-08 00:00', '2014-03-09 00:00', freq='D',
tz='UTC')
idx = idx.tz_convert('US/Eastern')
tm.assert_index_equal(idx.hour, Index([19, 19]))
idx = date_range('2014-03-08 00:00', '2014-03-09 00:00', freq='D',
tz='US/Eastern')
idx = idx.tz_convert('UTC')
tm.assert_index_equal(idx.hour, Index([5, 5]))
# End DST
idx = date_range('2014-11-01 00:00', '2014-11-02 00:00', freq='D',
tz='UTC')
idx = idx.tz_convert('US/Eastern')
tm.assert_index_equal(idx.hour, Index([20, 20]))
        idx = date_range('2014-11-01 00:00', '2014-11-02 00:00', freq='D',
tz='US/Eastern')
idx = idx.tz_convert('UTC')
tm.assert_index_equal(idx.hour, Index([4, 4]))
def test_tzlocal(self):
# GH 13583
ts = Timestamp('2011-01-01', tz=dateutil.tz.tzlocal())
assert ts.tz == dateutil.tz.tzlocal()
assert "tz='tzlocal()')" in repr(ts)
tz = timezones.maybe_get_tz('tzlocal()')
assert tz == dateutil.tz.tzlocal()
# get offset using normal datetime for test
offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))
offset = offset.total_seconds() * 1000000000
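        # ts.value holds UTC nanoseconds, so local midnight differs from the
        # naive timestamp by the local UTC offset; adding the offset back
        # should recover the naive value.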
assert ts.value + offset == Timestamp('2011-01-01').value
def test_tz_localize_tzlocal(self):
# GH 13583
offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))
offset = int(offset.total_seconds() * 1000000000)
dti = date_range(start='2001-01-01', end='2001-03-01')
dti2 = dti.tz_localize(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8)
dti = date_range(start='2001-01-01', end='2001-03-01',
tz=dateutil.tz.tzlocal())
dti2 = dti.tz_localize(None)
tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8)
def test_tz_convert_tzlocal(self):
# GH 13583
        # tz_convert does not affect the internal values
dti = date_range(start='2001-01-01', end='2001-03-01', tz='UTC')
dti2 = dti.tz_convert(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
dti = date_range(start='2001-01-01', end='2001-03-01',
tz=dateutil.tz.tzlocal())
dti2 = dti.tz_convert(None)
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
class TestTimeZoneCacheKey(object):
def test_cache_keys_are_distinct_for_pytz_vs_dateutil(self):
tzs = pytz.common_timezones
for tz_name in tzs:
if tz_name == 'UTC':
# skip utc as it's a special case in dateutil
continue
tz_p = timezones.maybe_get_tz(tz_name)
tz_d = timezones.maybe_get_tz('dateutil/' + tz_name)
if tz_d is None:
# skip timezones that dateutil doesn't know about.
continue
assert (timezones._p_tz_cache_key(tz_p) !=
timezones._p_tz_cache_key(tz_d))
class TestTimeZones(object):
timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']
def test_replace(self):
# GH 14621
# GH 7825
# replacing datetime components with and w/o presence of a timezone
dt = Timestamp('2016-01-01 09:00:00')
result = dt.replace(hour=0)
expected = Timestamp('2016-01-01 00:00:00')
assert result == expected
for tz in self.timezones:
dt = Timestamp('2016-01-01 09:00:00', tz=tz)
result = dt.replace(hour=0)
expected = Timestamp('2016-01-01 00:00:00', tz=tz)
assert result == expected
# we preserve nanoseconds
dt = Timestamp('2016-01-01 09:00:00.000000123', tz=tz)
result = dt.replace(hour=0)
expected = Timestamp('2016-01-01 00:00:00.000000123', tz=tz)
assert result == expected
# test all
dt = Timestamp('2016-01-01 09:00:00.000000123', tz=tz)
result = dt.replace(year=2015, month=2, day=2, hour=0, minute=5,
second=5, microsecond=5, nanosecond=5)
expected = Timestamp('2015-02-02 00:05:05.000005005', tz=tz)
assert result == expected
# error
def f():
dt.replace(foo=5)
pytest.raises(TypeError, f)
def f():
dt.replace(hour=0.1)
pytest.raises(ValueError, f)
# assert conversion to naive is the same as replacing tzinfo with None
dt = Timestamp('2013-11-03 01:59:59.999999-0400', tz='US/Eastern')
assert dt.tz_localize(None) == dt.replace(tzinfo=None)
def test_ambiguous_compat(self):
# validate that pytz and dateutil are compat for dst
# when the transition happens
pytz_zone = 'Europe/London'
dateutil_zone = 'dateutil/Europe/London'
result_pytz = (Timestamp('2013-10-27 01:00:00')
.tz_localize(pytz_zone, ambiguous=0))
result_dateutil = (Timestamp('2013-10-27 01:00:00')
.tz_localize(dateutil_zone, ambiguous=0))
assert result_pytz.value == result_dateutil.value
assert result_pytz.value == 1382835600000000000
if dateutil.__version__ < LooseVersion('2.6.0'):
# dateutil 2.6 buggy w.r.t. ambiguous=0
# see gh-14621
# see https://github.com/dateutil/dateutil/issues/321
assert (result_pytz.to_pydatetime().tzname() ==
result_dateutil.to_pydatetime().tzname())
assert str(result_pytz) == str(result_dateutil)
elif dateutil.__version__ > LooseVersion('2.6.0'):
# fixed ambiguous behavior
assert result_pytz.to_pydatetime().tzname() == 'GMT'
assert result_dateutil.to_pydatetime().tzname() == 'BST'
assert str(result_pytz) != str(result_dateutil)
# 1 hour difference
result_pytz = (Timestamp('2013-10-27 01:00:00')
.tz_localize(pytz_zone, ambiguous=1))
result_dateutil = (Timestamp('2013-10-27 01:00:00')
.tz_localize(dateutil_zone, ambiguous=1))
assert result_pytz.value == result_dateutil.value
assert result_pytz.value == 1382832000000000000
# dateutil < 2.6 is buggy w.r.t. ambiguous timezones
if dateutil.__version__ > LooseVersion('2.5.3'):
# see gh-14621
assert str(result_pytz) == str(result_dateutil)
assert (result_pytz.to_pydatetime().tzname() ==
result_dateutil.to_pydatetime().tzname())
def test_replace_tzinfo(self):
# GH 15683
dt = datetime(2016, 3, 27, 1)
tzinfo = pytz.timezone('CET').localize(dt, is_dst=False).tzinfo
result_dt = dt.replace(tzinfo=tzinfo)
result_pd = Timestamp(dt).replace(tzinfo=tzinfo)
if hasattr(result_dt, 'timestamp'): # New method in Py 3.3
assert result_dt.timestamp() == result_pd.timestamp()
assert result_dt == result_pd
assert result_dt == result_pd.to_pydatetime()
result_dt = dt.replace(tzinfo=tzinfo).replace(tzinfo=None)
result_pd = Timestamp(dt).replace(tzinfo=tzinfo).replace(tzinfo=None)
if hasattr(result_dt, 'timestamp'): # New method in Py 3.3
assert result_dt.timestamp() == result_pd.timestamp()
assert result_dt == result_pd
assert result_dt == result_pd.to_pydatetime()
def test_index_equals_with_tz(self):
left = date_range('1/1/2011', periods=100, freq='H', tz='utc')
right = date_range('1/1/2011', periods=100, freq='H', tz='US/Eastern')
assert not left.equals(right)
def test_tz_localize_naive(self):
rng = date_range('1/1/2011', periods=100, freq='H')
conv = rng.tz_localize('US/Pacific')
exp = date_range('1/1/2011', periods=100, freq='H', tz='US/Pacific')
tm.assert_index_equal(conv, exp)
def test_tz_localize_roundtrip(self):
for tz in self.timezones:
idx1 = date_range(start='2014-01-01', end='2014-12-31', freq='M')
idx2 = date_range(start='2014-01-01', end='2014-12-31', freq='D')
idx3 = date_range(start='2014-01-01', end='2014-03-01', freq='H')
idx4 = date_range(start='2014-08-01', end='2014-10-31', freq='T')
for idx in [idx1, idx2, idx3, idx4]:
localized = idx.tz_localize(tz)
expected = date_range(start=idx[0], end=idx[-1], freq=idx.freq,
tz=tz)
tm.assert_index_equal(localized, expected)
with pytest.raises(TypeError):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
tm.assert_index_equal(reset, idx)
assert reset.tzinfo is None
def test_series_frame_tz_localize(self):
rng = date_range('1/1/2011', periods=100, freq='H')
ts = Series(1, index=rng)
result = ts.tz_localize('utc')
assert result.index.tz.zone == 'UTC'
df = DataFrame({'a': 1}, index=rng)
result = df.tz_localize('utc')
expected = DataFrame({'a': 1}, rng.tz_localize('UTC'))
assert result.index.tz.zone == 'UTC'
assert_frame_equal(result, expected)
df = df.T
result = df.tz_localize('utc', axis=1)
assert result.columns.tz.zone == 'UTC'
assert_frame_equal(result, expected.T)
# Can't localize if already tz-aware
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
ts = Series(1, index=rng)
tm.assert_raises_regex(TypeError, 'Already tz-aware',
ts.tz_localize, 'US/Eastern')
def test_series_frame_tz_convert(self):
rng = date_range('1/1/2011', periods=200, freq='D', tz='US/Eastern')
ts = Series(1, index=rng)
result = ts.tz_convert('Europe/Berlin')
assert result.index.tz.zone == 'Europe/Berlin'
df = DataFrame({'a': 1}, index=rng)
result = df.tz_convert('Europe/Berlin')
expected = DataFrame({'a': 1}, rng.tz_convert('Europe/Berlin'))
assert result.index.tz.zone == 'Europe/Berlin'
assert_frame_equal(result, expected)
df = df.T
result = df.tz_convert('Europe/Berlin', axis=1)
assert result.columns.tz.zone == 'Europe/Berlin'
assert_frame_equal(result, expected.T)
# can't convert tz-naive
rng = date_range('1/1/2011', periods=200, freq='D')
ts = Series(1, index=rng)
tm.assert_raises_regex(TypeError, "Cannot convert tz-naive",
ts.tz_convert, 'US/Eastern')
def test_tz_convert_roundtrip(self):
for tz in self.timezones:
idx1 = date_range(start='2014-01-01', end='2014-12-31', freq='M',
tz='UTC')
exp1 = date_range(start='2014-01-01', end='2014-12-31', freq='M')
idx2 = date_range(start='2014-01-01', end='2014-12-31', freq='D',
tz='UTC')
exp2 = date_range(start='2014-01-01', end='2014-12-31', freq='D')
idx3 = date_range(start='2014-01-01', end='2014-03-01', freq='H',
tz='UTC')
exp3 = date_range(start='2014-01-01', end='2014-03-01', freq='H')
idx4 = date_range(start='2014-08-01', end='2014-10-31', freq='T',
tz='UTC')
exp4 = date_range(start='2014-08-01', end='2014-10-31', freq='T')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3),
(idx4, exp4)]:
converted = idx.tz_convert(tz)
reset = converted.tz_convert(None)
tm.assert_index_equal(reset, expected)
assert reset.tzinfo is None
tm.assert_index_equal(reset, converted.tz_convert(
'UTC').tz_localize(None))
def test_join_utc_convert(self):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
left = rng.tz_convert('US/Eastern')
right = rng.tz_convert('Europe/Berlin')
for how in ['inner', 'outer', 'left', 'right']:
result = left.join(left[:-5], how=how)
assert isinstance(result, DatetimeIndex)
assert result.tz == left.tz
result = left.join(right[:-5], how=how)
assert isinstance(result, DatetimeIndex)
assert result.tz.zone == 'UTC'
def test_join_aware(self):
rng = date_range('1/1/2011', periods=10, freq='H')
ts = Series(np.random.randn(len(rng)), index=rng)
ts_utc = ts.tz_localize('utc')
pytest.raises(Exception, ts.__add__, ts_utc)
pytest.raises(Exception, ts_utc.__add__, ts)
test1 = DataFrame(np.zeros((6, 3)),
index=date_range("2012-11-15 00:00:00", periods=6,
freq="100L", tz="US/Central"))
test2 = DataFrame(np.zeros((3, 3)),
index=date_range("2012-11-15 00:00:00", periods=3,
freq="250L", tz="US/Central"),
columns=lrange(3, 6))
result = test1.join(test2, how='outer')
ex_index = test1.index.union(test2.index)
tm.assert_index_equal(result.index, ex_index)
assert result.index.tz.zone == 'US/Central'
# non-overlapping
rng = date_range("2012-11-15 00:00:00", periods=6, freq="H",
tz="US/Central")
rng2 = date_range("2012-11-15 12:00:00", periods=6, freq="H",
tz="US/Eastern")
result = rng.union(rng2)
assert result.tz.zone == 'UTC'
def test_align_aware(self):
idx1 = date_range('2001', periods=5, freq='H', tz='US/Eastern')
idx2 = date_range('2001', periods=5, freq='2H', tz='US/Eastern')
df1 = DataFrame(np.random.randn(len(idx1), 3), idx1)
df2 = DataFrame(np.random.randn(len(idx2), 3), idx2)
new1, new2 = df1.align(df2)
assert df1.index.tz == new1.index.tz
assert df2.index.tz == new2.index.tz
        # different timezones convert to UTC
# frame
df1_central = df1.tz_convert('US/Central')
new1, new2 = df1.align(df1_central)
assert new1.index.tz == pytz.UTC
assert new2.index.tz == pytz.UTC
# series
new1, new2 = df1[0].align(df1_central[0])
assert new1.index.tz == pytz.UTC
assert new2.index.tz == pytz.UTC
# combination
new1, new2 = df1.align(df1_central[0], axis=0)
assert new1.index.tz == pytz.UTC
assert new2.index.tz == pytz.UTC
        new1, new2 = df1[0].align(df1_central, axis=0)
assert new1.index.tz == pytz.UTC
assert new2.index.tz == pytz.UTC
def test_append_aware(self):
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H',
tz='US/Eastern')
rng2 = date_range('1/1/2011 02:00', periods=1, freq='H',
tz='US/Eastern')
ts1 = Series([1], index=rng1)
ts2 = Series([2], index=rng2)
ts_result = ts1.append(ts2)
exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'],
tz='US/Eastern')
exp = Series([1, 2], index=exp_index)
assert_series_equal(ts_result, exp)
assert ts_result.index.tz == rng1.tz
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', tz='UTC')
rng2 = date_range('1/1/2011 02:00', periods=1, freq='H', tz='UTC')
ts1 = Series([1], index=rng1)
ts2 = Series([2], index=rng2)
ts_result = ts1.append(ts2)
exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'],
tz='UTC')
exp = Series([1, 2], index=exp_index)
assert_series_equal(ts_result, exp)
utc = rng1.tz
assert utc == ts_result.index.tz
# GH 7795
# different tz coerces to object dtype, not UTC
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H',
tz='US/Eastern')
rng2 = date_range('1/1/2011 02:00', periods=1, freq='H',
tz='US/Central')
ts1 = Series([1], index=rng1)
ts2 = Series([2], index=rng2)
ts_result = ts1.append(ts2)
exp_index = Index([Timestamp('1/1/2011 01:00', tz='US/Eastern'),
Timestamp('1/1/2011 02:00', tz='US/Central')])
exp = Series([1, 2], index=exp_index)
assert_series_equal(ts_result, exp)
def test_append_dst(self):
rng1 = date_range('1/1/2016 01:00', periods=3, freq='H',
tz='US/Eastern')
rng2 = date_range('8/1/2016 01:00', periods=3, freq='H',
tz='US/Eastern')
ts1 = Series([1, 2, 3], index=rng1)
ts2 = Series([10, 11, 12], index=rng2)
ts_result = ts1.append(ts2)
exp_index = DatetimeIndex(['2016-01-01 01:00', '2016-01-01 02:00',
'2016-01-01 03:00', '2016-08-01 01:00',
'2016-08-01 02:00', '2016-08-01 03:00'],
tz='US/Eastern')
exp = Series([1, 2, 3, 10, 11, 12], index=exp_index)
assert_series_equal(ts_result, exp)
assert ts_result.index.tz == rng1.tz
def test_append_aware_naive(self):
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H')
rng2 = date_range('1/1/2011 02:00', periods=1, freq='H',
tz='US/Eastern')
ts1 = Series(np.random.randn(len(rng1)), index=rng1)
ts2 = Series(np.random.randn(len(rng2)), index=rng2)
ts_result = ts1.append(ts2)
assert ts_result.index.equals(ts1.index.asobject.append(
ts2.index.asobject))
# mixed
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H')
rng2 = lrange(100)
ts1 = Series(np.random.randn(len(rng1)), index=rng1)
ts2 = Series(np.random.randn(len(rng2)), index=rng2)
ts_result = ts1.append(ts2)
assert ts_result.index.equals(ts1.index.asobject.append(
ts2.index))
def test_equal_join_ensure_utc(self):
rng = date_range('1/1/2011', periods=10, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), index=rng)
ts_moscow = ts.tz_convert('Europe/Moscow')
result = ts + ts_moscow
assert result.index.tz is pytz.utc
result = ts_moscow + ts
assert result.index.tz is pytz.utc
df = DataFrame({'a': ts})
df_moscow = df.tz_convert('Europe/Moscow')
result = df + df_moscow
assert result.index.tz is pytz.utc
result = df_moscow + df
assert result.index.tz is pytz.utc
def test_arith_utc_convert(self):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
perm = np.random.permutation(100)[:90]
ts1 = Series(np.random.randn(90),
index=rng.take(perm).tz_convert('US/Eastern'))
perm = np.random.permutation(100)[:90]
ts2 = Series(np.random.randn(90),
index=rng.take(perm).tz_convert('Europe/Berlin'))
result = ts1 + ts2
uts1 = ts1.tz_convert('utc')
uts2 = ts2.tz_convert('utc')
expected = uts1 + uts2
assert result.index.tz == pytz.UTC
assert_series_equal(result, expected)
def test_intersection(self):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
left = rng[10:90][::-1]
right = rng[20:80][::-1]
assert left.tz == rng.tz
result = left.intersection(right)
assert result.tz == left.tz
def test_timestamp_equality_different_timezones(self):
utc_range = date_range('1/1/2000', periods=20, tz='UTC')
eastern_range = utc_range.tz_convert('US/Eastern')
berlin_range = utc_range.tz_convert('Europe/Berlin')
for a, b, c in zip(utc_range, eastern_range, berlin_range):
assert a == b
assert b == c
assert a == c
assert (utc_range == eastern_range).all()
assert (utc_range == berlin_range).all()
assert (berlin_range == eastern_range).all()
def test_datetimeindex_tz(self):
rng = date_range('03/12/2012 00:00', periods=10, freq='W-FRI',
tz='US/Eastern')
rng2 = DatetimeIndex(data=rng, tz='US/Eastern')
tm.assert_index_equal(rng, rng2)
def test_normalize_tz(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D',
tz='US/Eastern')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D',
tz='US/Eastern')
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz='UTC')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D', tz='UTC')
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz=tzlocal())
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D', tz=tzlocal())
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
def test_normalize_tz_local(self):
# see gh-13459
timezones = ['US/Pacific', 'US/Eastern', 'UTC', 'Asia/Kolkata',
'Asia/Shanghai', 'Australia/Canberra']
for timezone in timezones:
with set_timezone(timezone):
rng = date_range('1/1/2000 9:30', periods=10, freq='D',
tz=tzlocal())
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D',
tz=tzlocal())
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
def test_tzaware_offset(self):
dates = date_range('2012-11-01', periods=3, tz='US/Pacific')
offset = dates + offsets.Hour(5)
assert dates[0] + offsets.Hour(5) == offset[0]
# GH 6818
for tz in ['UTC', 'US/Pacific', 'Asia/Tokyo']:
dates = date_range('2010-11-01 00:00', periods=3, tz=tz, freq='H')
expected = DatetimeIndex(['2010-11-01 05:00', '2010-11-01 06:00',
'2010-11-01 07:00'], freq='H', tz=tz)
offset = dates + offsets.Hour(5)
tm.assert_index_equal(offset, expected)
offset = dates + np.timedelta64(5, 'h')
tm.assert_index_equal(offset, expected)
offset = dates + timedelta(hours=5)
tm.assert_index_equal(offset, expected)
def test_nat(self):
# GH 5546
dates = [NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize('US/Pacific')
tm.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Pacific'))
idx = idx.tz_convert('US/Eastern')
tm.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Eastern'))
idx = idx.tz_convert('UTC')
tm.assert_index_equal(idx, DatetimeIndex(dates, tz='UTC'))
dates = ['2010-12-01 00:00', '2010-12-02 00:00', NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize('US/Pacific')
tm.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Pacific'))
idx = idx.tz_convert('US/Eastern')
expected = ['2010-12-01 03:00', '2010-12-02 03:00', NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern'))
idx = idx + offsets.Hour(5)
expected = ['2010-12-01 08:00', '2010-12-02 08:00', NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern'))
idx = idx.tz_convert('US/Pacific')
expected = ['2010-12-01 05:00', '2010-12-02 05:00', NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Pacific'))
idx = idx + np.timedelta64(3, 'h')
expected = ['2010-12-01 08:00', '2010-12-02 08:00', NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Pacific'))
idx = idx.tz_convert('US/Eastern')
expected = ['2010-12-01 11:00', '2010-12-02 11:00', NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern'))
class TestTslib(object):
def test_tslib_tz_convert(self):
def compare_utc_to_local(tz_didx, utc_didx):
f = lambda x: tslib.tz_convert_single(x, 'UTC', tz_didx.tz)
result = tslib.tz_convert(tz_didx.asi8, 'UTC', tz_didx.tz)
result_single = np.vectorize(f)(tz_didx.asi8)
tm.assert_numpy_array_equal(result, result_single)
def compare_local_to_utc(tz_didx, utc_didx):
f = lambda x: tslib.tz_convert_single(x, tz_didx.tz, 'UTC')
result = tslib.tz_convert(utc_didx.asi8, tz_didx.tz, 'UTC')
result_single = np.vectorize(f)(utc_didx.asi8)
tm.assert_numpy_array_equal(result, result_single)
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'Europe/Moscow']:
# US: 2014-03-09 - 2014-11-11
# MOSCOW: 2014-10-26 / 2014-12-31
tz_didx = date_range('2014-03-01', '2015-01-10', freq='H', tz=tz)
utc_didx = date_range('2014-03-01', '2015-01-10', freq='H')
compare_utc_to_local(tz_didx, utc_didx)
            # local tz to UTC can differ in hourly (or higher) freqs because
# of DST
compare_local_to_utc(tz_didx, utc_didx)
tz_didx = date_range('2000-01-01', '2020-01-01', freq='D', tz=tz)
utc_didx = date_range('2000-01-01', '2020-01-01', freq='D')
compare_utc_to_local(tz_didx, utc_didx)
compare_local_to_utc(tz_didx, utc_didx)
tz_didx = date_range('2000-01-01', '2100-01-01', freq='A', tz=tz)
utc_didx = date_range('2000-01-01', '2100-01-01', freq='A')
compare_utc_to_local(tz_didx, utc_didx)
compare_local_to_utc(tz_didx, utc_didx)
# Check empty array
result = tslib.tz_convert(np.array([], dtype=np.int64),
timezones.maybe_get_tz('US/Eastern'),
timezones.maybe_get_tz('Asia/Tokyo'))
tm.assert_numpy_array_equal(result, np.array([], dtype=np.int64))
# Check all-NaT array
result = tslib.tz_convert(np.array([tslib.iNaT], dtype=np.int64),
timezones.maybe_get_tz('US/Eastern'),
timezones.maybe_get_tz('Asia/Tokyo'))
tm.assert_numpy_array_equal(result, np.array(
[tslib.iNaT], dtype=np.int64))
| bsd-3-clause |
AtomLinter/linter-pylama | bin/deps/astroid/rebuilder.py | 2 | 39135 | # Copyright (c) 2009-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2013-2016 Claudiu Popa <[email protected]>
# Copyright (c) 2013-2014 Google, Inc.
# Copyright (c) 2015-2016 Cara Vinson <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""this module contains utilities for rebuilding a _ast tree in
order to get a single Astroid representation
"""
import sys
import _ast
import astroid
from astroid import astpeephole
from astroid import nodes
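# Illustrative usage sketch (not part of the original module; the manager
# argument is assumed to be an astroid.manager.AstroidManager instance, and
# real callers normally go through astroid.builder.AstroidBuilder rather than
# using TreeRebuilder directly):
#
#     from astroid.manager import AstroidManager
#     raw = compile("x = 1", "<string>", "exec", _ast.PyCF_ONLY_AST)
#     module = TreeRebuilder(AstroidManager()).visit_module(
#         raw, modname="example", modpath="<string>", package=False)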
_BIN_OP_CLASSES = {_ast.Add: '+',
_ast.BitAnd: '&',
_ast.BitOr: '|',
_ast.BitXor: '^',
_ast.Div: '/',
_ast.FloorDiv: '//',
_ast.Mod: '%',
_ast.Mult: '*',
_ast.Pow: '**',
_ast.Sub: '-',
_ast.LShift: '<<',
_ast.RShift: '>>',
}
if sys.version_info >= (3, 5):
_BIN_OP_CLASSES[_ast.MatMult] = '@'
_BOOL_OP_CLASSES = {_ast.And: 'and',
_ast.Or: 'or',
}
_UNARY_OP_CLASSES = {_ast.UAdd: '+',
_ast.USub: '-',
_ast.Not: 'not',
_ast.Invert: '~',
}
_CMP_OP_CLASSES = {_ast.Eq: '==',
_ast.Gt: '>',
_ast.GtE: '>=',
_ast.In: 'in',
_ast.Is: 'is',
_ast.IsNot: 'is not',
_ast.Lt: '<',
_ast.LtE: '<=',
_ast.NotEq: '!=',
_ast.NotIn: 'not in',
}
CONST_NAME_TRANSFORMS = {'None': None,
'True': True,
'False': False,
}
REDIRECT = {'arguments': 'Arguments',
'comprehension': 'Comprehension',
"ListCompFor": 'Comprehension',
"GenExprFor": 'Comprehension',
'excepthandler': 'ExceptHandler',
'keyword': 'Keyword',
}
PY3 = sys.version_info >= (3, 0)
PY34 = sys.version_info >= (3, 4)
CONTEXTS = {_ast.Load: astroid.Load,
_ast.Store: astroid.Store,
_ast.Del: astroid.Del,
_ast.Param: astroid.Store}
def _get_doc(node):
try:
if isinstance(node.body[0], _ast.Expr) and isinstance(node.body[0].value, _ast.Str):
doc = node.body[0].value.s
node.body = node.body[1:]
return node, doc
except IndexError:
pass # ast built from scratch
return node, None
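# For instance (illustrative), for the AST of
#     def f():
#         """doc"""
#         return 1
# _get_doc(funcdef_node) strips the leading docstring Expr from node.body and
# returns (node, "doc"); a body without a leading string literal (or an empty
# body) yields (node, None).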
def _visit_or_none(node, attr, visitor, parent, visit='visit',
**kws):
"""If the given node has an attribute, visits the attribute, and
otherwise returns None.
"""
value = getattr(node, attr, None)
if value:
return getattr(visitor, visit)(value, parent, **kws)
return None
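# Used e.g. by visit_excepthandler ('type', 'name') and visit_call
# ('starargs', 'kwargs') for attributes that may be missing on the raw node.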
def _get_context(node):
return CONTEXTS.get(type(node.ctx), astroid.Load)
class TreeRebuilder(object):
"""Rebuilds the _ast tree to become an Astroid tree"""
def __init__(self, manager):
self._manager = manager
self._global_names = []
self._import_from_nodes = []
self._delayed_assattr = []
self._visit_meths = {}
self._peepholer = astpeephole.ASTPeepholeOptimizer()
def visit_module(self, node, modname, modpath, package):
"""visit a Module node by returning a fresh instance of it"""
node, doc = _get_doc(node)
newnode = nodes.Module(name=modname, doc=doc, file=modpath, path=modpath,
package=package, parent=None)
newnode.postinit([self.visit(child, newnode) for child in node.body])
return newnode
def visit(self, node, parent):
cls = node.__class__
if cls in self._visit_meths:
visit_method = self._visit_meths[cls]
else:
cls_name = cls.__name__
visit_name = 'visit_' + REDIRECT.get(cls_name, cls_name).lower()
visit_method = getattr(self, visit_name)
self._visit_meths[cls] = visit_method
return visit_method(node, parent)
def _save_assignment(self, node, name=None):
"""save assignement situation since node.parent is not available yet"""
if self._global_names and node.name in self._global_names[-1]:
node.root().set_local(node.name, node)
else:
node.parent.set_local(node.name, node)
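    # Names declared ``global`` in the current scope (tracked via
    # self._global_names) are registered on the module returned by
    # node.root(); everything else becomes a local of the parent scope.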
def visit_arguments(self, node, parent):
"""visit a Arguments node by returning a fresh instance of it"""
vararg, kwarg = node.vararg, node.kwarg
if PY34:
newnode = nodes.Arguments(vararg.arg if vararg else None,
kwarg.arg if kwarg else None,
parent)
else:
newnode = nodes.Arguments(vararg, kwarg, parent)
args = [self.visit(child, newnode) for child in node.args]
defaults = [self.visit(child, newnode)
for child in node.defaults]
varargannotation = None
kwargannotation = None
# change added in 82732 (7c5c678e4164), vararg and kwarg
# are instances of `_ast.arg`, not strings
if vararg:
if PY34:
if node.vararg.annotation:
varargannotation = self.visit(node.vararg.annotation,
newnode)
vararg = vararg.arg
if kwarg:
if PY34:
if node.kwarg.annotation:
kwargannotation = self.visit(node.kwarg.annotation,
newnode)
kwarg = kwarg.arg
if PY3:
kwonlyargs = [self.visit(child, newnode) for child
in node.kwonlyargs]
kw_defaults = [self.visit(child, newnode) if child else
None for child in node.kw_defaults]
annotations = [self.visit(arg.annotation, newnode) if
arg.annotation else None for arg in node.args]
kwonlyargs_annotations = [
self.visit(arg.annotation, newnode) if arg.annotation else None
for arg in node.kwonlyargs
]
else:
kwonlyargs = []
kw_defaults = []
annotations = []
kwonlyargs_annotations = []
newnode.postinit(
args=args,
defaults=defaults,
kwonlyargs=kwonlyargs,
kw_defaults=kw_defaults,
annotations=annotations,
kwonlyargs_annotations=kwonlyargs_annotations,
varargannotation=varargannotation,
kwargannotation=kwargannotation
)
# save argument names in locals:
if vararg:
newnode.parent.set_local(vararg, newnode)
if kwarg:
newnode.parent.set_local(kwarg, newnode)
return newnode
def visit_assert(self, node, parent):
"""visit a Assert node by returning a fresh instance of it"""
newnode = nodes.Assert(node.lineno, node.col_offset, parent)
if node.msg:
msg = self.visit(node.msg, newnode)
else:
msg = None
newnode.postinit(self.visit(node.test, newnode), msg)
return newnode
def visit_assign(self, node, parent):
"""visit a Assign node by returning a fresh instance of it"""
newnode = nodes.Assign(node.lineno, node.col_offset, parent)
newnode.postinit([self.visit(child, newnode)
for child in node.targets],
self.visit(node.value, newnode))
return newnode
def visit_assignname(self, node, parent, node_name=None):
'''visit a node and return a AssignName node'''
newnode = nodes.AssignName(node_name, getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
self._save_assignment(newnode)
return newnode
def visit_augassign(self, node, parent):
"""visit a AugAssign node by returning a fresh instance of it"""
newnode = nodes.AugAssign(_BIN_OP_CLASSES[type(node.op)] + "=",
node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.target, newnode),
self.visit(node.value, newnode))
return newnode
def visit_repr(self, node, parent):
"""visit a Backquote node by returning a fresh instance of it"""
newnode = nodes.Repr(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.value, newnode))
return newnode
def visit_binop(self, node, parent):
"""visit a BinOp node by returning a fresh instance of it"""
if isinstance(node.left, _ast.BinOp) and self._manager.optimize_ast:
# Optimize BinOp operations in order to remove
# redundant recursion. For instance, if the
# following code is parsed in order to obtain
# its ast, then the rebuilder will fail with an
# infinite recursion, the same will happen with the
# inference engine as well. There's no need to hold
# so many objects for the BinOp if they can be reduced
# to something else (also, the optimization
# might handle only Const binops, which isn't a big
# problem for the correctness of the program).
#
# ("a" + "b" + # one thousand more + "c")
optimized = self._peepholer.optimize_binop(node, parent)
if optimized:
return optimized
newnode = nodes.BinOp(_BIN_OP_CLASSES[type(node.op)],
node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.left, newnode),
self.visit(node.right, newnode))
return newnode
def visit_boolop(self, node, parent):
"""visit a BoolOp node by returning a fresh instance of it"""
newnode = nodes.BoolOp(_BOOL_OP_CLASSES[type(node.op)],
node.lineno, node.col_offset, parent)
newnode.postinit([self.visit(child, newnode)
for child in node.values])
return newnode
def visit_break(self, node, parent):
"""visit a Break node by returning a fresh instance of it"""
return nodes.Break(getattr(node, 'lineno', None),
getattr(node, 'col_offset', None),
parent)
def visit_call(self, node, parent):
"""visit a CallFunc node by returning a fresh instance of it"""
newnode = nodes.Call(node.lineno, node.col_offset, parent)
starargs = _visit_or_none(node, 'starargs', self, newnode)
kwargs = _visit_or_none(node, 'kwargs', self, newnode)
args = [self.visit(child, newnode)
for child in node.args]
if node.keywords:
keywords = [self.visit(child, newnode)
for child in node.keywords]
else:
keywords = None
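        # Note: the 'starargs'/'kwargs' attributes only exist on _ast.Call
        # nodes produced by Python < 3.5; on newer parsers they are absent
        # (hence None here) and *args / **kwargs already arrive as Starred
        # entries in ``args`` and keyword(arg=None) entries in ``keywords``,
        # which is what the fallback below reconstructs for older ASTs.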
if starargs:
new_starargs = nodes.Starred(col_offset=starargs.col_offset,
lineno=starargs.lineno,
parent=starargs.parent)
new_starargs.postinit(value=starargs)
args.append(new_starargs)
if kwargs:
new_kwargs = nodes.Keyword(arg=None, col_offset=kwargs.col_offset,
lineno=kwargs.lineno,
parent=kwargs.parent)
new_kwargs.postinit(value=kwargs)
if keywords:
keywords.append(new_kwargs)
else:
keywords = [new_kwargs]
newnode.postinit(self.visit(node.func, newnode),
args, keywords)
return newnode
def visit_classdef(self, node, parent, newstyle=None):
"""visit a ClassDef node to become astroid"""
node, doc = _get_doc(node)
newnode = nodes.ClassDef(node.name, doc, node.lineno,
node.col_offset, parent)
metaclass = None
if PY3:
for keyword in node.keywords:
if keyword.arg == 'metaclass':
metaclass = self.visit(keyword, newnode).value
break
if node.decorator_list:
decorators = self.visit_decorators(node, newnode)
else:
decorators = None
newnode.postinit([self.visit(child, newnode)
for child in node.bases],
[self.visit(child, newnode)
for child in node.body],
decorators, newstyle, metaclass,
[self.visit(kwd, newnode) for kwd in node.keywords
if kwd.arg != 'metaclass'] if PY3 else [])
return newnode
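    # For Python 3 definitions such as ``class A(B, metaclass=M)`` the
    # metaclass keyword is extracted above and passed to postinit separately,
    # while any remaining keywords are rebuilt as Keyword nodes.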
def visit_const(self, node, parent):
"""visit a Const node by returning a fresh instance of it"""
return nodes.Const(node.value,
getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
def visit_continue(self, node, parent):
"""visit a Continue node by returning a fresh instance of it"""
return nodes.Continue(getattr(node, 'lineno', None),
getattr(node, 'col_offset', None),
parent)
def visit_compare(self, node, parent):
"""visit a Compare node by returning a fresh instance of it"""
newnode = nodes.Compare(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.left, newnode),
[(_CMP_OP_CLASSES[op.__class__],
self.visit(expr, newnode))
for (op, expr) in zip(node.ops, node.comparators)])
return newnode
def visit_comprehension(self, node, parent):
"""visit a Comprehension node by returning a fresh instance of it"""
newnode = nodes.Comprehension(parent)
newnode.postinit(self.visit(node.target, newnode),
self.visit(node.iter, newnode),
[self.visit(child, newnode)
for child in node.ifs],
getattr(node, 'is_async', None))
return newnode
def visit_decorators(self, node, parent):
"""visit a Decorators node by returning a fresh instance of it"""
# /!\ node is actually a _ast.FunctionDef node while
        # parent is an astroid.nodes.FunctionDef node
newnode = nodes.Decorators(node.lineno, node.col_offset, parent)
newnode.postinit([self.visit(child, newnode)
for child in node.decorator_list])
return newnode
def visit_delete(self, node, parent):
"""visit a Delete node by returning a fresh instance of it"""
newnode = nodes.Delete(node.lineno, node.col_offset, parent)
newnode.postinit([self.visit(child, newnode)
for child in node.targets])
return newnode
def _visit_dict_items(self, node, parent, newnode):
for key, value in zip(node.keys, node.values):
rebuilt_value = self.visit(value, newnode)
if not key:
# Python 3.5 and extended unpacking
rebuilt_key = nodes.DictUnpack(rebuilt_value.lineno,
rebuilt_value.col_offset,
parent)
else:
rebuilt_key = self.visit(key, newnode)
yield rebuilt_key, rebuilt_value
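    # A key of None comes from dict unpacking (``{**other}``, Python 3.5+):
    # the value is rebuilt normally and the key is replaced by a DictUnpack
    # placeholder node.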
def visit_dict(self, node, parent):
"""visit a Dict node by returning a fresh instance of it"""
newnode = nodes.Dict(node.lineno, node.col_offset, parent)
items = list(self._visit_dict_items(node, parent, newnode))
newnode.postinit(items)
return newnode
def visit_dictcomp(self, node, parent):
"""visit a DictComp node by returning a fresh instance of it"""
newnode = nodes.DictComp(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.key, newnode),
self.visit(node.value, newnode),
[self.visit(child, newnode)
for child in node.generators])
return newnode
def visit_expr(self, node, parent):
"""visit a Expr node by returning a fresh instance of it"""
newnode = nodes.Expr(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.value, newnode))
return newnode
def visit_ellipsis(self, node, parent):
"""visit an Ellipsis node by returning a fresh instance of it"""
return nodes.Ellipsis(getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
def visit_emptynode(self, node, parent):
"""visit an EmptyNode node by returning a fresh instance of it"""
return nodes.EmptyNode(getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
def visit_excepthandler(self, node, parent):
"""visit an ExceptHandler node by returning a fresh instance of it"""
newnode = nodes.ExceptHandler(node.lineno, node.col_offset, parent)
# /!\ node.name can be a tuple
newnode.postinit(_visit_or_none(node, 'type', self, newnode),
_visit_or_none(node, 'name', self, newnode),
[self.visit(child, newnode)
for child in node.body])
return newnode
def visit_exec(self, node, parent):
"""visit an Exec node by returning a fresh instance of it"""
newnode = nodes.Exec(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.body, newnode),
_visit_or_none(node, 'globals', self, newnode),
_visit_or_none(node, 'locals', self, newnode))
return newnode
def visit_extslice(self, node, parent):
"""visit an ExtSlice node by returning a fresh instance of it"""
newnode = nodes.ExtSlice(parent=parent)
newnode.postinit([self.visit(dim, newnode)
for dim in node.dims])
return newnode
def _visit_for(self, cls, node, parent):
"""visit a For node by returning a fresh instance of it"""
newnode = cls(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.target, newnode),
self.visit(node.iter, newnode),
[self.visit(child, newnode)
for child in node.body],
[self.visit(child, newnode)
for child in node.orelse])
return newnode
def visit_for(self, node, parent):
return self._visit_for(nodes.For, node, parent)
def visit_importfrom(self, node, parent):
"""visit an ImportFrom node by returning a fresh instance of it"""
names = [(alias.name, alias.asname) for alias in node.names]
newnode = nodes.ImportFrom(node.module or '', names, node.level or None,
getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
# store From names to add them to locals after building
self._import_from_nodes.append(newnode)
return newnode
def _visit_functiondef(self, cls, node, parent):
"""visit an FunctionDef node to become astroid"""
self._global_names.append({})
node, doc = _get_doc(node)
newnode = cls(node.name, doc, node.lineno,
node.col_offset, parent)
if node.decorator_list:
decorators = self.visit_decorators(node, newnode)
else:
decorators = None
if PY3 and node.returns:
returns = self.visit(node.returns, newnode)
else:
returns = None
newnode.postinit(self.visit(node.args, newnode),
[self.visit(child, newnode)
for child in node.body],
decorators, returns)
self._global_names.pop()
return newnode
def visit_functiondef(self, node, parent):
return self._visit_functiondef(nodes.FunctionDef, node, parent)
def visit_generatorexp(self, node, parent):
"""visit a GeneratorExp node by returning a fresh instance of it"""
newnode = nodes.GeneratorExp(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.elt, newnode),
[self.visit(child, newnode)
for child in node.generators])
return newnode
def visit_attribute(self, node, parent):
"""visit an Attribute node by returning a fresh instance of it"""
context = _get_context(node)
if context == astroid.Del:
            # FIXME : maybe we should reintroduce a visit_delattr ?
# for instance, deactivating assign_ctx
newnode = nodes.DelAttr(node.attr, node.lineno, node.col_offset,
parent)
elif context == astroid.Store:
newnode = nodes.AssignAttr(node.attr, node.lineno, node.col_offset,
parent)
# Prohibit a local save if we are in an ExceptHandler.
if not isinstance(parent, astroid.ExceptHandler):
self._delayed_assattr.append(newnode)
else:
newnode = nodes.Attribute(node.attr, node.lineno, node.col_offset,
parent)
newnode.postinit(self.visit(node.value, newnode))
return newnode
def visit_global(self, node, parent):
"""visit a Global node to become astroid"""
newnode = nodes.Global(node.names, getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
if self._global_names: # global at the module level, no effect
for name in node.names:
self._global_names[-1].setdefault(name, []).append(newnode)
return newnode
def visit_if(self, node, parent):
"""visit an If node by returning a fresh instance of it"""
newnode = nodes.If(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.test, newnode),
[self.visit(child, newnode)
for child in node.body],
[self.visit(child, newnode)
for child in node.orelse])
return newnode
def visit_ifexp(self, node, parent):
"""visit a IfExp node by returning a fresh instance of it"""
newnode = nodes.IfExp(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.test, newnode),
self.visit(node.body, newnode),
self.visit(node.orelse, newnode))
return newnode
def visit_import(self, node, parent):
"""visit a Import node by returning a fresh instance of it"""
names = [(alias.name, alias.asname) for alias in node.names]
newnode = nodes.Import(names, getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
# save import names in parent's locals:
for (name, asname) in newnode.names:
name = asname or name
parent.set_local(name.split('.')[0], newnode)
return newnode
def visit_index(self, node, parent):
"""visit a Index node by returning a fresh instance of it"""
newnode = nodes.Index(parent=parent)
newnode.postinit(self.visit(node.value, newnode))
return newnode
def visit_keyword(self, node, parent):
"""visit a Keyword node by returning a fresh instance of it"""
newnode = nodes.Keyword(node.arg, parent=parent)
newnode.postinit(self.visit(node.value, newnode))
return newnode
def visit_lambda(self, node, parent):
"""visit a Lambda node by returning a fresh instance of it"""
newnode = nodes.Lambda(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.args, newnode),
self.visit(node.body, newnode))
return newnode
def visit_list(self, node, parent):
"""visit a List node by returning a fresh instance of it"""
context = _get_context(node)
newnode = nodes.List(ctx=context,
lineno=node.lineno,
col_offset=node.col_offset,
parent=parent)
newnode.postinit([self.visit(child, newnode)
for child in node.elts])
return newnode
def visit_listcomp(self, node, parent):
"""visit a ListComp node by returning a fresh instance of it"""
newnode = nodes.ListComp(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.elt, newnode),
[self.visit(child, newnode)
for child in node.generators])
return newnode
def visit_name(self, node, parent):
"""visit a Name node by returning a fresh instance of it"""
context = _get_context(node)
# True and False can be assigned to something in py2x, so we have to
# check first the context.
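        # e.g. "True = 0" was legal code in Python 2, so a Name in Store or Del
        # context must not be folded into a Const via CONST_NAME_TRANSFORMS below.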
if context == astroid.Del:
newnode = nodes.DelName(node.id, node.lineno, node.col_offset,
parent)
elif context == astroid.Store:
newnode = nodes.AssignName(node.id, node.lineno, node.col_offset,
parent)
elif node.id in CONST_NAME_TRANSFORMS:
newnode = nodes.Const(CONST_NAME_TRANSFORMS[node.id],
getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
return newnode
else:
newnode = nodes.Name(node.id, node.lineno, node.col_offset, parent)
# XXX REMOVE me :
if context in (astroid.Del, astroid.Store): # 'Aug' ??
self._save_assignment(newnode)
return newnode
def visit_str(self, node, parent):
"""visit a String/Bytes node by returning a fresh instance of Const"""
return nodes.Const(node.s, getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
visit_bytes = visit_str
def visit_num(self, node, parent):
"""visit a Num node by returning a fresh instance of Const"""
return nodes.Const(node.n, getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
def visit_pass(self, node, parent):
"""visit a Pass node by returning a fresh instance of it"""
return nodes.Pass(node.lineno, node.col_offset, parent)
def visit_print(self, node, parent):
"""visit a Print node by returning a fresh instance of it"""
newnode = nodes.Print(node.nl, node.lineno, node.col_offset, parent)
newnode.postinit(_visit_or_none(node, 'dest', self, newnode),
[self.visit(child, newnode)
for child in node.values])
return newnode
def visit_raise(self, node, parent):
"""visit a Raise node by returning a fresh instance of it"""
newnode = nodes.Raise(node.lineno, node.col_offset, parent)
newnode.postinit(_visit_or_none(node, 'type', self, newnode),
_visit_or_none(node, 'inst', self, newnode),
_visit_or_none(node, 'tback', self, newnode))
return newnode
def visit_return(self, node, parent):
"""visit a Return node by returning a fresh instance of it"""
newnode = nodes.Return(node.lineno, node.col_offset, parent)
if node.value is not None:
newnode.postinit(self.visit(node.value, newnode))
return newnode
def visit_set(self, node, parent):
"""visit a Set node by returning a fresh instance of it"""
newnode = nodes.Set(node.lineno, node.col_offset, parent)
newnode.postinit([self.visit(child, newnode)
for child in node.elts])
return newnode
def visit_setcomp(self, node, parent):
"""visit a SetComp node by returning a fresh instance of it"""
newnode = nodes.SetComp(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.elt, newnode),
[self.visit(child, newnode)
for child in node.generators])
return newnode
def visit_slice(self, node, parent):
"""visit a Slice node by returning a fresh instance of it"""
newnode = nodes.Slice(parent=parent)
newnode.postinit(_visit_or_none(node, 'lower', self, newnode),
_visit_or_none(node, 'upper', self, newnode),
_visit_or_none(node, 'step', self, newnode))
return newnode
def visit_subscript(self, node, parent):
"""visit a Subscript node by returning a fresh instance of it"""
context = _get_context(node)
newnode = nodes.Subscript(ctx=context,
lineno=node.lineno,
col_offset=node.col_offset,
parent=parent)
newnode.postinit(self.visit(node.value, newnode),
self.visit(node.slice, newnode))
return newnode
def visit_tryexcept(self, node, parent):
"""visit a TryExcept node by returning a fresh instance of it"""
newnode = nodes.TryExcept(node.lineno, node.col_offset, parent)
newnode.postinit([self.visit(child, newnode)
for child in node.body],
[self.visit(child, newnode)
for child in node.handlers],
[self.visit(child, newnode)
for child in node.orelse])
return newnode
def visit_tryfinally(self, node, parent):
"""visit a TryFinally node by returning a fresh instance of it"""
newnode = nodes.TryFinally(node.lineno, node.col_offset, parent)
newnode.postinit([self.visit(child, newnode)
for child in node.body],
[self.visit(n, newnode)
for n in node.finalbody])
return newnode
def visit_tuple(self, node, parent):
"""visit a Tuple node by returning a fresh instance of it"""
context = _get_context(node)
newnode = nodes.Tuple(ctx=context,
lineno=node.lineno,
col_offset=node.col_offset,
parent=parent)
newnode.postinit([self.visit(child, newnode)
for child in node.elts])
return newnode
def visit_unaryop(self, node, parent):
"""visit a UnaryOp node by returning a fresh instance of it"""
newnode = nodes.UnaryOp(_UNARY_OP_CLASSES[node.op.__class__],
node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.operand, newnode))
return newnode
def visit_while(self, node, parent):
"""visit a While node by returning a fresh instance of it"""
newnode = nodes.While(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.test, newnode),
[self.visit(child, newnode)
for child in node.body],
[self.visit(child, newnode)
for child in node.orelse])
return newnode
def visit_with(self, node, parent):
newnode = nodes.With(node.lineno, node.col_offset, parent)
expr = self.visit(node.context_expr, newnode)
if node.optional_vars is not None:
optional_vars = self.visit(node.optional_vars, newnode)
else:
optional_vars = None
newnode.postinit([(expr, optional_vars)],
[self.visit(child, newnode)
for child in node.body])
return newnode
def visit_yield(self, node, parent):
"""visit a Yield node by returning a fresh instance of it"""
newnode = nodes.Yield(node.lineno, node.col_offset, parent)
if node.value is not None:
newnode.postinit(self.visit(node.value, newnode))
return newnode
class TreeRebuilder3(TreeRebuilder):
"""extend and overwrite TreeRebuilder for python3k"""
def visit_arg(self, node, parent):
"""visit a arg node by returning a fresh AssName instance"""
# TODO(cpopa): introduce an Arg node instead of using AssignName.
return self.visit_assignname(node, parent, node.arg)
def visit_nameconstant(self, node, parent):
# in Python 3.4 we have NameConstant for True / False / None
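        # e.g. under Python 3.4+ the "None" in "x = None" parses to
        # NameConstant(value=None); it is rebuilt here as a plain Const node.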
return nodes.Const(node.value, getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
def visit_excepthandler(self, node, parent):
"""visit an ExceptHandler node by returning a fresh instance of it"""
newnode = nodes.ExceptHandler(node.lineno, node.col_offset, parent)
if node.name:
name = self.visit_assignname(node, newnode, node.name)
else:
name = None
newnode.postinit(_visit_or_none(node, 'type', self, newnode),
name,
[self.visit(child, newnode)
for child in node.body])
return newnode
def visit_nonlocal(self, node, parent):
"""visit a Nonlocal node and return a new instance of it"""
return nodes.Nonlocal(node.names, getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
def visit_raise(self, node, parent):
"""visit a Raise node by returning a fresh instance of it"""
newnode = nodes.Raise(node.lineno, node.col_offset, parent)
# no traceback; anyway it is not used in Pylint
newnode.postinit(_visit_or_none(node, 'exc', self, newnode),
_visit_or_none(node, 'cause', self, newnode))
return newnode
def visit_starred(self, node, parent):
"""visit a Starred node and return a new instance of it"""
context = _get_context(node)
newnode = nodes.Starred(ctx=context, lineno=node.lineno,
col_offset=node.col_offset,
parent=parent)
newnode.postinit(self.visit(node.value, newnode))
return newnode
def visit_try(self, node, parent):
        # python 3.3 introduced a new Try node replacing
# TryFinally/TryExcept nodes
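        # e.g. a try/except/finally statement is rebuilt as a TryFinally node
        # whose body is a single TryExcept node (see the branches below).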
if node.finalbody:
newnode = nodes.TryFinally(node.lineno, node.col_offset, parent)
if node.handlers:
body = [self.visit_tryexcept(node, newnode)]
else:
body = [self.visit(child, newnode)
for child in node.body]
newnode.postinit(body,
[self.visit(n, newnode)
for n in node.finalbody])
return newnode
elif node.handlers:
return self.visit_tryexcept(node, parent)
return None
def visit_annassign(self, node, parent):
"""visit an AnnAssign node by returning a fresh instance of it"""
newnode = nodes.AnnAssign(node.lineno, node.col_offset, parent)
annotation = _visit_or_none(node, 'annotation', self, newnode)
newnode.postinit(target=self.visit(node.target, newnode),
annotation=annotation,
simple=node.simple,
value=_visit_or_none(node, 'value', self, newnode))
return newnode
def _visit_with(self, cls, node, parent):
if 'items' not in node._fields:
# python < 3.3
return super(TreeRebuilder3, self).visit_with(node, parent)
newnode = cls(node.lineno, node.col_offset, parent)
def visit_child(child):
expr = self.visit(child.context_expr, newnode)
var = _visit_or_none(child, 'optional_vars', self, newnode)
return expr, var
newnode.postinit([visit_child(child) for child in node.items],
[self.visit(child, newnode)
for child in node.body])
return newnode
def visit_with(self, node, parent):
return self._visit_with(nodes.With, node, parent)
def visit_yieldfrom(self, node, parent):
newnode = nodes.YieldFrom(node.lineno, node.col_offset, parent)
if node.value is not None:
newnode.postinit(self.visit(node.value, newnode))
return newnode
def visit_classdef(self, node, parent, newstyle=True):
return super(TreeRebuilder3, self).visit_classdef(node, parent,
newstyle=newstyle)
# Async structs added in Python 3.5
def visit_asyncfunctiondef(self, node, parent):
return self._visit_functiondef(nodes.AsyncFunctionDef, node, parent)
def visit_asyncfor(self, node, parent):
return self._visit_for(nodes.AsyncFor, node, parent)
def visit_await(self, node, parent):
newnode = nodes.Await(node.lineno, node.col_offset, parent)
newnode.postinit(value=self.visit(node.value, newnode))
return newnode
def visit_asyncwith(self, node, parent):
return self._visit_with(nodes.AsyncWith, node, parent)
def visit_joinedstr(self, node, parent):
newnode = nodes.JoinedStr(node.lineno, node.col_offset, parent)
newnode.postinit([self.visit(child, newnode)
for child in node.values])
return newnode
def visit_formattedvalue(self, node, parent):
newnode = nodes.FormattedValue(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.value, newnode),
node.conversion,
_visit_or_none(node, 'format_spec', self, newnode))
return newnode
if sys.version_info >= (3, 0):
TreeRebuilder = TreeRebuilder3
| mit |
fbocharov/au-linux-kernel-spring-2016 | linux/Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
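# For illustration: three le32 values 2, 0 and 65535 are printed as
# "0=2 1=0 2=65535" (index before '=' in hex, value after it in decimal).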
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
eamosov/thrift | test/py/TestRenderedDoubleConstants.py | 17 | 10576 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from DoubleConstantsTest import constants
#
# In order to run the test under Windows, we need to create a symbolic link
# named 'thrift' to the '../src' folder by using:
#
# mklink /D thrift ..\src
#
class TestRenderedDoubleConstants(unittest.TestCase):
ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST = \
"failed to verify a double constant generated by Thrift (expected = %f, got = %f)"
ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_LIST_TEST =\
"failed to verify a list item by Thrift (expected = %f, got = %f)"
ASSERTION_MESSAGE_FOR_TYPE_CHECKS = "the rendered variable with name %s is not of double type"
# to make sure the variables inside Thrift files are generated correctly
def test_rendered_double_constants(self):
EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT = 1.0
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT = -100.0
EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT = 9223372036854775807.0
EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT = -9223372036854775807.0
EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS = 3.14159265359
EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE = 1000000.1
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE = -1000000.1
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE = 1.7e+308
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE = 9223372036854775816.43
EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE = -1.7e+308
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE = -9223372036854775816.43
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT, constants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT,
places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT,
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT,
places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT,
constants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT,
places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT,
constants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST,
EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS,
constants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE,
places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST,
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST,
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST,
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST))
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST")
# to make sure the variables inside Thrift files are generated correctly
def test_rendered_double_list(self):
EXPECTED_DOUBLE_LIST = [1.0, -100.0, 100.0, 9223372036854775807.0, -9223372036854775807.0, 3.14159265359,
1000000.1, -1000000.1, 1.7e+308, -1.7e+308, 9223372036854775816.43,
-9223372036854775816.43]
self.assertEqual(len(constants.DOUBLE_LIST_TEST), len(EXPECTED_DOUBLE_LIST))
for i, expectedValue in enumerate(EXPECTED_DOUBLE_LIST):
self.assertAlmostEqual(constants.DOUBLE_LIST_TEST[i], expectedValue, places=7)
def suite():
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(TestRenderedDoubleConstants))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite", testRunner=unittest.TextTestRunner(verbosity=2))
| apache-2.0 |
gatagat/lapjv | bench/overview_sparse.py | 1 | 2203 | from pytest import mark
from joblib import Memory
import numpy as np
from lap import lapjv, lapmod
from lap.lapmod import get_cost
try:
from lap_old import lapjv as lapjv_old
except ImportError:
print(
'''If you get here, you do not have the old lapjv to compare to.
git clone [email protected]:gatagat/lapjv.git lapjv-old
cd lapjv-old
git checkout old
python setup.py build_ext -i
mv lapjv lapjv_old
And run the benchmark:
LAPJV_OLD=lapjv-old bench.sh
''')
lapjv_old = None
from centrosome.lapjv import lapjv as lapjv_centrosome
from lap.tests.test_utils import (
sparse_from_masked,
sparse_from_masked_CS,
get_sparse_int,
get_platform_maxint
)
cachedir = '/tmp/lapjv-cache'
memory = Memory(cachedir=cachedir, verbose=1)
@memory.cache
def get_data(seed):
cost, mask = get_sparse_int(5000, 1000, 0.01, hard=False, seed=seed)
cost_ = cost.copy()
cost_[~mask] = get_platform_maxint()
opt = lapjv(cost_)[0]
return cost, mask, opt
seeds = [1299821, 15485867, 32452867, 49979693]
def _get_cost_CS(cost, x):
return cost[np.arange(cost.shape[0]), x].sum()
@mark.parametrize('seed', seeds)
def test_CSCY(benchmark, seed):
cost, mask, opt = get_data(seed)
i, j, cc = sparse_from_masked_CS(cost, mask)
ret = benchmark(lapjv_centrosome, i, j, cc)
assert _get_cost_CS(cost, ret[0]) == opt
if lapjv_old is not None:
@mark.parametrize('seed', seeds)
def test_JV_old(benchmark, seed):
cost, mask, opt = get_data(seed)
cost[~mask] = get_platform_maxint()
ret = benchmark(lapjv_old, cost)
assert ret[0] == opt
@mark.parametrize('seed', seeds)
def test_JV(benchmark, seed):
cost, mask, opt = get_data(seed)
cost[~mask] = get_platform_maxint()
ret = benchmark(lapjv, cost)
assert ret[0] == opt
@mark.parametrize('seed', seeds)
def test_MOD_c(benchmark, seed):
cost, mask, opt = get_data(seed)
n, cc, ii, kk = sparse_from_masked(cost, mask)
ret = benchmark(lapmod, n, cc, ii, kk, fast=True, return_cost=False)
assert get_cost(n, cc, ii, kk, ret[0]) == opt
| bsd-2-clause |
ryfeus/lambda-packs | Opencv_pil/source36/numpy/polynomial/tests/test_hermite.py | 6 | 18758 | """Tests for hermite module.
"""
from __future__ import division, absolute_import, print_function
from functools import reduce
import numpy as np
import numpy.polynomial.hermite as herm
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
)
H0 = np.array([1])
H1 = np.array([0, 2])
H2 = np.array([-2, 0, 4])
H3 = np.array([0, -12, 0, 8])
H4 = np.array([12, 0, -48, 0, 16])
H5 = np.array([0, 120, 0, -160, 0, 32])
H6 = np.array([-120, 0, 720, 0, -480, 0, 64])
H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128])
H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256])
H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512])
Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9]
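# Hlist[n] holds the coefficients of the physicists' Hermite polynomial H_n
# in increasing power order, e.g. H2(x) = -2 + 0*x + 4*x**2.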
def trim(x):
return herm.hermtrim(x, tol=1e-6)
class TestConstants(object):
def test_hermdomain(self):
assert_equal(herm.hermdomain, [-1, 1])
def test_hermzero(self):
assert_equal(herm.hermzero, [0])
def test_hermone(self):
assert_equal(herm.hermone, [1])
def test_hermx(self):
assert_equal(herm.hermx, [0, .5])
class TestArithmetic(object):
x = np.linspace(-3, 3, 100)
def test_hermadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = herm.hermadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermsub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = herm.hermsub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermmulx(self):
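        # multiplication by x follows the recurrence x*H_n = 0.5*H_{n+1} + n*H_{n-1}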
assert_equal(herm.hermmulx([0]), [0])
assert_equal(herm.hermmulx([1]), [0, .5])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [i, 0, .5]
assert_equal(herm.hermmulx(ser), tgt)
def test_hermmul(self):
# check values of result
for i in range(5):
pol1 = [0]*i + [1]
val1 = herm.hermval(self.x, pol1)
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
pol2 = [0]*j + [1]
val2 = herm.hermval(self.x, pol2)
pol3 = herm.hermmul(pol1, pol2)
val3 = herm.hermval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1*val2, err_msg=msg)
def test_hermdiv(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = herm.hermadd(ci, cj)
quo, rem = herm.hermdiv(tgt, ci)
res = herm.hermadd(herm.hermmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermpow(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
c = np.arange(i + 1)
tgt = reduce(herm.hermmul, [c]*j, np.array([1]))
res = herm.hermpow(c, j)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2.5, 1., .75])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_hermval(self):
#check empty input
assert_equal(herm.hermval([], [1]).size, 0)
#check normal input)
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Hlist]
for i in range(10):
msg = "At i=%d" % i
tgt = y[i]
res = herm.hermval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3):
dims = [2]*i
x = np.zeros(dims)
assert_equal(herm.hermval(x, [1]).shape, dims)
assert_equal(herm.hermval(x, [1, 0]).shape, dims)
assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims)
def test_hermval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = herm.hermval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herm.hermval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_hermval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = herm.hermval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herm.hermval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_hermgrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = herm.hermgrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herm.hermgrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_hermgrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = herm.hermgrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herm.hermgrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(object):
def test_hermint(self):
# check exceptions
assert_raises(ValueError, herm.hermint, [0], .5)
assert_raises(ValueError, herm.hermint, [0], -1)
assert_raises(ValueError, herm.hermint, [0], 1, [0, 0])
assert_raises(ValueError, herm.hermint, [0], lbnd=[0])
assert_raises(ValueError, herm.hermint, [0], scl=[0])
assert_raises(ValueError, herm.hermint, [0], axis=.5)
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = herm.hermint([0], m=i, k=k)
assert_almost_equal(res, [0, .5])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
hermpol = herm.poly2herm(pol)
hermint = herm.hermint(hermpol, m=1, k=[i])
res = herm.herm2poly(hermint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
hermpol = herm.poly2herm(pol)
hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1)
assert_almost_equal(herm.hermval(-1, hermint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
hermpol = herm.poly2herm(pol)
hermint = herm.hermint(hermpol, m=1, k=[i], scl=2)
res = herm.herm2poly(hermint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herm.hermint(tgt, m=1)
res = herm.hermint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herm.hermint(tgt, m=1, k=[k])
res = herm.hermint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1)
res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herm.hermint(tgt, m=1, k=[k], scl=2)
res = herm.hermint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_hermint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T
res = herm.hermint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herm.hermint(c) for c in c2d])
res = herm.hermint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([herm.hermint(c, k=3) for c in c2d])
res = herm.hermint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(object):
def test_hermder(self):
# check exceptions
assert_raises(ValueError, herm.hermder, [0], .5)
assert_raises(ValueError, herm.hermder, [0], -1)
# check that zeroth derivative does nothing
for i in range(5):
tgt = [0]*i + [1]
res = herm.hermder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = herm.hermder(herm.hermint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_hermder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T
res = herm.hermder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herm.hermder(c) for c in c2d])
res = herm.hermder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(object):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_hermvander(self):
# check for 1d x
x = np.arange(3)
v = herm.hermvander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], herm.hermval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = herm.hermvander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], herm.hermval(x, coef))
def test_hermvander2d(self):
# also tests hermval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = herm.hermvander2d(x1, x2, [1, 2])
tgt = herm.hermval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = herm.hermvander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_hermvander3d(self):
# also tests hermval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = herm.hermvander3d(x1, x2, x3, [1, 2, 3])
tgt = herm.hermval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestFitting(object):
def test_hermfit(self):
def f(x):
return x*(x - 1)*(x - 2)
def f2(x):
return x**4 + x**2 + 1
# Test exceptions
assert_raises(ValueError, herm.hermfit, [1], [1], -1)
assert_raises(TypeError, herm.hermfit, [[1]], [1], 0)
assert_raises(TypeError, herm.hermfit, [], [1], 0)
assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0)
assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0)
assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0)
assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1])
assert_raises(ValueError, herm.hermfit, [1], [1], [-1,])
assert_raises(ValueError, herm.hermfit, [1], [1], [2, -1, 6])
assert_raises(TypeError, herm.hermfit, [1], [1], [])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = herm.hermfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(herm.hermval(x, coef3), y)
coef3 = herm.hermfit(x, y, [0, 1, 2, 3])
assert_equal(len(coef3), 4)
assert_almost_equal(herm.hermval(x, coef3), y)
#
coef4 = herm.hermfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(herm.hermval(x, coef4), y)
coef4 = herm.hermfit(x, y, [0, 1, 2, 3, 4])
assert_equal(len(coef4), 5)
assert_almost_equal(herm.hermval(x, coef4), y)
# check things still work if deg is not in strict increasing
coef4 = herm.hermfit(x, y, [2, 3, 4, 1, 0])
assert_equal(len(coef4), 5)
assert_almost_equal(herm.hermval(x, coef4), y)
#
coef2d = herm.hermfit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
coef2d = herm.hermfit(x, np.array([y, y]).T, [0, 1, 2, 3])
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
y[0::2] = 0
wcoef3 = herm.hermfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
wcoef3 = herm.hermfit(x, yw, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
# test scaling with complex values x points whose square
# is zero when summed.
x = [1, 1j, -1, -1j]
assert_almost_equal(herm.hermfit(x, x, 1), [0, .5])
assert_almost_equal(herm.hermfit(x, x, [0, 1]), [0, .5])
        # test fitting only even Hermite polynomials
x = np.linspace(-1, 1)
y = f2(x)
coef1 = herm.hermfit(x, y, 4)
assert_almost_equal(herm.hermval(x, coef1), y)
coef2 = herm.hermfit(x, y, [0, 2, 4])
assert_almost_equal(herm.hermval(x, coef2), y)
assert_almost_equal(coef1, coef2)
class TestCompanion(object):
def test_raises(self):
assert_raises(ValueError, herm.hermcompanion, [])
assert_raises(ValueError, herm.hermcompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0]*i + [1]
assert_(herm.hermcompanion(coef).shape == (i, i))
def test_linear_root(self):
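        # the series [1, 2] is 1*H0 + 2*H1 = 1 + 4*x, whose single root is -0.25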
assert_(herm.hermcompanion([1, 2])[0, 0] == -.25)
class TestGauss(object):
def test_100(self):
x, w = herm.hermgauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = herm.hermvander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1/np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = np.sqrt(np.pi)
assert_almost_equal(w.sum(), tgt)
class TestMisc(object):
def test_hermfromroots(self):
res = herm.hermfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
pol = herm.hermfromroots(roots)
res = herm.hermval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(herm.herm2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_hermroots(self):
assert_almost_equal(herm.hermroots([1]), [])
assert_almost_equal(herm.hermroots([1, 1]), [-.5])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = herm.hermroots(herm.hermfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_hermtrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, herm.hermtrim, coef, -1)
# Test results
assert_equal(herm.hermtrim(coef), coef[:-1])
assert_equal(herm.hermtrim(coef, 1), coef[:-3])
assert_equal(herm.hermtrim(coef, 2), [0])
def test_hermline(self):
assert_equal(herm.hermline(3, 4), [3, 2])
def test_herm2poly(self):
for i in range(10):
assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i])
def test_poly2herm(self):
for i in range(10):
assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(-5, 5, 11)
tgt = np.exp(-x**2)
res = herm.hermweight(x)
assert_almost_equal(res, tgt)
| mit |
nvoron23/statsmodels | statsmodels/sandbox/regression/tests/results_gmm_griliches_iter.py | 34 | 7976 | import numpy as np
est = dict(
rank = 13,
N = 758,
Q = .0150568875809373,
J = 11.41312078635046,
J_df = 2,
k_1 = 13,
converged = 1,
has_xtinst = 0,
type = 1,
n_eq = 1,
k = 13,
n_moments = 15,
k_aux = 13,
k_eq_model = 0,
ic = 6,
k_eq = 13,
cmdline = "gmm (lw - {xb:s iq expr tenure rns smsa dyear*} - {b0}) , instruments(expr tenure rns smsa dyear* med kww age mrt) igmm",
cmd = "gmm",
estat_cmd = "gmm_estat",
predict = "gmm_p",
marginsnotok = "_ALL",
eqnames = "1",
technique = "gn",
winit = "Unadjusted",
estimator = "igmm",
wmatrix = "robust",
vce = "robust",
vcetype = "Robust",
params = "xb_s xb_iq xb_expr xb_tenure xb_rns xb_smsa xb_dyear_67 xb_dyear_68 xb_dyear_69 xb_dyear_70 xb_dyear_71 xb_dyear_73 b0",
inst_1 = "expr tenure rns smsa dyear_67 dyear_68 dyear_69 dyear_70 dyear_71 dyear_73 med kww age mrt _cons",
params_1 = "xb_s xb_iq xb_expr xb_tenure xb_rns xb_smsa xb_dyear_67 xb_dyear_68 xb_dyear_69 xb_dyear_70 xb_dyear_71 xb_dyear_73 b0",
sexp_1 = "lw - ({xb_s} *s + {xb_iq} *iq + {xb_expr} *expr + {xb_tenure} *tenure + {xb_rns} *rns + {xb_smsa} *smsa + {xb_dyear_67} *dyear_67 + {xb_dyear_68} *dyear_68 + {xb_dyear_69} *dyear_69 + {xb_dyear_70} *dyear_70 + {xb_dyear_71} *dyear_71 + {xb_dyear_73} *dyear_73) - {b0}",
properties = "b V",
)
params_table = np.array([
.17587739850768, .02085563162829, 8.4330890400415, 3.366583555e-17,
.1350011116414, .21675368537396, np.nan, 1.9599639845401,
0, -.00928586712743, .00491894287617, -1.88777697997,
.05905589683705, -.01892681800673, .00035508375188, np.nan,
1.9599639845401, 0, .05031651549731, .00810558790493,
6.2076330659127, 5.378855978e-10, .03442985513012, .0662031758645,
np.nan, 1.9599639845401, 0, .04246235782951,
.00956418082077, 4.4397276280375, 9.007280073e-06, .02371690787918,
.06120780777985, np.nan, 1.9599639845401, 0,
-.1039476753865, .03373281188749, -3.0815004611293, .00205960157647,
-.17006277178325, -.03783257898975, np.nan, 1.9599639845401,
0, .12477256813508, .03099244898605, 4.0259021864082,
.0000567572801, .06402848432973, .18551665194043, np.nan,
1.9599639845401, 0, -.05297127223127, .0517946935923,
-1.0227162003936, .30644204936546, -.15448700626247, .04854446179993,
np.nan, 1.9599639845401, 0, .04564516152971,
.05001865637643, .91256272831865, .36147256434055, -.05238960352318,
.1436799265826, np.nan, 1.9599639845401, 0,
.15574543741982, .04802004585645, 3.2433421218593, .00118136262363,
.06162787700523, .24986299783442, np.nan, 1.9599639845401,
0, .16681173496168, .06134387289984, 2.7192892635594,
.00654223677971, .0465799534058, .28704351651757, np.nan,
1.9599639845401, 0, .08417610675323, .05582688740597,
1.507805838092, .13160422753823, -.02524258193145, .19359479543791,
np.nan, 1.9599639845401, 0, .09964580476612,
.06124947866865, 1.6268841291727, .10376170930541, -.02040096749628,
.21969257702853, np.nan, 1.9599639845401, 0,
4.0027753075622, .33649589464938, 11.895465505554, 1.249543428e-32,
3.3432554731038, 4.6622951420205, np.nan, 1.9599639845401,
0]).reshape(13,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = '_cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons'.split()
cov = np.array([
.00043495737061, -.00007938790704, .00002809207919, .00001486824321,
-.00017806650894, -6.696078938e-06, -.00011595347261, -.00018816769626,
-.00012205118386, -.00008281236274, -.00031504876539, -.00063574245306,
.00264272738846, -.00007938790704, .00002419599902, 4.932871670e-06,
-.00001114848619, .00006618803917, -.00002202930782, 4.808220835e-07,
.00003206765662, -.00002261059773, -.00006024105579, -.00001412126593,
.00001474591556, -.00144330101198, .00002809207919, 4.932871670e-06,
.00006570055528, -.0000203894891, .00005213529923, -.00003297805448,
.00003595284891, .00008758906787, .00003058926358, .00001696423798,
-.00008568569767, -.00013140753648, -.00094326672008, .00001486824321,
-.00001114848619, -.0000203894891, .00009147355477, -.00003774547245,
7.828122784e-06, .00008484461309, .00006729820252, .00011236802193,
.00010082715772, .00011217081931, .00009440153548, .00075659901252,
-.00017806650894, .00006618803917, .00005213529923, -.00003774547245,
.00113790259784, .00013005865302, .00018021354375, .00018779266096,
-9.435310865e-06, .0000165483542, -.00005323328914, .00008265052168,
-.00499436873124, -6.696078938e-06, -.00002202930782, -.00003297805448,
7.828122784e-06, .00013005865302, .00096053189415, .00005704546746,
.00011160225767, .00025285680201, .00010656723202, .00030213005331,
.00030792696913, .00157128168902, -.00011595347261, 4.808220835e-07,
.00003595284891, .00008484461309, .00018021354375, .00005704546746,
.00268269028432, .00085942321667, .00091151417222, .00096327250114,
.00090372304081, .00102768195348, .00034563629591, -.00018816769626,
.00003206765662, .00008758906787, .00006729820252, .00018779266096,
.00011160225767, .00085942321667, .0025018659857, .00092591134763,
.00088266305412, .0008241186538, .00095084381197, -.00206285154639,
-.00012205118386, -.00002261059773, .00003058926358, .00011236802193,
-9.435310865e-06, .00025285680201, .00091151417222, .00092591134763,
.00230592480406, .00118265696692, .0011106470199, .00129290662149,
.00256049741814, -.00008281236274, -.00006024105579, .00001696423798,
.00010082715772, .0000165483542, .00010656723202, .00096327250114,
.00088266305412, .00118265696692, .00376307074235, .00124584145426,
.00155915431219, .00599086304364, -.00031504876539, -.00001412126593,
-.00008568569767, .00011217081931, -.00005323328914, .00030213005331,
.00090372304081, .0008241186538, .0011106470199, .00124584145426,
.00311664135744, .0018437604357, .00431259131307, -.00063574245306,
.00001474591556, -.00013140753648, .00009440153548, .00008265052168,
.00030792696913, .00102768195348, .00095084381197, .00129290662149,
.00155915431219, .0018437604357, .00375149863718, .00538769349865,
.00264272738846, -.00144330101198, -.00094326672008, .00075659901252,
-.00499436873124, .00157128168902, .00034563629591, -.00206285154639,
.00256049741814, .00599086304364, .00431259131307, .00538769349865,
.11322948711589]).reshape(13,13)
cov_colnames = '_cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons'.split()
cov_rownames = '_cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons'.split()
class Bunch(dict):
def __init__(self, **kw):
dict.__init__(self, kw)
self.__dict__ = self
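        # expose the first four params_table columns as attributes, i.e.
        # params == params_table[:, 0], bse == params_table[:, 1], etc.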
for i,att in enumerate(['params', 'bse', 'tvalues', 'pvalues']):
self[att] = self.params_table[:,i]
results = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
| bsd-3-clause |
jbenden/ansible | lib/ansible/modules/cloud/cloudstack/cs_portforward.py | 51 | 14301 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_portforward
short_description: Manages port forwarding rules on Apache CloudStack based clouds.
description:
- Create, update and remove port forwarding rules.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
ip_address:
description:
- Public IP address the rule is assigned to.
required: true
vm:
description:
      - Name of the virtual machine the port forwarding rule is created for.
- Required if C(state=present).
required: false
default: null
state:
description:
- State of the port forwarding rule.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
protocol:
description:
- Protocol of the port forwarding rule.
required: false
default: 'tcp'
choices: [ 'tcp', 'udp' ]
public_port:
description:
- Start public port for this rule.
required: true
public_end_port:
description:
- End public port for this rule.
- If not specified equal C(public_port).
required: false
default: null
private_port:
description:
- Start private port for this rule.
required: true
private_end_port:
description:
- End private port for this rule.
- If not specified equal C(private_port).
required: false
default: null
open_firewall:
description:
      - Whether the firewall rule for the public port should be created while creating the new rule.
- Use M(cs_firewall) for managing firewall rules.
required: false
default: false
vm_guest_ip:
description:
- VM guest NIC secondary IP address for the port forwarding rule.
required: false
default: false
network:
description:
- Name of the network.
required: false
default: null
version_added: "2.3"
vpc:
description:
- Name of the VPC.
required: false
default: null
version_added: "2.3"
domain:
description:
- Domain the C(vm) is related to.
required: false
default: null
account:
description:
- Account the C(vm) is related to.
required: false
default: null
project:
description:
- Name of the project the C(vm) is located in.
required: false
default: null
zone:
description:
- Name of the zone in which the virtual machine is in.
- If not set, default zone is used.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
- "To delete all tags, set a empty list e.g. C(tags: [])."
required: false
default: null
aliases: [ 'tag' ]
version_added: "2.4"
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# 1.2.3.4:80 -> web01:8080
- local_action:
module: cs_portforward
ip_address: 1.2.3.4
vm: web01
public_port: 80
private_port: 8080
# forward SSH and open firewall
- local_action:
module: cs_portforward
ip_address: '{{ public_ip }}'
vm: '{{ inventory_hostname }}'
public_port: '{{ ansible_ssh_port }}'
private_port: 22
open_firewall: true
# forward DNS traffic, but do not open firewall
- local_action:
module: cs_portforward
ip_address: 1.2.3.4
vm: '{{ inventory_hostname }}'
public_port: 53
private_port: 53
protocol: udp
# remove ssh port forwarding
- local_action:
module: cs_portforward
ip_address: 1.2.3.4
public_port: 22
private_port: 22
state: absent
'''
RETURN = '''
---
id:
description: UUID of the public IP address.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
ip_address:
description: Public IP address.
returned: success
type: string
sample: 1.2.3.4
protocol:
description: Protocol.
returned: success
type: string
sample: tcp
private_port:
description: Start port on the virtual machine's IP address.
returned: success
type: int
sample: 80
private_end_port:
description: End port on the virtual machine's IP address.
returned: success
type: int
public_port:
description: Start port on the public IP address.
returned: success
type: int
sample: 80
public_end_port:
description: End port on the public IP address.
returned: success
type: int
sample: 80
tags:
description: Tags related to the port forwarding.
returned: success
type: list
sample: []
vm_name:
description: Name of the virtual machine.
returned: success
type: string
sample: web-01
vm_display_name:
description: Display name of the virtual machine.
returned: success
type: string
sample: web-01
vm_guest_ip:
description: IP of the virtual machine.
returned: success
type: string
sample: 10.101.65.152
vpc:
description: Name of the VPC.
returned: success
type: string
sample: my_vpc
network:
description: Name of the network.
returned: success
type: string
sample: dmz
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackPortforwarding(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackPortforwarding, self).__init__(module)
self.returns = {
'virtualmachinedisplayname': 'vm_display_name',
'virtualmachinename': 'vm_name',
'ipaddress': 'ip_address',
'vmguestip': 'vm_guest_ip',
'publicip': 'public_ip',
'protocol': 'protocol',
}
        # these values will be cast to int
self.returns_to_int = {
'publicport': 'public_port',
'publicendport': 'public_end_port',
'privateport': 'private_port',
'privateendport': 'private_end_port',
}
self.portforwarding_rule = None
def get_portforwarding_rule(self):
if not self.portforwarding_rule:
protocol = self.module.params.get('protocol')
public_port = self.module.params.get('public_port')
public_end_port = self.get_or_fallback('public_end_port', 'public_port')
private_port = self.module.params.get('private_port')
private_end_port = self.get_or_fallback('private_end_port', 'private_port')
args = {}
args['ipaddressid'] = self.get_ip_address(key='id')
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
portforwarding_rules = self.cs.listPortForwardingRules(**args)
if portforwarding_rules and 'portforwardingrule' in portforwarding_rules:
for rule in portforwarding_rules['portforwardingrule']:
if (protocol == rule['protocol'] and
public_port == int(rule['publicport'])):
self.portforwarding_rule = rule
break
return self.portforwarding_rule
def present_portforwarding_rule(self):
portforwarding_rule = self.get_portforwarding_rule()
if portforwarding_rule:
portforwarding_rule = self.update_portforwarding_rule(portforwarding_rule)
else:
portforwarding_rule = self.create_portforwarding_rule()
if portforwarding_rule:
portforwarding_rule = self.ensure_tags(resource=portforwarding_rule, resource_type='PortForwardingRule')
            self.portforwarding_rule = portforwarding_rule
return portforwarding_rule
def create_portforwarding_rule(self):
args = {}
args['protocol'] = self.module.params.get('protocol')
args['publicport'] = self.module.params.get('public_port')
args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port')
args['privateport'] = self.module.params.get('private_port')
args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port')
args['openfirewall'] = self.module.params.get('open_firewall')
args['vmguestip'] = self.get_vm_guest_ip()
args['ipaddressid'] = self.get_ip_address(key='id')
args['virtualmachineid'] = self.get_vm(key='id')
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['networkid'] = self.get_network(key='id')
portforwarding_rule = None
self.result['changed'] = True
if not self.module.check_mode:
portforwarding_rule = self.cs.createPortForwardingRule(**args)
poll_async = self.module.params.get('poll_async')
if poll_async:
portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
return portforwarding_rule
def update_portforwarding_rule(self, portforwarding_rule):
args = {}
args['protocol'] = self.module.params.get('protocol')
args['publicport'] = self.module.params.get('public_port')
args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port')
args['privateport'] = self.module.params.get('private_port')
args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port')
args['vmguestip'] = self.get_vm_guest_ip()
args['ipaddressid'] = self.get_ip_address(key='id')
args['virtualmachineid'] = self.get_vm(key='id')
args['networkid'] = self.get_network(key='id')
if self.has_changed(args, portforwarding_rule):
self.result['changed'] = True
if not self.module.check_mode:
# API broken in 4.2.1?, workaround using remove/create instead of update
# portforwarding_rule = self.cs.updatePortForwardingRule(**args)
self.absent_portforwarding_rule()
portforwarding_rule = self.cs.createPortForwardingRule(**args)
poll_async = self.module.params.get('poll_async')
if poll_async:
portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
return portforwarding_rule
def absent_portforwarding_rule(self):
portforwarding_rule = self.get_portforwarding_rule()
if portforwarding_rule:
self.result['changed'] = True
args = {}
args['id'] = portforwarding_rule['id']
if not self.module.check_mode:
res = self.cs.deletePortForwardingRule(**args)
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'portforwardingrule')
return portforwarding_rule
def get_result(self, portforwarding_rule):
super(AnsibleCloudStackPortforwarding, self).get_result(portforwarding_rule)
network_name = self.get_network(key='name')
if network_name:
self.result['network'] = network_name
vpc_name = self.get_vpc(key='name')
if vpc_name:
self.result['vpc'] = vpc_name
if portforwarding_rule:
for search_key, return_key in self.returns_to_int.items():
if search_key in portforwarding_rule:
self.result[return_key] = int(portforwarding_rule[search_key])
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
ip_address = dict(required=True),
        protocol = dict(choices=['tcp', 'udp'], default='tcp'),
public_port = dict(type='int', required=True),
public_end_port = dict(type='int', default=None),
private_port = dict(type='int', required=True),
private_end_port = dict(type='int', default=None),
state = dict(choices=['present', 'absent'], default='present'),
open_firewall = dict(type='bool', default=False),
vm_guest_ip = dict(default=None),
vm = dict(default=None),
vpc = dict(default=None),
network = dict(default=None),
zone = dict(default=None),
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(type='bool', default=True),
        tags = dict(type='list', aliases=['tag'], default=None),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
try:
acs_pf = AnsibleCloudStackPortforwarding(module)
state = module.params.get('state')
if state in ['absent']:
pf_rule = acs_pf.absent_portforwarding_rule()
else:
pf_rule = acs_pf.present_portforwarding_rule()
result = acs_pf.get_result(pf_rule)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
khughitt/ete | ete_dev/tools/ete_ncbi_update.py | 2 | 4129 | import sys
import os
from string import strip
import tarfile
from common import Tree
from utils import ask, ask_filename
def load_ncbi_tree_from_dump(tar):
# Download: ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz
parent2child = {}
name2node = {}
node2taxname = {}
synonyms = set()
name2rank = {}
print "Loading node names..."
for line in tar.extractfile("names.dmp"):
fields = map(strip, line.split("|"))
nodename = fields[0]
name_type = fields[3].lower()
taxname = fields[1]
if name_type == "scientific name":
node2taxname[nodename] = taxname
elif name_type in set(["synonym", "equivalent name", "genbank equivalent name",
"anamorph", "genbank synonym", "genbank anamorph", "teleomorph"]):
synonyms.add( (nodename, taxname) )
print len(node2taxname), "names loaded."
print len(synonyms), "synonyms loaded."
print "Loading nodes..."
for line in tar.extractfile("nodes.dmp"):
fields = line.split("|")
nodename = fields[0].strip()
parentname = fields[1].strip()
n = Tree()
n.name = nodename
n.taxname = node2taxname[nodename]
n.rank = fields[2].strip()
        parent2child[nodename] = parentname  # note: despite the name, this maps a node's taxid to its parent's taxid
name2node[nodename] = n
print len(name2node), "nodes loaded."
print "Linking nodes..."
for node in name2node:
if node == "1":
t = name2node[node]
else:
parent = parent2child[node]
parent_node = name2node[parent]
parent_node.add_child(name2node[node])
print "Tree is loaded."
return t, synonyms
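# A minimal usage sketch (illustrative only; it assumes a previously downloaded
# "taxdump.tar.gz" in the working directory). Node names in the returned tree are NCBI
# taxids, and each node carries the "taxname" and "rank" features set above:
#
#   tar = tarfile.open("taxdump.tar.gz", "r")
#   tree, synonyms = load_ncbi_tree_from_dump(tar)
#   node = tree.search_nodes(name="9606")[0]
#   print node.taxname, node.rank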
def generate_table(t):
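    # Writes one row per node to taxa.tab:
    #   taxid <TAB> parent_taxid <TAB> scientific_name <TAB> rank <TAB> track
    # where "track" is the comma-separated taxid lineage from the node up to the root.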
OUT = open("taxa.tab", "w")
for j, n in enumerate(t.traverse()):
if j%1000 == 0:
print "\r",j,"nodes inserted into the DB.",
temp_node = n
track = []
while temp_node:
track.append(temp_node.name)
temp_node = temp_node.up
if n.up:
print >>OUT, '\t'.join([n.name, n.up.name, n.taxname, n.rank, ','.join(track)])
else:
print >>OUT, '\t'.join([n.name, "", n.taxname, n.rank, ','.join(track)])
OUT.close()
def update(targz_file):
tar = tarfile.open(targz_file, 'r')
t, synonyms = load_ncbi_tree_from_dump(tar)
print "Updating database [ ~/.etetoolkit/taxa.sqlite ] ..."
generate_table(t)
open("syn.tab", "w").write('\n'.join(["%s\t%s" %(v[0],v[1]) for v in synonyms]))
open("merged.tab", "w").write('\n'.join(['\t'.join(map(strip, line.split('|')[:2])) for line in tar.extractfile("merged.dmp")]))
CMD = open("commands.tmp", "w")
cmd = """
DROP TABLE IF EXISTS species;
DROP TABLE IF EXISTS synonym;
DROP TABLE IF EXISTS merged;
CREATE TABLE species (taxid INT PRIMARY KEY, parent INT, spname VARCHAR(50) COLLATE NOCASE, rank VARCHAR(50), track TEXT);
CREATE TABLE synonym (taxid INT,spname VARCHAR(50) COLLATE NOCASE, PRIMARY KEY (spname, taxid));
CREATE TABLE merged (taxid_old INT, taxid_new INT);
CREATE INDEX spname1 ON species (spname COLLATE NOCASE);
CREATE INDEX spname2 ON synonym (spname COLLATE NOCASE);
.separator '\t'
.import taxa.tab species
.import syn.tab synonym
.import merged.tab merged
"""
CMD.write(cmd)
CMD.close()
os.system("mkdir -p ~/.etetoolkit/")
os.system("sqlite3 ~/.etetoolkit/taxa.sqlite < commands.tmp")
os.system("rm syn.tab merged.tab taxa.tab commands.tmp")
print "Creating extended newick file with the whole NCBI tree [ncbi.nw]"
t.write(outfile="./ncbi.nw", features=["name", "taxname"])
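# A rough sketch of querying the database produced above (assumes the update completed;
# the table and column names are the ones from the CREATE TABLE statements):
#
#   import sqlite3
#   db = sqlite3.connect(os.path.expanduser("~/.etetoolkit/taxa.sqlite"))
#   print db.execute("SELECT taxid, rank FROM species WHERE spname = ?",
#                    ("Homo sapiens",)).fetchone()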
def main(args):
if not args:
if ask('Download latest ncbi taxonomy dump file?', ['y', 'n']) == 'y':
status = os.system('wget ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz')
if status == 0:
update('taxdump.tar.gz')
else:
fname = ask_filename('path to tar.gz file containing ncbi taxonomy dump:')
update(fname)
else:
update(args[0])
if __name__ == '__main__':
main(sys.argv[1:])
| gpl-3.0 |
montyly/manticore | tests/ethereum/EVM/test_EVMEXP.py | 1 | 79538 | import struct
import unittest
import json
from manticore.platforms import evm
from manticore.core import state
from manticore.core.smtlib import Operators, ConstraintSet
import os
class EVMTest_EXP(unittest.TestCase):
_multiprocess_can_split_ = True
maxDiff = None
def _execute(self, new_vm):
last_returned = None
last_exception = None
try:
new_vm.execute()
except evm.Stop as e:
last_exception = "STOP"
except evm.NotEnoughGas:
last_exception = "OOG"
except evm.StackUnderflow:
last_exception = "INSUFFICIENT STACK"
except evm.InvalidOpcode:
last_exception = "INVALID"
except evm.SelfDestruct:
last_exception = "SUICIDED"
except evm.Return as e:
last_exception = "RETURN"
last_returned = e.data
except evm.Revert:
last_exception = "REVERT"
return last_exception, last_returned
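    # _execute() runs a single instruction and maps the VM outcome to a label: None means
    # the instruction executed normally, otherwise a string such as "STOP", "OOG" or
    # "REVERT" is returned, together with any data returned by the contract.
    #
    # All tests below follow the same pattern: push the exponent, then the base (EXP pops
    # the base from the top of the stack), execute the one-byte program b"\n" (opcode 0x0a,
    # EXP) and assert that base ** exponent, wrapped modulo 2**256, is left on the stack.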
def test_EXP_1(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[115792089237316195423570985008687907853269984665640564039457584007913129639935],
)
def test_EXP_2(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_3(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_4(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_5(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[104454113832828984621679659393253883542637298667129925477260695573804969029359],
)
def test_EXP_6(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_7(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_8(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_9(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_10(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_11(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_12(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_13(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_14(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_15(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_16(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_17(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_18(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_19(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[115792089237316195423570985008687907853269984665640564039457584007913129639935],
)
def test_EXP_20(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_21(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_22(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[57896044618658097711785492504343953926634992332820282019728792003956564819952],
)
def test_EXP_23(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[3618502788666131106986593281521497120414687020801267626233049500247285301263],
)
def test_EXP_24(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [16])
def test_EXP_25(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [32])
def test_EXP_26(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [48])
def test_EXP_27(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [6089590155545428825848686802984512581899718912])
def test_EXP_28(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_29(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_30(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_31(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_32(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[97153515582439856940218076430383148080316642374323115531717460774015781538049],
)
def test_EXP_33(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_34(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_35(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_36(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_37(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[115792089237316195423570985008687907853269984665640564039457584007913129639935],
)
def test_EXP_38(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_39(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_40(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_41(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[112173586448650064316584391727166410732855297644839296413224972401556225198063],
)
def test_EXP_42(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_43(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_44(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_45(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_46(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_47(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_48(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_49(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [18446744073709551616])
def test_EXP_50(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[57896044618658097711785492504343953926634992332820282019735360412312277710593],
)
def test_EXP_51(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [18446744073709551616])
def test_EXP_52(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1208925819614629174706176])
def test_EXP_53(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [794071845499378503449051136])
def test_EXP_54(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[8303694420805298775410959586403913600201715917447438497573206049841934761984],
)
def test_EXP_55(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_56(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_57(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_58(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [340282366920938463463374607431768211456])
def test_EXP_59(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [43143988327398919500410556793212890625])
def test_EXP_60(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [340282366920938463463374607431768211456])
def test_EXP_61(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1461501637330902918203684832716283019655932542976])
def test_EXP_62(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [630550095814788844423632687832745817333905738742890496])
def test_EXP_63(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_64(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_65(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_66(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_67(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [6277101735386680763835789423207666416102355444464034512896])
def test_EXP_68(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[57896044618658097712068879837772420409703173580337995947392654709187277710593],
)
def test_EXP_69(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [6277101735386680763835789423207666416102355444464034512896])
def test_EXP_70(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[1766847064778384329583297500742918515827483896875618958121606201292619776],
)
def test_EXP_71(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[17084401304090163016086072004374689170541683170424114643147834605304589320192],
)
def test_EXP_72(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_73(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_74(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_75(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_76(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_77(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[42192513242301740010671492996252704544191162524312342410321251717326910681089],
)
def test_EXP_78(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_79(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_80(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_81(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
thaumos/ansible-modules-extras | monitoring/boundary_meter.py | 35 | 8359 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to add boundary meters.
(c) 2013, curtis <[email protected]>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
# Let snippet from module_utils/basic.py return a proper error in this case
pass
import datetime
import base64
import os
DOCUMENTATION = '''
module: boundary_meter
short_description: Manage boundary meters
description:
- This module manages boundary meters
version_added: "1.3"
author: "curtis (@ccollicutt)"
requirements:
- Boundary API access
- bprobe is required to send data, but not to register a meter
options:
name:
description:
- meter name
required: true
state:
description:
- Whether to create or remove the client from boundary
        required: true
choices: ["present", "absent"]
apiid:
description:
- Organizations boundary API ID
required: true
apikey:
description:
- Organizations boundary API KEY
required: true
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
notes:
- This module does not yet support boundary tags.
'''
EXAMPLES='''
- name: Create meter
  boundary_meter: apiid=AAAAAA apikey=BBBBBB state=present name="{{ inventory_hostname }}"
- name: Delete meter
  boundary_meter: apiid=AAAAAA apikey=BBBBBB state=absent name="{{ inventory_hostname }}"
'''
api_host = "api.boundary.com"
config_directory = "/etc/bprobe"
# "resource" like thing or apikey?
def auth_encode(apikey):
auth = base64.standard_b64encode(apikey)
    auth = auth.replace("\n", "")
return auth
def build_url(name, apiid, action, meter_id=None, cert_type=None):
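    # Map each logical API action to the corresponding Boundary REST endpoint.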
if action == "create":
return 'https://%s/%s/meters' % (api_host, apiid)
elif action == "search":
return "https://%s/%s/meters?name=%s" % (api_host, apiid, name)
elif action == "certificates":
return "https://%s/%s/meters/%s/%s.pem" % (api_host, apiid, meter_id, cert_type)
elif action == "tags":
return "https://%s/%s/meters/%s/tags" % (api_host, apiid, meter_id)
elif action == "delete":
return "https://%s/%s/meters/%s" % (api_host, apiid, meter_id)
def http_request(module, name, apiid, apikey, action, data=None, meter_id=None, cert_type=None):
if meter_id is None:
url = build_url(name, apiid, action)
else:
if cert_type is None:
url = build_url(name, apiid, action, meter_id)
else:
url = build_url(name, apiid, action, meter_id, cert_type)
headers = dict()
headers["Authorization"] = "Basic %s" % auth_encode(apikey)
headers["Content-Type"] = "application/json"
return fetch_url(module, url, data=data, headers=headers)
def create_meter(module, name, apiid, apikey):
meters = search_meter(module, name, apiid, apikey)
if len(meters) > 0:
# If the meter already exists, do nothing
module.exit_json(status="Meter " + name + " already exists",changed=False)
else:
# If it doesn't exist, create it
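        # Hand-built JSON payload for the new meter (effectively json.dumps({"name": name})).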
body = '{"name":"' + name + '"}'
response, info = http_request(module, name, apiid, apikey, data=body, action="create")
if info['status'] != 200:
module.fail_json(msg="Failed to connect to api host to create meter")
# If the config directory doesn't exist, create it
if not os.path.exists(config_directory):
try:
os.makedirs(config_directory)
except:
module.fail_json("Could not create " + config_directory)
# Download both cert files from the api host
types = ['key', 'cert']
for cert_type in types:
try:
# If we can't open the file it's not there, so we should download it
cert_file = open('%s/%s.pem' % (config_directory,cert_type))
except IOError:
# Now download the file...
rc = download_request(module, name, apiid, apikey, cert_type)
if rc == False:
module.fail_json("Download request for " + cert_type + ".pem failed")
return 0, "Meter " + name + " created"
def search_meter(module, name, apiid, apikey):
response, info = http_request(module, name, apiid, apikey, action="search")
if info['status'] != 200:
module.fail_json("Failed to connect to api host to search for meter")
# Return meters
return json.loads(response.read())
def get_meter_id(module, name, apiid, apikey):
# In order to delete the meter we need its id
meters = search_meter(module, name, apiid, apikey)
if len(meters) > 0:
return meters[0]['id']
else:
return None
def delete_meter(module, name, apiid, apikey):
meter_id = get_meter_id(module, name, apiid, apikey)
if meter_id is None:
return 1, "Meter does not exist, so can't delete it"
else:
        response, info = http_request(module, name, apiid, apikey, action="delete", meter_id=meter_id)
if info['status'] != 200:
module.fail_json("Failed to delete meter")
# Each new meter gets a new key.pem and ca.pem file, so they should be deleted
types = ['cert', 'key']
for cert_type in types:
try:
cert_file = '%s/%s.pem' % (config_directory,cert_type)
os.remove(cert_file)
except OSError, e:
module.fail_json("Failed to remove " + cert_type + ".pem file")
return 0, "Meter " + name + " deleted"
def download_request(module, name, apiid, apikey, cert_type):
meter_id = get_meter_id(module, name, apiid, apikey)
if meter_id is not None:
action = "certificates"
        response, info = http_request(module, name, apiid, apikey, action=action, meter_id=meter_id, cert_type=cert_type)
if info['status'] != 200:
module.fail_json("Failed to connect to api host to download certificate")
        if response:
try:
cert_file_path = '%s/%s.pem' % (config_directory,cert_type)
body = response.read()
cert_file = open(cert_file_path, 'w')
cert_file.write(body)
cert_file.close()
os.chmod(cert_file_path, int('0600', 8))
except:
module.fail_json("Could not write to certificate file")
return True
else:
module.fail_json("Could not get meter id")
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=False),
apikey=dict(required=True),
apiid=dict(required=True),
validate_certs = dict(default='yes', type='bool'),
)
)
state = module.params['state']
    name = module.params['name']
    apikey = module.params['apikey']
    apiid = module.params['apiid']
if state == "present":
(rc, result) = create_meter(module, name, apiid, apikey)
if state == "absent":
(rc, result) = delete_meter(module, name, apiid, apikey)
if rc != 0:
module.fail_json(msg=result)
module.exit_json(status=result,changed=True)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 |
amitaekbote/dcos | gen/tests/utils.py | 3 | 1714 | """
Utilities for tests for ``gen``.
"""
import copy
import json
import pkg_resources
import gen
true_false_msg = "Must be one of 'true', 'false'. Got 'foo'."
def make_arguments(new_arguments):
"""
Fields with default values should not be added in here so that the
default values are also tested.
"""
arguments = copy.deepcopy({
'ip_detect_filename': pkg_resources.resource_filename('gen', 'ip-detect/aws.sh'),
'ip6_detect_filename': pkg_resources.resource_filename('gen', 'ip-detect/aws6.sh'),
'bootstrap_id': '123',
'package_ids': json.dumps(['package--version']),
'exhibitor_zk_path': '/dcos',
'master_discovery': 'static',
'platform': 'aws',
'provider': 'onprem',
'exhibitor_zk_hosts': '52.37.205.237:2181',
'resolvers': '["8.8.8.8", "8.8.4.4"]',
'master_list': '["52.37.192.49", "52.37.181.230", "52.37.163.105"]',
'exhibitor_storage_backend': 'zookeeper',
'bootstrap_url': 'file:///opt/dcos_install_tmp',
'cluster_name': 'Mesosphere: The Data Center Operating System',
'bootstrap_variant': '',
'oauth_available': 'true',
'oauth_enabled': 'true',
'enable_docker_gc': 'false'})
arguments.update(new_arguments)
return arguments
def validate_error(new_arguments, key, message, unset=None):
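    # Merge the overrides into the baseline arguments and check that
    # gen.validate reports exactly the expected error for `key`.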
assert gen.validate(arguments=make_arguments(new_arguments)) == {
'status': 'errors',
'errors': {key: {'message': message}},
'unset': set() if unset is None else unset,
}
def validate_success(new_arguments):
assert gen.validate(arguments=make_arguments(new_arguments)) == {
'status': 'ok',
}
| apache-2.0 |
tiagofrepereira2012/tensorflow | tensorflow/python/kernel_tests/matrix_inverse_op_test.py | 20 | 4503 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class InverseOpTest(test.TestCase):
def _verifyInverse(self, x, np_type):
for adjoint in False, True:
y = x.astype(np_type)
with self.test_session(use_gpu=True):
# Verify that x^{-1} * x == Identity matrix.
inv = linalg_ops.matrix_inverse(y, adjoint=adjoint)
tf_ans = math_ops.matmul(inv, y, adjoint_b=adjoint)
np_ans = np.identity(y.shape[-1])
if x.ndim > 2:
tiling = list(y.shape)
tiling[-2:] = [1, 1]
np_ans = np.tile(np_ans, tiling)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(y, tf_ans)
def _verifyInverseReal(self, x):
for np_type in [np.float32, np.float64]:
self._verifyInverse(x, np_type)
def _verifyInverseComplex(self, x):
for np_type in [np.complex64, np.complex128]:
self._verifyInverse(x, np_type)
def _makeBatch(self, matrix1, matrix2):
matrix_batch = np.concatenate(
[np.expand_dims(matrix1, 0), np.expand_dims(matrix2, 0)])
matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
return matrix_batch
def testNonsymmetric(self):
# 2x2 matrices
matrix1 = np.array([[1., 2.], [3., 4.]])
matrix2 = np.array([[1., 3.], [3., 5.]])
self._verifyInverseReal(matrix1)
self._verifyInverseReal(matrix2)
# A multidimensional batch of 2x2 matrices
self._verifyInverseReal(self._makeBatch(matrix1, matrix2))
# Complex
matrix1 = matrix1.astype(np.complex64)
matrix1 += 1j * matrix1
matrix2 = matrix2.astype(np.complex64)
matrix2 += 1j * matrix2
self._verifyInverseComplex(matrix1)
self._verifyInverseComplex(matrix2)
# Complex batch
self._verifyInverseComplex(self._makeBatch(matrix1, matrix2))
def testSymmetricPositiveDefinite(self):
# 2x2 matrices
matrix1 = np.array([[2., 1.], [1., 2.]])
matrix2 = np.array([[3., -1.], [-1., 3.]])
self._verifyInverseReal(matrix1)
self._verifyInverseReal(matrix2)
# A multidimensional batch of 2x2 matrices
self._verifyInverseReal(self._makeBatch(matrix1, matrix2))
# Complex
matrix1 = matrix1.astype(np.complex64)
matrix1 += 1j * matrix1
matrix2 = matrix2.astype(np.complex64)
matrix2 += 1j * matrix2
self._verifyInverseComplex(matrix1)
self._verifyInverseComplex(matrix2)
# Complex batch
self._verifyInverseComplex(self._makeBatch(matrix1, matrix2))
def testNonSquareMatrix(self):
    # When the inverse of a non-square matrix is attempted we should raise
    # an error.
with self.assertRaises(ValueError):
linalg_ops.matrix_inverse(np.array([[1., 2., 3.], [3., 4., 5.]]))
def testWrongDimensions(self):
# The input to the inverse should be at least a 2-dimensional tensor.
tensor3 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
linalg_ops.matrix_inverse(tensor3)
def testNotInvertible(self):
# The input should be invertible.
with self.test_session():
with self.assertRaisesOpError("Input is not invertible."):
# All rows of the matrix below add to zero.
tensor3 = constant_op.constant([[1., 0., -1.], [-1., 1., 0.],
[0., -1., 1.]])
linalg_ops.matrix_inverse(tensor3).eval()
def testEmpty(self):
self._verifyInverseReal(np.empty([0, 2, 2]))
self._verifyInverseReal(np.empty([2, 0, 0]))
if __name__ == "__main__":
test.main()
| apache-2.0 |
kevin-intel/scikit-learn | examples/multioutput/plot_classifier_chain_yeast.py | 23 | 4637 | """
============================
Classifier Chain
============================
Example of using classifier chain on a multilabel dataset.
For this example we will use the `yeast
<https://www.openml.org/d/40597>`_ dataset which contains
2417 datapoints each with 103 features and 14 possible labels. Each
data point has at least one label. As a baseline we first train a logistic
regression classifier for each of the 14 labels. To evaluate the performance of
these classifiers we predict on a held-out test set and calculate the
:ref:`jaccard score <jaccard_similarity_score>` for each sample.
Next we create 10 classifier chains. Each classifier chain contains a
logistic regression model for each of the 14 labels. The models in each
chain are ordered randomly. In addition to the 103 features in the dataset,
each model gets the predictions of the preceding models in the chain as
features (note that by default at training time each model gets the true
labels as features). These additional features allow each chain to exploit
correlations among the classes. The Jaccard similarity score for each chain
tends to be greater than that of the set of independent logistic models.
Because the models in each chain are arranged randomly there is significant
variation in performance among the chains. Presumably there is an optimal
ordering of the classes in a chain that will yield the best performance.
However we do not know that ordering a priori. Instead we can construct a
voting ensemble of classifier chains by averaging the binary predictions of
the chains and apply a threshold of 0.5. The Jaccard similarity score of the
ensemble is greater than that of the independent models and tends to exceed
the score of each chain in the ensemble (although this is not guaranteed
with randomly ordered chains).
"""
# Author: Adam Kleczewski
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.multioutput import ClassifierChain
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import jaccard_score
from sklearn.linear_model import LogisticRegression
print(__doc__)
# Load a multi-label dataset from https://www.openml.org/d/40597
X, Y = fetch_openml('yeast', version=4, return_X_y=True)
Y = Y == 'TRUE'
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.2,
random_state=0)
# Fit an independent logistic regression model for each class using the
# OneVsRestClassifier wrapper.
base_lr = LogisticRegression()
ovr = OneVsRestClassifier(base_lr)
ovr.fit(X_train, Y_train)
Y_pred_ovr = ovr.predict(X_test)
ovr_jaccard_score = jaccard_score(Y_test, Y_pred_ovr, average='samples')
# Fit an ensemble of logistic regression classifier chains and take the
# average prediction of all the chains.
chains = [ClassifierChain(base_lr, order='random', random_state=i)
for i in range(10)]
for chain in chains:
chain.fit(X_train, Y_train)
Y_pred_chains = np.array([chain.predict(X_test) for chain in
chains])
chain_jaccard_scores = [jaccard_score(Y_test, Y_pred_chain >= .5,
average='samples')
for Y_pred_chain in Y_pred_chains]
Y_pred_ensemble = Y_pred_chains.mean(axis=0)
ensemble_jaccard_score = jaccard_score(Y_test,
Y_pred_ensemble >= .5,
average='samples')
model_scores = [ovr_jaccard_score] + chain_jaccard_scores
model_scores.append(ensemble_jaccard_score)
model_names = ('Independent',
'Chain 1',
'Chain 2',
'Chain 3',
'Chain 4',
'Chain 5',
'Chain 6',
'Chain 7',
'Chain 8',
'Chain 9',
'Chain 10',
'Ensemble')
x_pos = np.arange(len(model_names))
# Plot the Jaccard similarity scores for the independent model, each of the
# chains, and the ensemble (note that the vertical axis on this plot does
# not begin at 0).
fig, ax = plt.subplots(figsize=(7, 4))
ax.grid(True)
ax.set_title('Classifier Chain Ensemble Performance Comparison')
ax.set_xticks(x_pos)
ax.set_xticklabels(model_names, rotation='vertical')
ax.set_ylabel('Jaccard Similarity Score')
ax.set_ylim([min(model_scores) * .9, max(model_scores) * 1.1])
colors = ['r'] + ['b'] * len(chain_jaccard_scores) + ['g']
ax.bar(x_pos, model_scores, alpha=0.5, color=colors)
plt.tight_layout()
plt.show()
| bsd-3-clause |
joebowen/movement_validation_cloud | djangodev/lib/python2.7/site-packages/boto/rds/regioninfo.py | 167 | 1513 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo
class RDSRegionInfo(RegionInfo):
def __init__(self, connection=None, name=None, endpoint=None,
connection_cls=None):
from boto.rds import RDSConnection
super(RDSRegionInfo, self).__init__(connection, name, endpoint,
RDSConnection)
| mit |
tartavull/google-cloud-python | speech/google/cloud/speech/alternative.py | 2 | 2466 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Representation of Speech Alternative for the Google Speech API."""
class Alternative(object):
"""Representation of Speech Alternative.
:type transcript: str
:param transcript: String of transcribed data.
:type confidence: float
:param confidence: The confidence estimate between 0.0 and 1.0.
"""
def __init__(self, transcript, confidence):
self._transcript = transcript
self._confidence = confidence
@classmethod
def from_api_repr(cls, alternative):
"""Factory: construct ``Alternative`` from JSON response.
:type alternative: dict
:param alternative: Dictionary response from the REST API.
:rtype: :class:`Alternative`
:returns: Instance of ``Alternative``.
"""
return cls(alternative['transcript'], alternative.get('confidence'))
@classmethod
def from_pb(cls, alternative):
"""Factory: construct ``Alternative`` from protobuf response.
:type alternative:
:class:`google.cloud.speech.v1.SpeechRecognitionAlternative`
:param alternative: Instance of ``SpeechRecognitionAlternative``
from protobuf.
:rtype: :class:`Alternative`
:returns: Instance of ``Alternative``.
"""
confidence = alternative.confidence
        if confidence == 0.0:  # In the protobuf, 0.0 means unset.
confidence = None
return cls(alternative.transcript, confidence)
@property
def transcript(self):
"""Transcript text from audio.
:rtype: str
:returns: Text detected in audio.
"""
return self._transcript
@property
def confidence(self):
"""Confidence score for recognized speech.
:rtype: float
:returns: Confidence score of recognized speech [0-1].
"""
return self._confidence
| apache-2.0 |
glennrub/micropython | examples/rp2/pio_pinchange.py | 7 | 1199 | # Example using PIO to wait for a pin change and raise an IRQ.
#
# Demonstrates:
# - PIO wrapping
# - PIO wait instruction, waiting on an input pin
# - PIO irq instruction, in blocking mode with relative IRQ number
# - setting the in_base pin for a StateMachine
# - setting an irq handler for a StateMachine
# - instantiating 2x StateMachine's with the same program and different pins
import time
from machine import Pin
import rp2
@rp2.asm_pio()
def wait_pin_low():
    wrap_target()
    wait(0, pin, 0)     # Block until the input pin reads low.
    irq(block, rel(0))  # Raise this state machine's relative IRQ and stall until it is cleared.
    wait(1, pin, 0)     # Wait for the pin to go high again before looping.
    wrap()
def handler(sm):
# Print a (wrapping) timestamp, and the state machine object.
print(time.ticks_ms(), sm)
# Instantiate StateMachine(0) with wait_pin_low program on Pin(16).
pin16 = Pin(16, Pin.IN, Pin.PULL_UP)
sm0 = rp2.StateMachine(0, wait_pin_low, in_base=pin16)
sm0.irq(handler)
# Instantiate StateMachine(1) with wait_pin_low program on Pin(17).
pin17 = Pin(17, Pin.IN, Pin.PULL_UP)
sm1 = rp2.StateMachine(1, wait_pin_low, in_base=pin17)
sm1.irq(handler)
# Start the StateMachine's running.
sm0.active(1)
sm1.active(1)
# Now, when Pin(16) or Pin(17) is pulled low a message will be printed to the REPL.
| mit |
sauliusl/scipy | scipy/stats/tests/test_multivariate.py | 35 | 30527 | """
Test functions for multivariate distributions.
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_array_almost_equal, assert_equal,
assert_raises, run_module_suite, TestCase)
from test_continuous_basic import check_distribution_rvs
import numpy
import numpy as np
import scipy.linalg
from scipy.stats._multivariate import _PSD, _lnB
from scipy.stats import multivariate_normal
from scipy.stats import dirichlet, beta
from scipy.stats import wishart, invwishart, chi2, invgamma
from scipy.stats import norm
from scipy.integrate import romb
from common_tests import check_random_state_property
class TestMultivariateNormal(TestCase):
def test_input_shape(self):
mu = np.arange(3)
cov = np.identity(2)
assert_raises(ValueError, multivariate_normal.pdf, (0, 1), mu, cov)
assert_raises(ValueError, multivariate_normal.pdf, (0, 1, 2), mu, cov)
def test_scalar_values(self):
np.random.seed(1234)
# When evaluated on scalar data, the pdf should return a scalar
x, mean, cov = 1.5, 1.7, 2.5
pdf = multivariate_normal.pdf(x, mean, cov)
assert_equal(pdf.ndim, 0)
# When evaluated on a single vector, the pdf should return a scalar
x = np.random.randn(5)
mean = np.random.randn(5)
cov = np.abs(np.random.randn(5)) # Diagonal values for cov. matrix
pdf = multivariate_normal.pdf(x, mean, cov)
assert_equal(pdf.ndim, 0)
def test_logpdf(self):
# Check that the log of the pdf is in fact the logpdf
np.random.seed(1234)
x = np.random.randn(5)
mean = np.random.randn(5)
cov = np.abs(np.random.randn(5))
d1 = multivariate_normal.logpdf(x, mean, cov)
d2 = multivariate_normal.pdf(x, mean, cov)
assert_allclose(d1, np.log(d2))
def test_rank(self):
# Check that the rank is detected correctly.
np.random.seed(1234)
n = 4
mean = np.random.randn(n)
for expected_rank in range(1, n + 1):
s = np.random.randn(n, expected_rank)
cov = np.dot(s, s.T)
distn = multivariate_normal(mean, cov, allow_singular=True)
assert_equal(distn.cov_info.rank, expected_rank)
def test_degenerate_distributions(self):
def _sample_orthonormal_matrix(n):
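            # The left singular vectors of a random Gaussian matrix give a
            # random orthonormal matrix, used below to rotate the low-rank covariance.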
M = np.random.randn(n, n)
u, s, v = scipy.linalg.svd(M)
return u
for n in range(1, 5):
x = np.random.randn(n)
for k in range(1, n + 1):
# Sample a small covariance matrix.
s = np.random.randn(k, k)
cov_kk = np.dot(s, s.T)
# Embed the small covariance matrix into a larger low rank matrix.
cov_nn = np.zeros((n, n))
cov_nn[:k, :k] = cov_kk
# Define a rotation of the larger low rank matrix.
u = _sample_orthonormal_matrix(n)
cov_rr = np.dot(u, np.dot(cov_nn, u.T))
y = np.dot(u, x)
# Check some identities.
distn_kk = multivariate_normal(np.zeros(k), cov_kk,
allow_singular=True)
distn_nn = multivariate_normal(np.zeros(n), cov_nn,
allow_singular=True)
distn_rr = multivariate_normal(np.zeros(n), cov_rr,
allow_singular=True)
assert_equal(distn_kk.cov_info.rank, k)
assert_equal(distn_nn.cov_info.rank, k)
assert_equal(distn_rr.cov_info.rank, k)
pdf_kk = distn_kk.pdf(x[:k])
pdf_nn = distn_nn.pdf(x)
pdf_rr = distn_rr.pdf(y)
assert_allclose(pdf_kk, pdf_nn)
assert_allclose(pdf_kk, pdf_rr)
logpdf_kk = distn_kk.logpdf(x[:k])
logpdf_nn = distn_nn.logpdf(x)
logpdf_rr = distn_rr.logpdf(y)
assert_allclose(logpdf_kk, logpdf_nn)
assert_allclose(logpdf_kk, logpdf_rr)
def test_large_pseudo_determinant(self):
# Check that large pseudo-determinants are handled appropriately.
# Construct a singular diagonal covariance matrix
# whose pseudo determinant overflows double precision.
large_total_log = 1000.0
npos = 100
nzero = 2
large_entry = np.exp(large_total_log / npos)
n = npos + nzero
cov = np.zeros((n, n), dtype=float)
np.fill_diagonal(cov, large_entry)
cov[-nzero:, -nzero:] = 0
# Check some determinants.
assert_equal(scipy.linalg.det(cov), 0)
assert_equal(scipy.linalg.det(cov[:npos, :npos]), np.inf)
assert_allclose(np.linalg.slogdet(cov[:npos, :npos]),
(1, large_total_log))
# Check the pseudo-determinant.
psd = _PSD(cov)
assert_allclose(psd.log_pdet, large_total_log)
def test_broadcasting(self):
np.random.seed(1234)
n = 4
# Construct a random covariance matrix.
data = np.random.randn(n, n)
cov = np.dot(data, data.T)
mean = np.random.randn(n)
# Construct an ndarray which can be interpreted as
# a 2x3 array whose elements are random data vectors.
X = np.random.randn(2, 3, n)
# Check that multiple data points can be evaluated at once.
for i in range(2):
for j in range(3):
actual = multivariate_normal.pdf(X[i, j], mean, cov)
desired = multivariate_normal.pdf(X, mean, cov)[i, j]
assert_allclose(actual, desired)
def test_normal_1D(self):
# The probability density function for a 1D normal variable should
# agree with the standard normal distribution in scipy.stats.distributions
x = np.linspace(0, 2, 10)
mean, cov = 1.2, 0.9
scale = cov**0.5
d1 = norm.pdf(x, mean, scale)
d2 = multivariate_normal.pdf(x, mean, cov)
assert_allclose(d1, d2)
def test_marginalization(self):
# Integrating out one of the variables of a 2D Gaussian should
# yield a 1D Gaussian
mean = np.array([2.5, 3.5])
cov = np.array([[.5, 0.2], [0.2, .6]])
n = 2 ** 8 + 1 # Number of samples
delta = 6 / (n - 1) # Grid spacing
v = np.linspace(0, 6, n)
xv, yv = np.meshgrid(v, v)
pos = np.empty((n, n, 2))
pos[:, :, 0] = xv
pos[:, :, 1] = yv
pdf = multivariate_normal.pdf(pos, mean, cov)
# Marginalize over x and y axis
margin_x = romb(pdf, delta, axis=0)
margin_y = romb(pdf, delta, axis=1)
# Compare with standard normal distribution
gauss_x = norm.pdf(v, loc=mean[0], scale=cov[0, 0] ** 0.5)
gauss_y = norm.pdf(v, loc=mean[1], scale=cov[1, 1] ** 0.5)
assert_allclose(margin_x, gauss_x, rtol=1e-2, atol=1e-2)
assert_allclose(margin_y, gauss_y, rtol=1e-2, atol=1e-2)
def test_frozen(self):
# The frozen distribution should agree with the regular one
np.random.seed(1234)
x = np.random.randn(5)
mean = np.random.randn(5)
cov = np.abs(np.random.randn(5))
norm_frozen = multivariate_normal(mean, cov)
assert_allclose(norm_frozen.pdf(x), multivariate_normal.pdf(x, mean, cov))
assert_allclose(norm_frozen.logpdf(x),
multivariate_normal.logpdf(x, mean, cov))
def test_pseudodet_pinv(self):
# Make sure that pseudo-inverse and pseudo-det agree on cutoff
# Assemble random covariance matrix with large and small eigenvalues
np.random.seed(1234)
n = 7
x = np.random.randn(n, n)
cov = np.dot(x, x.T)
s, u = scipy.linalg.eigh(cov)
s = 0.5 * np.ones(n)
s[0] = 1.0
s[-1] = 1e-7
cov = np.dot(u, np.dot(np.diag(s), u.T))
# Set cond so that the lowest eigenvalue is below the cutoff
cond = 1e-5
psd = _PSD(cov, cond=cond)
psd_pinv = _PSD(psd.pinv, cond=cond)
# Check that the log pseudo-determinant agrees with the sum
# of the logs of all but the smallest eigenvalue
assert_allclose(psd.log_pdet, np.sum(np.log(s[:-1])))
# Check that the pseudo-determinant of the pseudo-inverse
# agrees with 1 / pseudo-determinant
assert_allclose(-psd.log_pdet, psd_pinv.log_pdet)
def test_exception_nonsquare_cov(self):
cov = [[1, 2, 3], [4, 5, 6]]
assert_raises(ValueError, _PSD, cov)
def test_exception_nonfinite_cov(self):
cov_nan = [[1, 0], [0, np.nan]]
assert_raises(ValueError, _PSD, cov_nan)
cov_inf = [[1, 0], [0, np.inf]]
assert_raises(ValueError, _PSD, cov_inf)
def test_exception_non_psd_cov(self):
cov = [[1, 0], [0, -1]]
assert_raises(ValueError, _PSD, cov)
def test_exception_singular_cov(self):
np.random.seed(1234)
x = np.random.randn(5)
mean = np.random.randn(5)
cov = np.ones((5, 5))
e = np.linalg.LinAlgError
assert_raises(e, multivariate_normal, mean, cov)
assert_raises(e, multivariate_normal.pdf, x, mean, cov)
assert_raises(e, multivariate_normal.logpdf, x, mean, cov)
def test_R_values(self):
# Compare the multivariate pdf with some values precomputed
# in R version 3.0.1 (2013-05-16) on Mac OS X 10.6.
# The values below were generated by the following R-script:
# > library(mnormt)
# > x <- seq(0, 2, length=5)
# > y <- 3*x - 2
# > z <- x + cos(y)
# > mu <- c(1, 3, 2)
# > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
# > r_pdf <- dmnorm(cbind(x,y,z), mu, Sigma)
r_pdf = np.array([0.0002214706, 0.0013819953, 0.0049138692,
0.0103803050, 0.0140250800])
x = np.linspace(0, 2, 5)
y = 3 * x - 2
z = x + np.cos(y)
r = np.array([x, y, z]).T
mean = np.array([1, 3, 2], 'd')
cov = np.array([[1, 2, 0], [2, 5, .5], [0, .5, 3]], 'd')
pdf = multivariate_normal.pdf(r, mean, cov)
assert_allclose(pdf, r_pdf, atol=1e-10)
def test_multivariate_normal_rvs_zero_covariance(self):
mean = np.zeros(2)
covariance = np.zeros((2, 2))
model = multivariate_normal(mean, covariance, allow_singular=True)
sample = model.rvs()
assert_equal(sample, [0, 0])
def test_rvs_shape(self):
# Check that rvs parses the mean and covariance correctly, and returns
# an array of the right shape
N = 300
d = 4
sample = multivariate_normal.rvs(mean=np.zeros(d), cov=1, size=N)
assert_equal(sample.shape, (N, d))
sample = multivariate_normal.rvs(mean=None,
cov=np.array([[2, .1], [.1, 1]]),
size=N)
assert_equal(sample.shape, (N, 2))
u = multivariate_normal(mean=0, cov=1)
sample = u.rvs(N)
assert_equal(sample.shape, (N, ))
def test_large_sample(self):
# Generate large sample and compare sample mean and sample covariance
# with mean and covariance matrix.
np.random.seed(2846)
n = 3
mean = np.random.randn(n)
M = np.random.randn(n, n)
cov = np.dot(M, M.T)
size = 5000
sample = multivariate_normal.rvs(mean, cov, size)
assert_allclose(numpy.cov(sample.T), cov, rtol=1e-1)
assert_allclose(sample.mean(0), mean, rtol=1e-1)
def test_entropy(self):
np.random.seed(2846)
n = 3
mean = np.random.randn(n)
M = np.random.randn(n, n)
cov = np.dot(M, M.T)
rv = multivariate_normal(mean, cov)
# Check that frozen distribution agrees with entropy function
assert_almost_equal(rv.entropy(), multivariate_normal.entropy(mean, cov))
# Compare entropy with manually computed expression involving
# the sum of the logs of the eigenvalues of the covariance matrix
eigs = np.linalg.eig(cov)[0]
desired = 1 / 2 * (n * (np.log(2 * np.pi) + 1) + np.sum(np.log(eigs)))
assert_almost_equal(desired, rv.entropy())
def test_lnB(self):
alpha = np.array([1, 1, 1])
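        # B(alpha) = prod(Gamma(alpha_i)) / Gamma(sum(alpha_i)); for [1, 1, 1]
        # this is 1 / Gamma(3) = 1/2.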
desired = .5 # e^lnB = 1/2 for [1, 1, 1]
assert_almost_equal(np.exp(_lnB(alpha)), desired)
class TestDirichlet(TestCase):
def test_frozen_dirichlet(self):
np.random.seed(2846)
n = np.random.randint(1, 32)
alpha = np.random.uniform(10e-10, 100, n)
d = dirichlet(alpha)
assert_equal(d.var(), dirichlet.var(alpha))
assert_equal(d.mean(), dirichlet.mean(alpha))
assert_equal(d.entropy(), dirichlet.entropy(alpha))
num_tests = 10
for i in range(num_tests):
x = np.random.uniform(10e-10, 100, n)
x /= np.sum(x)
assert_equal(d.pdf(x[:-1]), dirichlet.pdf(x[:-1], alpha))
assert_equal(d.logpdf(x[:-1]), dirichlet.logpdf(x[:-1], alpha))
def test_numpy_rvs_shape_compatibility(self):
np.random.seed(2846)
alpha = np.array([1.0, 2.0, 3.0])
x = np.random.dirichlet(alpha, size=7)
assert_equal(x.shape, (7, 3))
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
dirichlet.pdf(x.T, alpha)
dirichlet.pdf(x.T[:-1], alpha)
dirichlet.logpdf(x.T, alpha)
dirichlet.logpdf(x.T[:-1], alpha)
def test_alpha_with_zeros(self):
np.random.seed(2846)
alpha = [1.0, 0.0, 3.0]
x = np.random.dirichlet(alpha, size=7).T
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_alpha_with_negative_entries(self):
np.random.seed(2846)
alpha = [1.0, -2.0, 3.0]
x = np.random.dirichlet(alpha, size=7).T
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_data_with_zeros(self):
alpha = np.array([1.0, 2.0, 3.0, 4.0])
x = np.array([0.1, 0.0, 0.2, 0.7])
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_data_with_negative_entries(self):
alpha = np.array([1.0, 2.0, 3.0, 4.0])
x = np.array([0.1, -0.1, 0.3, 0.7])
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_data_with_too_large_entries(self):
alpha = np.array([1.0, 2.0, 3.0, 4.0])
x = np.array([0.1, 1.1, 0.3, 0.7])
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_data_too_deep_c(self):
alpha = np.array([1.0, 2.0, 3.0])
x = np.ones((2, 7, 7)) / 14
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_alpha_too_deep(self):
alpha = np.array([[1.0, 2.0], [3.0, 4.0]])
x = np.ones((2, 2, 7)) / 4
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_alpha_correct_depth(self):
alpha = np.array([1.0, 2.0, 3.0])
x = np.ones((3, 7)) / 3
dirichlet.pdf(x, alpha)
dirichlet.logpdf(x, alpha)
def test_non_simplex_data(self):
alpha = np.array([1.0, 2.0, 3.0])
x = np.ones((3, 7)) / 2
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_data_vector_too_short(self):
alpha = np.array([1.0, 2.0, 3.0, 4.0])
x = np.ones((2, 7)) / 2
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_data_vector_too_long(self):
alpha = np.array([1.0, 2.0, 3.0, 4.0])
x = np.ones((5, 7)) / 5
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_simple_values(self):
alpha = np.array([1, 1])
d = dirichlet(alpha)
assert_almost_equal(d.mean(), 0.5)
assert_almost_equal(d.var(), 1. / 12.)
b = beta(1, 1)
assert_almost_equal(d.mean(), b.mean())
assert_almost_equal(d.var(), b.var())
def test_K_and_K_minus_1_calls_equal(self):
# Test that calls with K and K-1 entries yield the same results.
np.random.seed(2846)
n = np.random.randint(1, 32)
alpha = np.random.uniform(10e-10, 100, n)
d = dirichlet(alpha)
num_tests = 10
for i in range(num_tests):
x = np.random.uniform(10e-10, 100, n)
x /= np.sum(x)
assert_almost_equal(d.pdf(x[:-1]), d.pdf(x))
def test_multiple_entry_calls(self):
        # Test that calls with multiple x vectors given as a matrix work
np.random.seed(2846)
n = np.random.randint(1, 32)
alpha = np.random.uniform(10e-10, 100, n)
d = dirichlet(alpha)
num_tests = 10
num_multiple = 5
xm = None
for i in range(num_tests):
for m in range(num_multiple):
x = np.random.uniform(10e-10, 100, n)
x /= np.sum(x)
if xm is not None:
xm = np.vstack((xm, x))
else:
xm = x
rm = d.pdf(xm.T)
rs = None
for xs in xm:
r = d.pdf(xs)
if rs is not None:
rs = np.append(rs, r)
else:
rs = r
assert_array_almost_equal(rm, rs)
def test_2D_dirichlet_is_beta(self):
np.random.seed(2846)
alpha = np.random.uniform(10e-10, 100, 2)
d = dirichlet(alpha)
b = beta(alpha[0], alpha[1])
num_tests = 10
for i in range(num_tests):
x = np.random.uniform(10e-10, 100, 2)
x /= np.sum(x)
assert_almost_equal(b.pdf(x), d.pdf([x]))
assert_almost_equal(b.mean(), d.mean()[0])
assert_almost_equal(b.var(), d.var()[0])
def test_multivariate_normal_dimensions_mismatch():
# Regression test for GH #3493. Check that setting up a PDF with a mean of
# length M and a covariance matrix of size (N, N), where M != N, raises a
# ValueError with an informative error message.
mu = np.array([0.0, 0.0])
sigma = np.array([[1.0]])
assert_raises(ValueError, multivariate_normal, mu, sigma)
# A simple check that the right error message was passed along. Checking
# that the entire message is there, word for word, would be somewhat
# fragile, so we just check for the leading part.
try:
multivariate_normal(mu, sigma)
except ValueError as e:
msg = "Dimension mismatch"
assert_equal(str(e)[:len(msg)], msg)
class TestWishart(TestCase):
def test_scale_dimensions(self):
# Test that we can call the Wishart with various scale dimensions
# Test case: dim=1, scale=1
true_scale = np.array(1, ndmin=2)
scales = [
1, # scalar
[1], # iterable
np.array(1), # 0-dim
np.r_[1], # 1-dim
np.array(1, ndmin=2) # 2-dim
]
for scale in scales:
w = wishart(1, scale)
assert_equal(w.scale, true_scale)
assert_equal(w.scale.shape, true_scale.shape)
# Test case: dim=2, scale=[[1,0]
# [0,2]
true_scale = np.array([[1,0],
[0,2]])
scales = [
[1,2], # iterable
np.r_[1,2], # 1-dim
np.array([[1,0], # 2-dim
[0,2]])
]
for scale in scales:
w = wishart(2, scale)
assert_equal(w.scale, true_scale)
assert_equal(w.scale.shape, true_scale.shape)
# We cannot call with a df < dim
assert_raises(ValueError, wishart, 1, np.eye(2))
        # We cannot call with a 3-dimensional array
scale = np.array(1, ndmin=3)
assert_raises(ValueError, wishart, 1, scale)
def test_quantile_dimensions(self):
# Test that we can call the Wishart rvs with various quantile dimensions
# If dim == 1, consider x.shape = [1,1,1]
X = [
1, # scalar
[1], # iterable
np.array(1), # 0-dim
np.r_[1], # 1-dim
np.array(1, ndmin=2), # 2-dim
np.array([1], ndmin=3) # 3-dim
]
w = wishart(1,1)
density = w.pdf(np.array(1, ndmin=3))
for x in X:
assert_equal(w.pdf(x), density)
# If dim == 1, consider x.shape = [1,1,*]
X = [
[1,2,3], # iterable
np.r_[1,2,3], # 1-dim
np.array([1,2,3], ndmin=3) # 3-dim
]
w = wishart(1,1)
density = w.pdf(np.array([1,2,3], ndmin=3))
for x in X:
assert_equal(w.pdf(x), density)
# If dim == 2, consider x.shape = [2,2,1]
# where x[:,:,*] = np.eye(1)*2
X = [
2, # scalar
[2,2], # iterable
np.array(2), # 0-dim
np.r_[2,2], # 1-dim
np.array([[2,0],
[0,2]]), # 2-dim
np.array([[2,0],
[0,2]])[:,:,np.newaxis] # 3-dim
]
w = wishart(2,np.eye(2))
density = w.pdf(np.array([[2,0],
[0,2]])[:,:,np.newaxis])
for x in X:
assert_equal(w.pdf(x), density)
def test_frozen(self):
# Test that the frozen and non-frozen Wishart gives the same answers
# Construct an arbitrary positive definite scale matrix
dim = 4
scale = np.diag(np.arange(dim)+1)
scale[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
scale = np.dot(scale.T, scale)
# Construct a collection of positive definite matrices to test the PDF
X = []
for i in range(5):
x = np.diag(np.arange(dim)+(i+1)**2)
x[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
x = np.dot(x.T, x)
X.append(x)
X = np.array(X).T
# Construct a 1D and 2D set of parameters
parameters = [
(10, 1, np.linspace(0.1, 10, 5)), # 1D case
(10, scale, X)
]
for (df, scale, x) in parameters:
w = wishart(df, scale)
assert_equal(w.var(), wishart.var(df, scale))
assert_equal(w.mean(), wishart.mean(df, scale))
assert_equal(w.mode(), wishart.mode(df, scale))
assert_equal(w.entropy(), wishart.entropy(df, scale))
assert_equal(w.pdf(x), wishart.pdf(x, df, scale))
def test_1D_is_chisquared(self):
# The 1-dimensional Wishart with an identity scale matrix is just a
# chi-squared distribution.
# Test variance, mean, entropy, pdf
        # Kolmogorov-Smirnov test for rvs
np.random.seed(482974)
sn = 500
dim = 1
scale = np.eye(dim)
df_range = np.arange(1, 10, 2, dtype=float)
X = np.linspace(0.1,10,num=10)
for df in df_range:
w = wishart(df, scale)
c = chi2(df)
# Statistics
assert_allclose(w.var(), c.var())
assert_allclose(w.mean(), c.mean())
assert_allclose(w.entropy(), c.entropy())
# PDF
assert_allclose(w.pdf(X), c.pdf(X))
# rvs
rvs = w.rvs(size=sn)
args = (df,)
alpha = 0.01
check_distribution_rvs('chi2', args, alpha, rvs)
def test_is_scaled_chisquared(self):
        # A multi-dimensional Wishart with an arbitrary scale matrix can be
# transformed to a scaled chi-squared distribution.
# For :math:`S \sim W_p(V,n)` and :math:`\lambda \in \mathbb{R}^p` we have
# :math:`\lambda' S \lambda \sim \lambda' V \lambda \times \chi^2(n)`
np.random.seed(482974)
sn = 500
df = 10
dim = 4
# Construct an arbitrary positive definite matrix
scale = np.diag(np.arange(4)+1)
scale[np.tril_indices(4, k=-1)] = np.arange(6)
scale = np.dot(scale.T, scale)
# Use :math:`\lambda = [1, \dots, 1]'`
lamda = np.ones((dim,1))
sigma_lamda = lamda.T.dot(scale).dot(lamda).squeeze()
w = wishart(df, sigma_lamda)
c = chi2(df, scale=sigma_lamda)
# Statistics
assert_allclose(w.var(), c.var())
assert_allclose(w.mean(), c.mean())
assert_allclose(w.entropy(), c.entropy())
# PDF
X = np.linspace(0.1,10,num=10)
assert_allclose(w.pdf(X), c.pdf(X))
# rvs
rvs = w.rvs(size=sn)
args = (df,0,sigma_lamda)
alpha = 0.01
check_distribution_rvs('chi2', args, alpha, rvs)
class TestInvwishart(TestCase):
def test_frozen(self):
# Test that the frozen and non-frozen inverse Wishart gives the same
# answers
# Construct an arbitrary positive definite scale matrix
dim = 4
scale = np.diag(np.arange(dim)+1)
        scale[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)//2)
scale = np.dot(scale.T, scale)
# Construct a collection of positive definite matrices to test the PDF
X = []
for i in range(5):
x = np.diag(np.arange(dim)+(i+1)**2)
            x[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)//2)
x = np.dot(x.T, x)
X.append(x)
X = np.array(X).T
# Construct a 1D and 2D set of parameters
parameters = [
(10, 1, np.linspace(0.1, 10, 5)), # 1D case
(10, scale, X)
]
for (df, scale, x) in parameters:
iw = invwishart(df, scale)
assert_equal(iw.var(), invwishart.var(df, scale))
assert_equal(iw.mean(), invwishart.mean(df, scale))
assert_equal(iw.mode(), invwishart.mode(df, scale))
assert_allclose(iw.pdf(x), invwishart.pdf(x, df, scale))
def test_1D_is_invgamma(self):
# The 1-dimensional inverse Wishart with an identity scale matrix is
# just an inverse gamma distribution.
# Test variance, mean, pdf
        # Kolmogorov-Smirnov test for rvs
np.random.seed(482974)
sn = 500
dim = 1
scale = np.eye(dim)
df_range = np.arange(5, 20, 2, dtype=float)
X = np.linspace(0.1,10,num=10)
for df in df_range:
iw = invwishart(df, scale)
ig = invgamma(df/2, scale=1./2)
# Statistics
assert_allclose(iw.var(), ig.var())
assert_allclose(iw.mean(), ig.mean())
# PDF
assert_allclose(iw.pdf(X), ig.pdf(X))
# rvs
rvs = iw.rvs(size=sn)
args = (df/2, 0, 1./2)
alpha = 0.01
check_distribution_rvs('invgamma', args, alpha, rvs)
def test_wishart_invwishart_2D_rvs(self):
dim = 3
df = 10
# Construct a simple non-diagonal positive definite matrix
scale = np.eye(dim)
scale[0,1] = 0.5
scale[1,0] = 0.5
# Construct frozen Wishart and inverse Wishart random variables
w = wishart(df, scale)
iw = invwishart(df, scale)
# Get the generated random variables from a known seed
np.random.seed(248042)
w_rvs = wishart.rvs(df, scale)
np.random.seed(248042)
frozen_w_rvs = w.rvs()
np.random.seed(248042)
iw_rvs = invwishart.rvs(df, scale)
np.random.seed(248042)
frozen_iw_rvs = iw.rvs()
# Manually calculate what it should be, based on the Bartlett (1933)
# decomposition of a Wishart into D A A' D', where D is the Cholesky
# factorization of the scale matrix and A is the lower triangular matrix
# with the square root of chi^2 variates on the diagonal and N(0,1)
# variates in the lower triangle.
np.random.seed(248042)
covariances = np.random.normal(size=3)
variances = np.r_[
np.random.chisquare(df),
np.random.chisquare(df-1),
np.random.chisquare(df-2),
]**0.5
# Construct the lower-triangular A matrix
A = np.diag(variances)
A[np.tril_indices(dim, k=-1)] = covariances
# Wishart random variate
D = np.linalg.cholesky(scale)
DA = D.dot(A)
manual_w_rvs = np.dot(DA, DA.T)
# inverse Wishart random variate
# Supposing that the inverse wishart has scale matrix `scale`, then the
# random variate is the inverse of a random variate drawn from a Wishart
# distribution with scale matrix `inv_scale = np.linalg.inv(scale)`
iD = np.linalg.cholesky(np.linalg.inv(scale))
iDA = iD.dot(A)
manual_iw_rvs = np.linalg.inv(np.dot(iDA, iDA.T))
# Test for equality
assert_allclose(w_rvs, manual_w_rvs)
assert_allclose(frozen_w_rvs, manual_w_rvs)
assert_allclose(iw_rvs, manual_iw_rvs)
assert_allclose(frozen_iw_rvs, manual_iw_rvs)
def test_random_state_property():
scale = np.eye(3)
scale[0,1] = 0.5
scale[1,0] = 0.5
dists = [
[multivariate_normal, ()],
[dirichlet, (np.array([1.]), )],
[wishart, (10, scale)],
[invwishart, (10, scale)]
]
for distfn, args in dists:
check_random_state_property(distfn, args)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
adamtiger/tensorflow | tensorflow/contrib/slim/python/slim/nets/resnet_v1_test.py | 49 | 18431 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.resnet_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.contrib.slim.python.slim.nets import resnet_v1
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def create_test_input(batch_size, height, width, channels):
"""Create test input tensor.
Args:
batch_size: The number of images per batch or `None` if unknown.
height: The height of each image or `None` if unknown.
width: The width of each image or `None` if unknown.
channels: The number of channels per image or `None` if unknown.
Returns:
Either a placeholder `Tensor` of dimension
[batch_size, height, width, channels] if any of the inputs are `None` or a
constant `Tensor` with the mesh grid values along the spatial dimensions.
"""
if None in [batch_size, height, width, channels]:
return array_ops.placeholder(dtypes.float32,
(batch_size, height, width, channels))
else:
return math_ops.to_float(
np.tile(
np.reshape(
np.reshape(np.arange(height), [height, 1]) + np.reshape(
np.arange(width), [1, width]), [1, height, width, 1]),
[batch_size, 1, 1, channels]))
class ResnetUtilsTest(test.TestCase):
def testSubsampleThreeByThree(self):
x = array_ops.reshape(math_ops.to_float(math_ops.range(9)), [1, 3, 3, 1])
x = resnet_utils.subsample(x, 2)
expected = array_ops.reshape(
constant_op.constant([0, 2, 6, 8]), [1, 2, 2, 1])
with self.test_session():
self.assertAllClose(x.eval(), expected.eval())
def testSubsampleFourByFour(self):
x = array_ops.reshape(math_ops.to_float(math_ops.range(16)), [1, 4, 4, 1])
x = resnet_utils.subsample(x, 2)
expected = array_ops.reshape(
constant_op.constant([0, 2, 8, 10]), [1, 2, 2, 1])
with self.test_session():
self.assertAllClose(x.eval(), expected.eval())
def testConv2DSameEven(self):
n, n2 = 4, 2
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = array_ops.reshape(w, [3, 3, 1, 1])
variable_scope.get_variable('Conv/weights', initializer=w)
variable_scope.get_variable('Conv/biases', initializer=array_ops.zeros([1]))
variable_scope.get_variable_scope().reuse_variables()
y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = math_ops.to_float([[14, 28, 43, 26], [28, 48, 66, 37],
[43, 66, 84, 46], [26, 37, 46, 22]])
y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = math_ops.to_float([[14, 43], [43, 84]])
y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = math_ops.to_float([[48, 37], [37, 22]])
y4_expected = array_ops.reshape(y4_expected, [1, n2, n2, 1])
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def testConv2DSameOdd(self):
n, n2 = 5, 3
# Input image.
x = create_test_input(1, n, n, 1)
# Convolution kernel.
w = create_test_input(1, 3, 3, 1)
w = array_ops.reshape(w, [3, 3, 1, 1])
variable_scope.get_variable('Conv/weights', initializer=w)
variable_scope.get_variable('Conv/biases', initializer=array_ops.zeros([1]))
variable_scope.get_variable_scope().reuse_variables()
y1 = layers.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
y1_expected = math_ops.to_float([[14, 28, 43, 58, 34], [28, 48, 66, 84, 46],
[43, 66, 84, 102, 55],
[58, 84, 102, 120, 64],
[34, 46, 55, 64, 30]])
y1_expected = array_ops.reshape(y1_expected, [1, n, n, 1])
y2 = resnet_utils.subsample(y1, 2)
y2_expected = math_ops.to_float([[14, 43, 34], [43, 84, 55], [34, 55, 30]])
y2_expected = array_ops.reshape(y2_expected, [1, n2, n2, 1])
y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
y3_expected = y2_expected
y4 = layers.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
y4_expected = y2_expected
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
self.assertAllClose(y1.eval(), y1_expected.eval())
self.assertAllClose(y2.eval(), y2_expected.eval())
self.assertAllClose(y3.eval(), y3_expected.eval())
self.assertAllClose(y4.eval(), y4_expected.eval())
def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
"""A plain ResNet without extra layers before or after the ResNet blocks."""
with variable_scope.variable_scope(scope, values=[inputs]):
with arg_scope([layers.conv2d], outputs_collections='end_points'):
net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
end_points = utils.convert_collection_to_dict('end_points')
return net, end_points
def testEndPointsV1(self):
"""Test the end points of a tiny v1 bottleneck network."""
blocks = [
resnet_v1.resnet_v1_block(
'block1', base_depth=1, num_units=2, stride=2),
resnet_v1.resnet_v1_block(
'block2', base_depth=2, num_units=2, stride=1),
]
inputs = create_test_input(2, 32, 16, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_plain(inputs, blocks, scope='tiny')
expected = [
'tiny/block1/unit_1/bottleneck_v1/shortcut',
'tiny/block1/unit_1/bottleneck_v1/conv1',
'tiny/block1/unit_1/bottleneck_v1/conv2',
'tiny/block1/unit_1/bottleneck_v1/conv3',
'tiny/block1/unit_2/bottleneck_v1/conv1',
'tiny/block1/unit_2/bottleneck_v1/conv2',
'tiny/block1/unit_2/bottleneck_v1/conv3',
'tiny/block2/unit_1/bottleneck_v1/shortcut',
'tiny/block2/unit_1/bottleneck_v1/conv1',
'tiny/block2/unit_1/bottleneck_v1/conv2',
'tiny/block2/unit_1/bottleneck_v1/conv3',
'tiny/block2/unit_2/bottleneck_v1/conv1',
'tiny/block2/unit_2/bottleneck_v1/conv2',
'tiny/block2/unit_2/bottleneck_v1/conv3']
self.assertItemsEqual(expected, end_points)
def _stack_blocks_nondense(self, net, blocks):
"""A simplified ResNet Block stacker without output stride control."""
for block in blocks:
with variable_scope.variable_scope(block.scope, 'block', [net]):
for i, unit in enumerate(block.args):
with variable_scope.variable_scope('unit_%d' % (i + 1), values=[net]):
net = block.unit_fn(net, rate=1, **unit)
return net
def testAtrousValuesBottleneck(self):
"""Verify the values of dense feature extraction by atrous convolution.
Make sure that dense feature extraction by stack_blocks_dense() followed by
subsampling gives identical results to feature extraction at the nominal
network output stride using the simple self._stack_blocks_nondense() above.
"""
block = resnet_v1.resnet_v1_block
blocks = [
block('block1', base_depth=1, num_units=2, stride=2),
block('block2', base_depth=2, num_units=2, stride=2),
block('block3', base_depth=4, num_units=2, stride=2),
block('block4', base_depth=8, num_units=2, stride=1),
]
nominal_stride = 8
# Test both odd and even input dimensions.
height = 30
width = 31
with arg_scope(resnet_utils.resnet_arg_scope()):
with arg_scope([layers.batch_norm], is_training=False):
for output_stride in [1, 2, 4, 8, None]:
with ops.Graph().as_default():
with self.test_session() as sess:
random_seed.set_random_seed(0)
inputs = create_test_input(1, height, width, 3)
# Dense feature extraction followed by subsampling.
output = resnet_utils.stack_blocks_dense(inputs, blocks,
output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
variable_scope.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected = self._stack_blocks_nondense(inputs, blocks)
sess.run(variables.global_variables_initializer())
output, expected = sess.run([output, expected])
self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
class ResnetCompleteNetworkTest(test.TestCase):
"""Tests with complete small ResNet v1 networks."""
def _resnet_small(self,
inputs,
num_classes=None,
is_training=None,
global_pool=True,
output_stride=None,
include_root_block=True,
reuse=None,
scope='resnet_v1_small'):
"""A shallow and thin ResNet v1 for faster tests."""
block = resnet_v1.resnet_v1_block
blocks = [
block('block1', base_depth=1, num_units=3, stride=2),
block('block2', base_depth=2, num_units=3, stride=2),
block('block3', base_depth=4, num_units=3, stride=2),
block('block4', base_depth=8, num_units=2, stride=1),
]
return resnet_v1.resnet_v1(inputs, blocks, num_classes, is_training,
global_pool, output_stride, include_root_block,
reuse, scope)
def testClassificationEndPoints(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
logits, end_points = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(), [2, 1, 1, num_classes])
self.assertTrue('predictions' in end_points)
self.assertListEqual(end_points['predictions'].get_shape().as_list(),
[2, 1, 1, num_classes])
def testClassificationShapes(self):
global_pool = True
num_classes = 10
inputs = create_test_input(2, 224, 224, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 28, 28, 4],
'resnet/block2': [2, 14, 14, 8],
'resnet/block3': [2, 7, 7, 16],
'resnet/block4': [2, 7, 7, 32]
}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 321, 321, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 21, 21, 8],
'resnet/block3': [2, 11, 11, 16],
'resnet/block4': [2, 11, 11, 32]
}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testRootlessFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
inputs = create_test_input(2, 128, 128, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(
inputs,
num_classes,
global_pool=global_pool,
include_root_block=False,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 64, 64, 4],
'resnet/block2': [2, 32, 32, 8],
'resnet/block3': [2, 16, 16, 16],
'resnet/block4': [2, 16, 16, 32]
}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalEndpointShapes(self):
global_pool = False
num_classes = 10
output_stride = 8
inputs = create_test_input(2, 321, 321, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
_, end_points = self._resnet_small(
inputs,
num_classes,
global_pool=global_pool,
output_stride=output_stride,
scope='resnet')
endpoint_to_shape = {
'resnet/block1': [2, 41, 41, 4],
'resnet/block2': [2, 41, 41, 8],
'resnet/block3': [2, 41, 41, 16],
'resnet/block4': [2, 41, 41, 32]
}
for endpoint in endpoint_to_shape:
shape = endpoint_to_shape[endpoint]
self.assertListEqual(end_points[endpoint].get_shape().as_list(), shape)
def testAtrousFullyConvolutionalValues(self):
"""Verify dense feature extraction with atrous convolution."""
nominal_stride = 32
for output_stride in [4, 8, 16, 32, None]:
with arg_scope(resnet_utils.resnet_arg_scope()):
with ops.Graph().as_default():
with self.test_session() as sess:
random_seed.set_random_seed(0)
inputs = create_test_input(2, 81, 81, 3)
# Dense feature extraction followed by subsampling.
output, _ = self._resnet_small(
inputs,
None,
is_training=False,
global_pool=False,
output_stride=output_stride)
if output_stride is None:
factor = 1
else:
factor = nominal_stride // output_stride
output = resnet_utils.subsample(output, factor)
# Make the two networks use the same weights.
variable_scope.get_variable_scope().reuse_variables()
# Feature extraction at the nominal network rate.
expected, _ = self._resnet_small(
inputs, None, is_training=False, global_pool=False)
sess.run(variables.global_variables_initializer())
self.assertAllClose(
output.eval(), expected.eval(), atol=1e-4, rtol=1e-4)
def testUnknownBatchSize(self):
batch = 2
height, width = 65, 65
global_pool = True
num_classes = 10
inputs = create_test_input(None, height, width, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
logits, _ = self._resnet_small(
inputs, num_classes, global_pool=global_pool, scope='resnet')
self.assertTrue(logits.op.name.startswith('resnet/logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, 1, 1, num_classes])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 1, 1, num_classes))
def testFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
inputs = create_test_input(batch, None, None, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(inputs, None, global_pool=global_pool)
self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 3, 3, 32))
def testAtrousFullyConvolutionalUnknownHeightWidth(self):
batch = 2
height, width = 65, 65
global_pool = False
output_stride = 8
inputs = create_test_input(batch, None, None, 3)
with arg_scope(resnet_utils.resnet_arg_scope()):
output, _ = self._resnet_small(
inputs, None, global_pool=global_pool, output_stride=output_stride)
self.assertListEqual(output.get_shape().as_list(), [batch, None, None, 32])
images = create_test_input(batch, height, width, 3)
with self.test_session() as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(output, {inputs: images.eval()})
self.assertEqual(output.shape, (batch, 9, 9, 32))
if __name__ == '__main__':
test.main()
| apache-2.0 |
3nids/QGIS | python/plugins/processing/algs/grass7/ext/r_shade.py | 45 | 1527 | # -*- coding: utf-8 -*-
"""
***************************************************************************
r_shade.py
----------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
def processInputs(alg, parameters, context, feedback):
# We need to import all the bands and color tables of the input rasters
alg.loadRasterLayerFromParameter('shade', parameters, context,
False, None)
alg.loadRasterLayerFromParameter('color', parameters, context,
False, None)
def processOutputs(alg, parameters, context, feedback):
# Keep color table
alg.exportRasterLayerFromParameter('output', parameters, context, True)
| gpl-2.0 |
chylli/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/png_unittest.py | 124 | 5663 | # Copyright (C) 2012 Balazs Ankes ([email protected]) University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for png.py."""
import unittest2 as unittest
from png import PNGChecker
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.systemhost_mock import MockSystemHost
class MockSCMDetector(object):
def __init__(self, scm, prop=None):
self._scm = scm
self._prop = prop
def display_name(self):
return self._scm
def propget(self, pname, path):
return self._prop
class PNGCheckerTest(unittest.TestCase):
"""Tests PNGChecker class."""
def test_init(self):
"""Test __init__() method."""
def mock_handle_style_error(self):
pass
checker = PNGChecker("test/config", mock_handle_style_error, MockSCMDetector('git'), MockSystemHost())
self.assertEqual(checker._file_path, "test/config")
self.assertEqual(checker._handle_style_error, mock_handle_style_error)
def test_check(self):
errors = []
def mock_handle_style_error(line_number, category, confidence, message):
error = (line_number, category, confidence, message)
errors.append(error)
file_path = ''
fs = MockFileSystem()
scm = MockSCMDetector('svn')
checker = PNGChecker(file_path, mock_handle_style_error, scm, MockSystemHost(filesystem=fs))
checker.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0],
(0, 'image/png', 5, 'Set the svn:mime-type property (svn propset svn:mime-type image/png ).'))
files = {'/Users/mock/.subversion/config': 'enable-auto-props = yes\n*.png = svn:mime-type=image/png'}
fs = MockFileSystem(files)
scm = MockSCMDetector('git')
errors = []
checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
checker.check()
self.assertEqual(len(errors), 0)
files = {'/Users/mock/.subversion/config': '#enable-auto-props = yes'}
fs = MockFileSystem(files)
scm = MockSCMDetector('git')
errors = []
checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
checker.check()
self.assertEqual(len(errors), 1)
files = {'/Users/mock/.subversion/config': 'enable-auto-props = yes\n#enable-auto-props = yes\n*.png = svn:mime-type=image/png'}
fs = MockFileSystem(files)
scm = MockSCMDetector('git')
errors = []
checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
checker.check()
self.assertEqual(len(errors), 0)
files = {'/Users/mock/.subversion/config': '#enable-auto-props = yes\nenable-auto-props = yes\n*.png = svn:mime-type=image/png'}
fs = MockFileSystem(files)
scm = MockSCMDetector('git')
errors = []
checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
checker.check()
self.assertEqual(len(errors), 0)
files = {'/Users/mock/.subversion/config': 'enable-auto-props = no'}
fs = MockFileSystem(files)
scm = MockSCMDetector('git')
errors = []
checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
checker.check()
self.assertEqual(len(errors), 1)
file_path = "foo.png"
fs.write_binary_file(file_path, "Dummy binary data")
scm = MockSCMDetector('git')
errors = []
checker = PNGChecker(file_path, mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
checker.check()
self.assertEqual(len(errors), 1)
file_path = "foo-expected.png"
fs.write_binary_file(file_path, "Dummy binary data")
scm = MockSCMDetector('git')
errors = []
checker = PNGChecker(file_path, mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
checker.check()
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0], (0, 'image/png', 5, 'Image lacks a checksum. Generate pngs using run-webkit-tests to ensure they have a checksum.'))
| bsd-3-clause |
MicBrain/Tic_Tac_Toe | Tic_Tac_Toe.py | 1 | 8653 | ###################
### DESCRIPTION ###
###################
"""
Tic-tac-toe (or Noughts and crosses, Xs and Os) is a game for two players, X and O, who take
turns marking the spaces in a 3×3 grid. The player who succeeds in placing three respective marks
in a horizontal, vertical, or diagonal row wins the game.
The simplicity of Tic-tac-toe makes it ideal as a pedagogical tool for teaching the concepts
of good sportsmanship and the branch of artificial intelligence that deals with the searching of
game trees. It is straightforward to write a computer program to play Tic-tac-toe perfectly.
The game can be generalized to an m,n,k-game in which two players alternate placing stones of
their own color on an m×n board, with the goal of getting k of their own color in a row. Tic-tac-toe
is the (3,3,3)-game.
Despite its apparent simplicity, Tic-tac-toe requires detailed analysis to determine even some
elementary combinatory facts, the most interesting of which are the number of possible games and the
number of possible positions. A position is merely a state of the board, while a game usually refers
to the way a terminal position is obtained.
"""
from string import *
from random import *
import itertools
import math
####################
## MAIN VARIABLES ##
####################
Player_1 = 'x' # player 1's mark
Player_2 = 'o' # player 2's mark
A = 'A' # these just make it easier to keep referring to 'A', 'B' and 'C'
B = 'B'
C = 'C'
#####################
## State variables ##
#####################
EMPTY = ' '
Table = [[EMPTY, EMPTY, EMPTY],
[EMPTY, EMPTY, EMPTY],
[EMPTY, EMPTY, EMPTY]]
current = randint(1, 2)
#########################
### Coordinate system ###
#########################
def square(row, col): # squares are represented as tuples of (row, col).
return (row, col) # rows are numbered 1 thru 3, cols 'A' thru 'C'.
def square_row(square): # these two functions save us the hassle of using
return square[0] # index values in our code, e.g. square[0]...
def square_col(square): # from this point on, i should never directly use
return square[1] # tuples when working with squares.
def get_square(square):
row_i = square_row(square) - 1
col_i = ord(square_col(square)) - ord(A)
return Table[row_i][col_i] # note how this and set_square are the ONLY
# functions which directly use board!
def set_square(square, mark):
row_i = square_row(square) - 1
col_i = ord(square_col(square)) - ord(A)
Table[row_i][col_i] = mark # note how this and get_square are the ONLY
def get_row(row):
return [get_square((row, A)), get_square((row, B)), get_square((row, C))]
def get_column(col):
return [get_square((1, col)), get_square((2, col)), get_square((3, col))]
def get_diagonal(corner_square):
if corner_square == (1, A) or corner_square == (3, C):
return [get_square((1, A)), get_square((2, B)), get_square((3, C))]
else:
return [get_square((1, C)), get_square((2, B)), get_square((3, A))]
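# Illustrative usage of the board helpers above (example added for clarity; the
# values assume an otherwise empty board):
#   set_square(square(2, B), Player_1)   # marks the centre cell with 'x'
#   get_square((2, B))                   # -> 'x'
#   get_row(2)                           # -> [' ', 'x', ' ']
#   get_diagonal((1, A))                 # -> [' ', 'x', ' ']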
def get_mark(player):
if player == 1:
return Player_1
else:
return Player_2
def all_squares_filled():
for row in range(1, 4): # range(1, 4) returns the list [1, 2, 3]
if EMPTY in get_row(row):
return False # this row contains an empty square, we know enough
return True # no empty squares found, all squares are filled
def player_has_won(player):
MARK = get_mark(player)
win = [MARK, MARK, MARK]
if get_row(1) == win or get_row(2) == win or get_row(3) == win:
return True
if get_column(A) == win or get_column(B) == win or get_column(C) == win:
return True
if get_diagonal((1, A)) == win or get_diagonal((1, C)) == win:
return True
return False
def draw_board_straight():
A1, A2, A3 = get_square((1, A)), get_square((2, A)), get_square((3, A))
B1, B2, B3 = get_square((1, B)), get_square((2, B)), get_square((3, B))
C1, C2, C3 = get_square((1, C)), get_square((2, C)), get_square((3, C))
lines = []
lines.append("")
lines.append(" " + A + " " + B + " " + C + " ")
lines.append(" ")
lines.append("1 " + A1 + " | " + B1 + " | " + C1 + " ")
lines.append(" ---+---+---")
lines.append("2 " + A2 + " | " + B2 + " | " + C2 + " ")
lines.append(" ---+---+---")
lines.append("3 " + A3 + " | " + B3 + " | " + C3 + " ")
lines.append("")
    return '\n'.join(lines) # the '\n' represents a newline
def draw_board_slanted():
A1, A2, A3 = get_square((1, A)), get_square((2, A)), get_square((3, A))
B1, B2, B3 = get_square((1, B)), get_square((2, B)), get_square((3, B))
C1, C2, C3 = get_square((1, C)), get_square((2, C)), get_square((3, C))
lines = []
lines.append("")
lines.append(" " + A + " " + B + " " + C + " ")
lines.append(" ")
lines.append(" 1 " + A1 + " / " + B1 + " / " + C1 + " ")
lines.append(" ---/---/--- ")
lines.append(" 2 " + A2 + " / " + B2 + " / " + C2 + " ")
lines.append(" ---/---/--- ")
lines.append("3 " + A3 + " / " + B3 + " / " + C3 + " ")
lines.append("")
    return '\n'.join(lines)
def draw_board():
return draw_board_slanted()
def reset_main_board():
for row in (1, 2, 3):
for col in (A, B, C):
set_square(square(row, col), EMPTY)
def play():
global current
reset_main_board()
current = randint(1, 2)
print ("Tic-Tac-Toe!")
    print()
player1_name = input("Player 1, what is your name? ")
player2_name = input("Player 2, what is your name? ")
def get_name(player):
if player == 1:
return player1_name
else:
return player2_name
    print()
print ("Welcome,", player1_name, "and", player2_name + "!")
print (player1_name, "will be", Player_1 + ", and", player2_name, "will be", Player_2 + ".")
print ("By random decision,", get_name(current), "will go first.")
    print()
input("[Press enter when ready to play.] ") # just waiting for them to press enter
print (draw_board())
while not all_squares_filled():
choice = input(get_name(current) + ", which square? (e.g. 2B, 2b, B2 or b2) ")
if len(choice) != 2:
print ("That's not a square. You must enter a square like b2, or 3C.")
            print()
continue
if choice[0] not in ["1", "2", "3"] and str.upper(choice[0]) not in [A, B, C]:
print ("The first character must be a row (1, 2 or 3) or column (A, B or C).")
            print()
continue
if choice[1] not in ["1", "2", "3"] and str.upper(choice[1]) not in [A, B, C]:
print ("The second character must be a row (1, 2 or 3) or column (A, B or C).")
            print()
continue
if choice[0] in ["1", "2", "3"] and choice[1] in ["1", "2", "3"]:
print ("You entered two rows! You must enter one row and one column (A, B or C).")
            print()
continue
if str.upper(choice[0]) in [A, B, C] and str.upper(choice[1]) in [A, B, C]:
print ("You entered two columns! You must enter one row (1, 2 or 3) and one column.")
            print()
continue
if choice[0] in ["1", "2", "3"]:
row = int(choice[0])
col = str.upper(choice[1])
else:
row = int(choice[1])
col = str.upper(choice[0])
choice = square(row, col) # make this into a (row, col) tuple
if get_square(choice) != EMPTY:
print ("Sorry, that square is already marked.")
            print()
continue
set_square(choice, get_mark(current))
print (draw_board())
if player_has_won(current):
print ("Congratulations", get_name(current), "-- you win!")
            print()
break
if all_squares_filled():
print ("Cats game!", player1_name, "and", player2_name, "draw.")
            print()
break
current = 3 - current # sets 1 to 2 and 2 to 1
print ("GAME IS OVER")
    print()
if __name__ == "__main__":
continue_playing = True
while continue_playing:
play()
again = str.lower(input("Play again? (y/n) "))
        print()
        print()
        print()
if again != "y":
continue_playing = False
print ("Thanks for playing!")
    print()
| gpl-3.0 |
osu-cass/whats-fresh-api | whats_fresh/whats_fresh_api/tests/views/entry/test_new_image.py | 2 | 3620 | from django.test import TestCase
from django.core.urlresolvers import reverse
from whats_fresh.whats_fresh_api.models import Image
from django.contrib.auth.models import User, Group
import os
class NewImageTestCase(TestCase):
"""
Test that the New Image page works as expected.
Things tested:
URLs reverse correctly
The outputted page has the correct form fields
POSTing "correct" data will result in the creation of a new
object with the specified details
POSTing data with all fields missing (hitting "save" without entering
data) returns the same field with notations of missing fields
"""
def setUp(self):
user = User.objects.create_user(
'temporary', '[email protected]', 'temporary')
user.save()
admin_group = Group(name='Administration Users')
admin_group.save()
user.groups.add(admin_group)
response = self.client.login(
username='temporary', password='temporary')
self.assertEqual(response, True)
self.test_media_directory = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', 'testdata', 'media'))
self.image = open(
            os.path.join(self.test_media_directory, 'cat.jpg'), 'rb')
def tearDown(self):
self.image.close()
def test_not_logged_in(self):
self.client.logout()
response = self.client.get(
reverse('new-image'))
self.assertRedirects(response, '/login?next=/entry/images/new')
def test_url_endpoint(self):
url = reverse('new-image')
self.assertEqual(url, '/entry/images/new')
def test_form_fields(self):
"""
Tests to see if the form contains all of the right fields
"""
response = self.client.get(reverse('new-image'))
fields = {'image': 'file', 'caption': 'input', 'name': 'input'}
form = response.context['image_form']
for field in fields:
# for the Edit tests, you should be able to access
# form[field].value
self.assertIn(fields[field], str(form[field]))
def test_successful_image_creation(self):
"""
POST a proper "new image" command to the server, and see if the
new image appears in the database. All optional fields are null.
"""
Image.objects.all().delete()
# Data that we'll post to the server to get the new image created
new_image = {
'caption': "Catption",
'name': "A cat",
'image': self.image}
self.client.post(reverse('new-image'), new_image)
image = Image.objects.all()[0]
self.assertEqual(getattr(image, 'caption'), new_image['caption'])
self.assertEqual(getattr(image, 'name'), new_image['name'])
self.assertIn('/media/images/cat', getattr(image, 'image').url)
def test_no_data_error(self):
"""
POST a "new image" command to the server missing all of the
required fields, and test to see what the error comes back as.
"""
# Create a list of all objects before sending bad POST data
all_images = Image.objects.all()
response = self.client.post(reverse('new-image'))
required_fields = ['image', 'name']
for field_name in required_fields:
self.assertIn(field_name,
response.context['image_form'].errors)
# Test that we didn't add any new objects
self.assertEqual(
list(Image.objects.all()), list(all_images))
| apache-2.0 |
danakj/chromium | chrome/test/data/nacl/debug_stub_browser_tests.py | 42 | 3536 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import sys
import xml.etree.ElementTree
import gdb_rsp
def AssertRaises(exc_class, func):
try:
func()
except exc_class:
pass
else:
raise AssertionError('Function did not raise %r' % exc_class)
def GetTargetArch(connection):
"""Get the CPU architecture of the NaCl application."""
reply = connection.RspRequest('qXfer:features:read:target.xml:0,fff')
assert reply[0] == 'l', reply
tree = xml.etree.ElementTree.fromstring(reply[1:])
arch_tag = tree.find('architecture')
assert arch_tag is not None, reply
return arch_tag.text.strip()
def ReverseBytes(byte_string):
"""Reverse bytes in the hex string: '09ab' -> 'ab09'. This converts
little-endian number in the hex string to its normal string representation.
"""
assert len(byte_string) % 2 == 0, byte_string
return ''.join([byte_string[i - 2 : i]
for i in xrange(len(byte_string), 0, -2)])
def GetProgCtrString(connection, arch):
"""Get current execution point."""
registers = connection.RspRequest('g')
# PC register indices can be found in
# native_client/src/trusted/debug_stub/abi.cc in AbiInit function.
if arch == 'i386':
# eip index is 8
return ReverseBytes(registers[8 * 8 : 8 * 8 + 8])
if arch == 'i386:x86-64':
# rip index is 16
return ReverseBytes(registers[16 * 16 : 16 * 16 + 8])
if arch == 'iwmmxt':
# pc index is 15
return ReverseBytes(registers[15 * 8 : 15 * 8 + 8])
raise AssertionError('Unknown architecture: %s' % arch)
def TestContinue(connection):
# Once the NaCl test module reports that the test passed, the NaCl <embed>
# element is removed from the page. The NaCl module will be killed by the
# browser which will appear as EOF (end-of-file) on the debug stub socket.
AssertRaises(gdb_rsp.EofOnReplyException,
lambda: connection.RspRequest('vCont;c'))
def TestBreakpoint(connection):
# Breakpoints and single-stepping might interfere with Chrome sandbox. So we
# check that they work properly in this test.
arch = GetTargetArch(connection)
registers = connection.RspRequest('g')
pc = GetProgCtrString(connection, arch)
# Set breakpoint
result = connection.RspRequest('Z0,%s,1' % pc)
assert result == 'OK', result
# Check that we stopped at breakpoint
result = connection.RspRequest('vCont;c')
stop_reply = re.compile(r'T05thread:(\d+);')
assert stop_reply.match(result), result
thread = stop_reply.match(result).group(1)
# Check that registers haven't changed
result = connection.RspRequest('g')
assert result == registers, (result, registers)
# Remove breakpoint
result = connection.RspRequest('z0,%s,1' % pc)
assert result == 'OK', result
# Check single stepping
result = connection.RspRequest('vCont;s:%s' % thread)
assert result == 'T05thread:%s;' % thread, result
assert pc != GetProgCtrString(connection, arch)
# Check that we terminate normally
AssertRaises(gdb_rsp.EofOnReplyException,
lambda: connection.RspRequest('vCont;c'))
def Main(args):
port = int(args[0])
name = args[1]
connection = gdb_rsp.GdbRspConnection(('localhost', port))
if name == 'continue':
TestContinue(connection)
elif name == 'breakpoint':
TestBreakpoint(connection)
else:
raise AssertionError('Unknown test name: %r' % name)
if __name__ == '__main__':
Main(sys.argv[1:])
| bsd-3-clause |
jpetto/bedrock | bedrock/firefox/helpers.py | 1 | 8778 | from collections import OrderedDict
from django.core.cache import cache
from django.conf import settings
import jingo
import jinja2
from bedrock.firefox.models import FirefoxOSFeedLink
from bedrock.firefox.firefox_details import firefox_desktop, firefox_android, firefox_ios
from bedrock.base.urlresolvers import reverse
from lib.l10n_utils import get_locale
def android_builds(channel, builds=None):
builds = builds or []
variations = OrderedDict([
('api-9', 'Gingerbread'),
('api-15', 'Ice Cream Sandwich+'),
('x86', 'x86'),
])
if channel == 'alpha':
for type, arch_pretty in variations.iteritems():
link = firefox_android.get_download_url('alpha', type)
builds.append({'os': 'android',
'os_pretty': 'Android',
'os_arch_pretty': 'Android %s' % arch_pretty,
'arch': 'x86' if type == 'x86' else 'armv7up %s' % type,
'arch_pretty': arch_pretty,
'download_link': link})
else:
link = firefox_android.get_download_url(channel)
builds.append({'os': 'android',
'os_pretty': 'Android',
'download_link': link})
return builds
def ios_builds(channel, builds=None):
builds = builds or []
link = firefox_ios.get_download_url(channel)
builds.append({'os': 'ios',
'os_pretty': 'iOS',
'download_link': link})
return builds
@jingo.register.function
@jinja2.contextfunction
def download_firefox(ctx, channel='release', small=False, icon=True,
platform='all', dom_id=None, locale=None, simple=False,
force_direct=False, force_full_installer=False,
force_funnelcake=False, check_old_fx=False):
""" Output a "download firefox" button.
:param ctx: context from calling template.
:param channel: name of channel: 'release', 'beta' or 'alpha'.
:param small: Display the small button if True.
:param icon: Display the Fx icon on the button if True.
:param platform: Target platform: 'desktop', 'android', 'ios', or 'all'.
:param dom_id: Use this string as the id attr on the element.
:param locale: The locale of the download. Default to locale of request.
:param simple: Display button with text only if True. Will not display
icon or privacy/what's new/systems & languages links. Can be used
in conjunction with 'small'.
:param force_direct: Force the download URL to be direct.
:param force_full_installer: Force the installer download to not be
the stub installer (for aurora).
:param force_funnelcake: Force the download version for en-US Windows to be
'latest', which bouncer will translate to the funnelcake build.
:param check_old_fx: Checks to see if the user is on an old version of
Firefox and, if true, changes the button text from 'Free Download'
to 'Update your Firefox'. Must be used in conjunction with
'simple' param being true.
:return: The button html.
"""
show_desktop = platform in ['all', 'desktop']
show_android = platform in ['all', 'android']
show_ios = platform in ['all', 'ios']
alt_channel = '' if channel == 'release' else channel
locale = locale or get_locale(ctx['request'])
funnelcake_id = ctx.get('funnelcake_id', False)
dom_id = dom_id or 'download-button-%s-%s' % (
'desktop' if platform == 'all' else platform, channel)
l_version = firefox_desktop.latest_builds(locale, channel)
if l_version:
version, platforms = l_version
else:
locale = 'en-US'
version, platforms = firefox_desktop.latest_builds('en-US', channel)
# Gather data about the build for each platform
builds = []
if show_desktop:
for plat_os, plat_os_pretty in firefox_desktop.platform_labels.iteritems():
# Windows 64-bit builds are not available on the ESR channel yet
if plat_os == 'win64' and channel in ['esr', 'esr_next']:
continue
# Fallback to en-US if this plat_os/version isn't available
# for the current locale
_locale = locale if plat_os_pretty in platforms else 'en-US'
# And generate all the info
download_link = firefox_desktop.get_download_url(
channel, version, plat_os, _locale,
force_direct=force_direct,
force_full_installer=force_full_installer,
force_funnelcake=force_funnelcake,
funnelcake_id=funnelcake_id,
)
# If download_link_direct is False the data-direct-link attr
# will not be output, and the JS won't attempt the IE popup.
if force_direct:
# no need to run get_download_url again with the same args
download_link_direct = False
else:
download_link_direct = firefox_desktop.get_download_url(
channel, version, plat_os, _locale,
force_direct=True,
force_full_installer=force_full_installer,
force_funnelcake=force_funnelcake,
funnelcake_id=funnelcake_id,
)
if download_link_direct == download_link:
download_link_direct = False
builds.append({'os': plat_os,
'os_pretty': plat_os_pretty,
'download_link': download_link,
'download_link_direct': download_link_direct})
if show_android:
builds = android_builds(channel, builds)
if show_ios:
builds.append({'os': 'ios',
'os_pretty': 'iOS',
'download_link': firefox_ios.get_download_url()})
# Get the native name for current locale
langs = firefox_desktop.languages
locale_name = langs[locale]['native'] if locale in langs else locale
data = {
'locale_name': locale_name,
'version': version,
'product': 'firefox-%s' % platform,
'builds': builds,
'id': dom_id,
'small': small,
'simple': simple,
'channel': alt_channel,
'show_desktop': show_desktop,
'show_android': show_android,
'show_ios': show_ios,
'icon': icon,
'check_old_fx': check_old_fx and simple,
}
html = jingo.render_to_string(ctx['request'],
'firefox/includes/download-button.html',
data)
return jinja2.Markup(html)
@jingo.register.function
def firefox_url(platform, page, channel=None):
"""
Return a product-related URL like /firefox/all/ or /mobile/beta/notes/.
Examples
========
In Template
-----------
{{ firefox_url('desktop', 'all', 'organizations') }}
{{ firefox_url('desktop', 'sysreq', channel) }}
{{ firefox_url('android', 'notes') }}
"""
kwargs = {}
# Tweak the channel name for the naming URL pattern in urls.py
if channel == 'release':
channel = None
if channel == 'alpha':
if platform == 'desktop':
channel = 'developer'
if platform == 'android':
channel = 'aurora'
if channel == 'esr':
channel = 'organizations'
if channel:
kwargs['channel'] = channel
if platform != 'desktop':
kwargs['platform'] = platform
# Firefox for Android and iOS have the system requirements page on SUMO
if platform in ['android', 'ios'] and page == 'sysreq':
return settings.FIREFOX_MOBILE_SYSREQ_URL
return reverse('firefox.%s' % page, kwargs=kwargs)
@jingo.register.function
def firefox_os_feed_links(locale, force_cache_refresh=False):
if locale in settings.FIREFOX_OS_FEED_LOCALES:
cache_key = 'firefox-os-feed-links-' + locale
if not force_cache_refresh:
links = cache.get(cache_key)
if links:
return links
links = list(
FirefoxOSFeedLink.objects.filter(locale=locale).order_by(
'-id').values_list('link', 'title')[:10])
cache.set(cache_key, links)
return links
elif '-' in locale:
return firefox_os_feed_links(locale.split('-')[0])
@jingo.register.function
def firefox_os_blog_link(locale):
try:
return settings.FXOS_PRESS_BLOG_LINKS[locale]
except KeyError:
if '-' in locale:
return firefox_os_blog_link(locale.split('-')[0])
else:
return None
| mpl-2.0 |
PeterSurda/PyBitmessage | src/kivymd/bottomsheet.py | 3 | 6751 | # -*- coding: utf-8 -*-
'''
Bottom Sheets
=============
`Material Design spec Bottom Sheets page <http://www.google.com/design/spec/components/bottom-sheets.html>`_
In this module there's the :class:`MDBottomSheet` class which will let you implement your own Material Design Bottom Sheets, and there are two classes called :class:`MDListBottomSheet` and :class:`MDGridBottomSheet` implementing the ones mentioned in the spec.
Examples
--------
.. note::
These widgets are designed to be called from Python code only.
For :class:`MDListBottomSheet`:
.. code-block:: python
bs = MDListBottomSheet()
bs.add_item("Here's an item with text only", lambda x: x)
bs.add_item("Here's an item with an icon", lambda x: x, icon='md-cast')
bs.add_item("Here's another!", lambda x: x, icon='md-nfc')
bs.open()
For :class:`MDGridBottomSheet`:
.. code-block:: python
bs = MDGridBottomSheet()
bs.add_item("Facebook", lambda x: x, icon_src='./assets/facebook-box.png')
bs.add_item("YouTube", lambda x: x, icon_src='./assets/youtube-play.png')
bs.add_item("Twitter", lambda x: x, icon_src='./assets/twitter.png')
bs.add_item("Da Cloud", lambda x: x, icon_src='./assets/cloud-upload.png')
bs.add_item("Camera", lambda x: x, icon_src='./assets/camera.png')
bs.open()
API
---
'''
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import ObjectProperty, StringProperty
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.modalview import ModalView
from kivy.uix.scrollview import ScrollView
from kivymd.backgroundcolorbehavior import BackgroundColorBehavior
from kivymd.label import MDLabel
from kivymd.list import MDList, OneLineListItem, ILeftBody, \
OneLineIconListItem
from kivymd.theming import ThemableBehavior
Builder.load_string('''
<MDBottomSheet>
background: 'atlas://data/images/defaulttheme/action_group_disabled'
background_color: 0,0,0,.8
sv: sv
upper_padding: upper_padding
gl_content: gl_content
ScrollView:
id: sv
do_scroll_x: False
BoxLayout:
size_hint_y: None
orientation: 'vertical'
padding: 0,1,0,0
height: upper_padding.height + gl_content.height + 1 # +1 to allow overscroll
BsPadding:
id: upper_padding
size_hint_y: None
height: root.height - min(root.width * 9 / 16, gl_content.height)
on_release: root.dismiss()
BottomSheetContent:
id: gl_content
size_hint_y: None
background_color: root.theme_cls.bg_normal
cols: 1
''')
class BsPadding(ButtonBehavior, FloatLayout):
pass
class BottomSheetContent(BackgroundColorBehavior, GridLayout):
pass
class MDBottomSheet(ThemableBehavior, ModalView):
sv = ObjectProperty()
upper_padding = ObjectProperty()
gl_content = ObjectProperty()
dismiss_zone_scroll = 1000 # Arbitrary high number
def open(self, *largs):
super(MDBottomSheet, self).open(*largs)
Clock.schedule_once(self.set_dismiss_zone, 0)
def set_dismiss_zone(self, *largs):
# Scroll to right below overscroll threshold:
self.sv.scroll_y = 1 - self.sv.convert_distance_to_scroll(0, 1)[1]
# This is a line where m (slope) is 1/6 and b (y-intercept) is 80:
self.dismiss_zone_scroll = self.sv.convert_distance_to_scroll(
0, (self.height - self.upper_padding.height) * (1 / 6.0) + 80)[
1]
# Uncomment next line if the limit should just be half of
# visible content on open (capped by specs to 16 units to width/9:
# self.dismiss_zone_scroll = (self.sv.convert_distance_to_scroll(
# 0, self.height - self.upper_padding.height)[1] * 0.50)
# Check if user has overscrolled enough to dismiss bottom sheet:
self.sv.bind(on_scroll_stop=self.check_if_scrolled_to_death)
def check_if_scrolled_to_death(self, *largs):
if self.sv.scroll_y >= 1 + self.dismiss_zone_scroll:
self.dismiss()
def add_widget(self, widget, index=0):
if type(widget) == ScrollView:
super(MDBottomSheet, self).add_widget(widget, index)
else:
self.gl_content.add_widget(widget,index)
Builder.load_string('''
#:import md_icons kivymd.icon_definitions.md_icons
<ListBSIconLeft>
font_style: 'Icon'
text: u"{}".format(md_icons[root.icon])
halign: 'center'
theme_text_color: 'Primary'
valign: 'middle'
''')
class ListBSIconLeft(ILeftBody, MDLabel):
icon = StringProperty()
class MDListBottomSheet(MDBottomSheet):
mlist = ObjectProperty()
def __init__(self, **kwargs):
super(MDListBottomSheet, self).__init__(**kwargs)
self.mlist = MDList()
self.gl_content.add_widget(self.mlist)
Clock.schedule_once(self.resize_content_layout, 0)
def resize_content_layout(self, *largs):
self.gl_content.height = self.mlist.height
def add_item(self, text, callback, icon=None):
if icon:
item = OneLineIconListItem(text=text, on_release=callback)
item.add_widget(ListBSIconLeft(icon=icon))
else:
item = OneLineListItem(text=text, on_release=callback)
item.bind(on_release=lambda x: self.dismiss())
self.mlist.add_widget(item)
Builder.load_string('''
<GridBSItem>
orientation: 'vertical'
padding: 0, dp(24), 0, 0
size_hint_y: None
size: dp(64), dp(96)
BoxLayout:
padding: dp(8), 0, dp(8), dp(8)
size_hint_y: None
height: dp(48)
Image:
source: root.source
MDLabel:
font_style: 'Caption'
theme_text_color: 'Secondary'
text: root.caption
halign: 'center'
''')
class GridBSItem(ButtonBehavior, BoxLayout):
source = StringProperty()
caption = StringProperty()
class MDGridBottomSheet(MDBottomSheet):
def __init__(self, **kwargs):
super(MDGridBottomSheet, self).__init__(**kwargs)
self.gl_content.padding = (dp(16), 0, dp(16), dp(24))
self.gl_content.height = dp(24)
self.gl_content.cols = 3
def add_item(self, text, callback, icon_src):
item = GridBSItem(
caption=text,
on_release=callback,
source=icon_src
)
item.bind(on_release=lambda x: self.dismiss())
if len(self.gl_content.children) % 3 == 0:
self.gl_content.height += dp(96)
self.gl_content.add_widget(item)
| mit |
Metaswitch/calico-neutron | neutron/db/migration/alembic_migrations/versions/236b90af57ab_ml2_refactor_for_dynamic_segments.py | 17 | 1170 | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""ml2_type_driver_refactor_dynamic_segments
Revision ID: 236b90af57ab
Revises: 58fe87a01143
Create Date: 2014-08-14 16:22:14.293788
"""
# revision identifiers, used by Alembic.
revision = '236b90af57ab'
down_revision = '58fe87a01143'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('ml2_network_segments',
sa.Column('is_dynamic', sa.Boolean(), nullable=False,
server_default=sa.sql.false()))
def downgrade():
op.drop_column('ml2_network_segments', 'is_dynamic')
| apache-2.0 |
Elettronik/SickRage | lib/pgi/cffilib/gir/giunioninfo.py | 20 | 1903 | # Copyright 2013 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
from .._compat import xrange
from ._ffi import lib
from .gibaseinfo import GIBaseInfo, GIInfoType
from .gitypeinfo import GITypeInfo
from .giregisteredtypeinfo import GIRegisteredTypeInfo
@GIBaseInfo._register(GIInfoType.UNION)
class GIUnionInfo(GIRegisteredTypeInfo):
@property
def n_fields(self):
return lib.g_union_info_get_n_fields(self._ptr)
def get_field(self, n):
return lib.g_union_info_get_field(self._ptr, n)
def get_fields(self):
for i in xrange(self.n_fields):
yield self.get_field(i)
@property
def n_methods(self):
return lib.g_union_info_get_n_methods(self._ptr)
    def get_method(self, n):
        return lib.g_union_info_get_method(self._ptr, n)
    def get_methods(self):
        for i in xrange(self.n_methods):
            yield self.get_method(i)
@property
def is_discriminated(self):
return bool(lib.g_union_info_is_discriminated(self._ptr))
@property
def discriminator_offset(self):
return lib.g_union_info_get_discriminator_offset(self._ptr)
@property
def discriminator_type(self):
return GITypeInfo(lib.g_union_info_get_discriminator_type(self._ptr))
def get_discriminator(self, n):
# FIXME
return lib.g_union_info_get_discriminator(self._ptr, n)
def find_method(self, name):
# FIXME
return lib.g_union_info_find_method(self._ptr, name)
@property
def size(self):
return lib.g_union_info_get_size(self._ptr)
@property
def alignment(self):
return lib.g_union_info_get_alignment(self._ptr)
| gpl-3.0 |
KohlsTechnology/ansible | test/runner/lib/docker_util.py | 16 | 5429 | """Functions for accessing docker via the docker cli."""
from __future__ import absolute_import, print_function
import json
import os
import time
from lib.executor import (
SubprocessError,
)
from lib.util import (
ApplicationError,
run_command,
common_environment,
display,
)
from lib.config import (
EnvironmentConfig,
)
BUFFER_SIZE = 256 * 256
def get_docker_container_id():
"""
:rtype: str | None
"""
path = '/proc/self/cgroup'
if not os.path.exists(path):
return None
with open(path) as cgroup_fd:
contents = cgroup_fd.read()
paths = [line.split(':')[2] for line in contents.splitlines()]
container_ids = set(path.split('/')[2] for path in paths if path.startswith('/docker/'))
if not container_ids:
return None
if len(container_ids) == 1:
return container_ids.pop()
raise ApplicationError('Found multiple container_id candidates: %s\n%s' % (sorted(container_ids), contents))
def docker_pull(args, image):
"""
:type args: EnvironmentConfig
:type image: str
"""
if not args.docker_pull:
display.warning('Skipping docker pull for "%s". Image may be out-of-date.' % image)
return
for _ in range(1, 10):
try:
docker_command(args, ['pull', image])
return
except SubprocessError:
display.warning('Failed to pull docker image "%s". Waiting a few seconds before trying again.' % image)
time.sleep(3)
raise ApplicationError('Failed to pull docker image "%s".' % image)
def docker_put(args, container_id, src, dst):
"""
:type args: EnvironmentConfig
:type container_id: str
:type src: str
:type dst: str
"""
# avoid 'docker cp' due to a bug which causes 'docker rm' to fail
with open(src, 'rb') as src_fd:
docker_exec(args, container_id, ['dd', 'of=%s' % dst, 'bs=%s' % BUFFER_SIZE],
options=['-i'], stdin=src_fd, capture=True)
def docker_get(args, container_id, src, dst):
"""
:type args: EnvironmentConfig
:type container_id: str
:type src: str
:type dst: str
"""
# avoid 'docker cp' due to a bug which causes 'docker rm' to fail
with open(dst, 'wb') as dst_fd:
docker_exec(args, container_id, ['dd', 'if=%s' % src, 'bs=%s' % BUFFER_SIZE],
options=['-i'], stdout=dst_fd, capture=True)
def docker_run(args, image, options, cmd=None):
"""
:type args: EnvironmentConfig
:type image: str
:type options: list[str] | None
:type cmd: list[str] | None
:rtype: str | None, str | None
"""
if not options:
options = []
if not cmd:
cmd = []
for _ in range(1, 3):
try:
return docker_command(args, ['run'] + options + [image] + cmd, capture=True)
except SubprocessError as ex:
display.error(ex)
display.warning('Failed to run docker image "%s". Waiting a few seconds before trying again.' % image)
time.sleep(3)
raise ApplicationError('Failed to run docker image "%s".' % image)
def docker_rm(args, container_id):
"""
:type args: EnvironmentConfig
:type container_id: str
"""
docker_command(args, ['rm', '-f', container_id], capture=True)
def docker_inspect(args, container_id):
"""
:type args: EnvironmentConfig
:type container_id: str
:rtype: list[dict]
"""
if args.explain:
return []
try:
stdout, _ = docker_command(args, ['inspect', container_id], capture=True)
return json.loads(stdout)
except SubprocessError as ex:
try:
return json.loads(ex.stdout)
except:
raise ex # pylint: disable=locally-disabled, raising-bad-type
def docker_network_inspect(args, network):
"""
:type args: EnvironmentConfig
:type network: str
:rtype: list[dict]
"""
if args.explain:
return []
try:
stdout, _ = docker_command(args, ['network', 'inspect', network], capture=True)
return json.loads(stdout)
except SubprocessError as ex:
try:
return json.loads(ex.stdout)
except:
raise ex # pylint: disable=locally-disabled, raising-bad-type
def docker_exec(args, container_id, cmd, options=None, capture=False, stdin=None, stdout=None):
"""
:type args: EnvironmentConfig
:type container_id: str
:type cmd: list[str]
:type options: list[str] | None
:type capture: bool
:type stdin: file | None
:type stdout: file | None
:rtype: str | None, str | None
"""
if not options:
options = []
return docker_command(args, ['exec'] + options + [container_id] + cmd, capture=capture, stdin=stdin, stdout=stdout)
def docker_command(args, cmd, capture=False, stdin=None, stdout=None):
"""
:type args: EnvironmentConfig
:type cmd: list[str]
:type capture: bool
:type stdin: file | None
:type stdout: file | None
:rtype: str | None, str | None
"""
env = docker_environment()
return run_command(args, ['docker'] + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout)
def docker_environment():
"""
:rtype: dict[str, str]
"""
env = common_environment()
env.update(dict((key, os.environ[key]) for key in os.environ if key.startswith('DOCKER_')))
return env
| gpl-3.0 |
satish-avninetworks/murano | murano/dsl/murano_package.py | 1 | 7758 | # Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import weakref
import semantic_version
import six
from yaql.language import specs
from yaql.language import utils
from murano.dsl import constants
from murano.dsl import dsl_types
from murano.dsl import exceptions
from murano.dsl import helpers
from murano.dsl import meta as dslmeta
from murano.dsl import murano_object
from murano.dsl import murano_type
from murano.dsl import namespace_resolver
from murano.dsl import principal_objects
from murano.dsl import yaql_integration
class MuranoPackage(dsl_types.MuranoPackage, dslmeta.MetaProvider):
def __init__(self, package_loader, name, version=None,
runtime_version=None, requirements=None, meta=None):
super(MuranoPackage, self).__init__()
self._package_loader = weakref.proxy(package_loader)
self._name = name
self._meta = None
self._version = helpers.parse_version(version)
self._runtime_version = helpers.parse_version(runtime_version)
self._requirements = {
name: semantic_version.Spec('==' + str(self._version.major))
}
if name != constants.CORE_LIBRARY:
self._requirements[constants.CORE_LIBRARY] = \
semantic_version.Spec('==0')
self._classes = {}
self._imported_types = {object, murano_object.MuranoObject}
for key, value in six.iteritems(requirements or {}):
self._requirements[key] = helpers.parse_version_spec(value)
self._load_queue = {}
self._native_load_queue = {}
if self.name == constants.CORE_LIBRARY:
principal_objects.register(self)
self._package_class = self._create_package_class()
self._meta = dslmeta.MetaData(
meta, dsl_types.MetaTargets.Package, self._package_class)
@property
def package_loader(self):
return self._package_loader
@property
def name(self):
return self._name
@property
def version(self):
return self._version
@property
def runtime_version(self):
return self._runtime_version
@property
def requirements(self):
return self._requirements
@property
def classes(self):
return set(self._classes.keys()).union(
self._load_queue.keys()).union(self._native_load_queue.keys())
def get_resource(self, name):
raise NotImplementedError('resource API is not implemented')
# noinspection PyMethodMayBeStatic
def get_class_config(self, name):
return {}
def _register_mpl_classes(self, data, name=None):
type_obj = self._classes.get(name)
if type_obj is not None:
return type_obj
if callable(data):
data = data()
data = helpers.list_value(data)
unnamed_class = None
last_ns = {}
for cls_data in data:
last_ns = cls_data.setdefault('Namespaces', last_ns.copy())
if len(cls_data) == 1:
continue
cls_name = cls_data.get('Name')
if not cls_name:
if unnamed_class:
raise exceptions.AmbiguousClassName(name)
unnamed_class = cls_data
else:
ns_resolver = namespace_resolver.NamespaceResolver(last_ns)
cls_name = ns_resolver.resolve_name(cls_name)
if cls_name == name:
type_obj = murano_type.create(
cls_data, self, cls_name, ns_resolver)
self._classes[name] = type_obj
else:
self._load_queue.setdefault(cls_name, cls_data)
if type_obj is None and unnamed_class:
unnamed_class['Name'] = name
return self._register_mpl_classes(unnamed_class, name)
return type_obj
def _register_native_class(self, cls, name):
if cls in self._imported_types:
return self._classes[name]
try:
m_class = self.find_class(name, False)
except exceptions.NoClassFound:
m_class = self._register_mpl_classes({'Name': name}, name)
m_class.extension_class = cls
for method_name in dir(cls):
if method_name.startswith('_'):
continue
method = getattr(cls, method_name)
if not any((
helpers.inspect_is_method(cls, method_name),
helpers.inspect_is_static(cls, method_name),
helpers.inspect_is_classmethod(cls, method_name))):
continue
method_name_alias = (getattr(
method, '__murano_name', None) or
specs.convert_function_name(
method_name, yaql_integration.CONVENTION))
m_class.add_method(method_name_alias, method, method_name)
self._imported_types.add(cls)
return m_class
def register_class(self, cls, name=None):
if inspect.isclass(cls):
name = name or getattr(cls, '__murano_name', None) or cls.__name__
if name in self._classes:
self._register_native_class(cls, name)
else:
self._native_load_queue.setdefault(name, cls)
elif isinstance(cls, dsl_types.MuranoType):
self._classes[cls.name] = cls
elif name not in self._classes:
self._load_queue[name] = cls
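    # Note: register_class() only queues definitions; they are materialized by
    # find_class() on first lookup, and unresolved names are then searched in
    # the required packages declared by this package.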
def find_class(self, name, search_requirements=True):
payload = self._native_load_queue.pop(name, None)
if payload is not None:
return self._register_native_class(payload, name)
payload = self._load_queue.pop(name, None)
if payload is not None:
result = self._register_mpl_classes(payload, name)
if result:
return result
result = self._classes.get(name)
if result:
return result
if search_requirements:
pkgs_for_search = []
for package_name, version_spec in six.iteritems(
self._requirements):
if package_name == self.name:
continue
referenced_package = self._package_loader.load_package(
package_name, version_spec)
try:
return referenced_package.find_class(name, False)
except exceptions.NoClassFound:
pkgs_for_search.append(referenced_package)
continue
raise exceptions.NoClassFound(
name, packages=pkgs_for_search + [self])
raise exceptions.NoClassFound(name, packages=[self])
@property
def context(self):
return None
def _create_package_class(self):
ns_resolver = namespace_resolver.NamespaceResolver(None)
return murano_type.MuranoClass(
ns_resolver, self.name, self, utils.NO_VALUE)
def get_meta(self, context):
if not self._meta:
return []
return self._meta.get_meta(context)
def __repr__(self):
return 'MuranoPackage({name})'.format(name=self.name)
| apache-2.0 |
40223235/2015cd_midterm2 | static/Brython3.1.1-20150328-091302/Lib/contextlib.py | 737 | 8788 | """Utilities for with-statement contexts. See PEP 343."""
import sys
from collections import deque
from functools import wraps
__all__ = ["contextmanager", "closing", "ContextDecorator", "ExitStack"]
class ContextDecorator(object):
"A base class or mixin that enables context managers to work as decorators."
def _recreate_cm(self):
"""Return a recreated instance of self.
Allows an otherwise one-shot context manager like
_GeneratorContextManager to support use as
a decorator via implicit recreation.
This is a private interface just for _GeneratorContextManager.
See issue #11647 for details.
"""
return self
def __call__(self, func):
@wraps(func)
def inner(*args, **kwds):
with self._recreate_cm():
return func(*args, **kwds)
return inner
class _GeneratorContextManager(ContextDecorator):
"""Helper for @contextmanager decorator."""
def __init__(self, func, *args, **kwds):
self.gen = func(*args, **kwds)
self.func, self.args, self.kwds = func, args, kwds
def _recreate_cm(self):
# _GCM instances are one-shot context managers, so the
# CM must be recreated each time a decorated function is
# called
return self.__class__(self.func, *self.args, **self.kwds)
def __enter__(self):
try:
return next(self.gen)
except StopIteration:
raise RuntimeError("generator didn't yield")
def __exit__(self, type, value, traceback):
if type is None:
try:
next(self.gen)
except StopIteration:
return
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
# Need to force instantiation so we can reliably
# tell if we get the same exception back
value = type()
try:
self.gen.throw(type, value, traceback)
raise RuntimeError("generator didn't stop after throw()")
except StopIteration as exc:
# Suppress the exception *unless* it's the same exception that
# was passed to throw(). This prevents a StopIteration
# raised inside the "with" statement from being suppressed
return exc is not value
except:
# only re-raise if it's *not* the exception that was
# passed to throw(), because __exit__() must not raise
# an exception unless __exit__() itself failed. But throw()
# has to raise the exception to signal propagation, so this
# fixes the impedance mismatch between the throw() protocol
# and the __exit__() protocol.
#
if sys.exc_info()[1] is not value:
raise
def contextmanager(func):
"""@contextmanager decorator.
Typical usage:
@contextmanager
def some_generator(<arguments>):
<setup>
try:
yield <value>
finally:
<cleanup>
This makes this:
with some_generator(<arguments>) as <variable>:
<body>
equivalent to this:
<setup>
try:
<variable> = <value>
<body>
finally:
<cleanup>
"""
@wraps(func)
def helper(*args, **kwds):
return _GeneratorContextManager(func, *args, **kwds)
return helper
class closing(object):
"""Context to automatically close something at the end of a block.
Code like this:
with closing(<module>.open(<arguments>)) as f:
<block>
is equivalent to this:
f = <module>.open(<arguments>)
try:
<block>
finally:
f.close()
"""
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
# Inspired by discussions on http://bugs.python.org/issue13585
class ExitStack(object):
"""Context manager for dynamic management of a stack of exit callbacks
For example:
with ExitStack() as stack:
files = [stack.enter_context(open(fname)) for fname in filenames]
# All opened files will automatically be closed at the end of
# the with statement, even if attempts to open files later
# in the list raise an exception
"""
def __init__(self):
self._exit_callbacks = deque()
def pop_all(self):
"""Preserve the context stack by transferring it to a new instance"""
new_stack = type(self)()
new_stack._exit_callbacks = self._exit_callbacks
self._exit_callbacks = deque()
return new_stack
def _push_cm_exit(self, cm, cm_exit):
"""Helper to correctly register callbacks to __exit__ methods"""
def _exit_wrapper(*exc_details):
return cm_exit(cm, *exc_details)
_exit_wrapper.__self__ = cm
self.push(_exit_wrapper)
def push(self, exit):
"""Registers a callback with the standard __exit__ method signature
Can suppress exceptions the same way __exit__ methods can.
Also accepts any object with an __exit__ method (registering a call
to the method instead of the object itself)
"""
# We use an unbound method rather than a bound method to follow
# the standard lookup behaviour for special methods
_cb_type = type(exit)
try:
exit_method = _cb_type.__exit__
except AttributeError:
# Not a context manager, so assume its a callable
self._exit_callbacks.append(exit)
else:
self._push_cm_exit(exit, exit_method)
return exit # Allow use as a decorator
def callback(self, callback, *args, **kwds):
"""Registers an arbitrary callback and arguments.
Cannot suppress exceptions.
"""
def _exit_wrapper(exc_type, exc, tb):
callback(*args, **kwds)
# We changed the signature, so using @wraps is not appropriate, but
# setting __wrapped__ may still help with introspection
_exit_wrapper.__wrapped__ = callback
self.push(_exit_wrapper)
return callback # Allow use as a decorator
def enter_context(self, cm):
"""Enters the supplied context manager
If successful, also pushes its __exit__ method as a callback and
returns the result of the __enter__ method.
"""
# We look up the special methods on the type to match the with statement
_cm_type = type(cm)
_exit = _cm_type.__exit__
result = _cm_type.__enter__(cm)
self._push_cm_exit(cm, _exit)
return result
def close(self):
"""Immediately unwind the context stack"""
self.__exit__(None, None, None)
def __enter__(self):
return self
def __exit__(self, *exc_details):
received_exc = exc_details[0] is not None
# We manipulate the exception state so it behaves as though
# we were actually nesting multiple with statements
frame_exc = sys.exc_info()[1]
def _fix_exception_context(new_exc, old_exc):
while 1:
exc_context = new_exc.__context__
if exc_context in (None, frame_exc):
break
new_exc = exc_context
new_exc.__context__ = old_exc
# Callbacks are invoked in LIFO order to match the behaviour of
# nested context managers
suppressed_exc = False
pending_raise = False
while self._exit_callbacks:
cb = self._exit_callbacks.pop()
try:
if cb(*exc_details):
suppressed_exc = True
pending_raise = False
exc_details = (None, None, None)
except:
new_exc_details = sys.exc_info()
# simulate the stack of exceptions by setting the context
_fix_exception_context(new_exc_details[1], exc_details[1])
pending_raise = True
exc_details = new_exc_details
if pending_raise:
try:
# bare "raise exc_details[1]" replaces our carefully
# set-up context
fixed_ctx = exc_details[1].__context__
raise exc_details[1]
except BaseException:
exc_details[1].__context__ = fixed_ctx
raise
return received_exc and suppressed_exc
| gpl-3.0 |
DevHugo/zds-site | zds/utils/tutorials.py | 1 | 2669 | # coding: utf-8
import os
# Used for indexing tutorials: we need to parse each manifest to know which content has been published
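# An illustrative (simplified) manifest layout that this walker handles; only
# the 'pk' values are collected:
#   {"parts": [{"pk": 1, "chapters": [{"pk": 2, "extracts": [{"pk": 3}]}]}],
#    "chapters": [...], "extracts": [...]}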
class GetPublished:
published_part = []
published_chapter = []
published_extract = []
def __init__(self):
pass
@classmethod
def get_published_content(cls):
# If all array are empty load_it
if not len(GetPublished.published_part) and \
not len(GetPublished.published_chapter) and \
not len(GetPublished.published_extract):
# Get all published tutorials
from zds.tutorial.models import Tutorial
tutorials_database = Tutorial.objects.filter(sha_public__isnull=False).all()
for tutorial in tutorials_database:
# Load Manifest
json = tutorial.load_json_for_public()
# Parse it
GetPublished.load_tutorial(json)
return {"parts": GetPublished.published_part,
"chapters": GetPublished.published_chapter,
"extracts": GetPublished.published_extract}
@classmethod
def load_tutorial(cls, json):
# Load parts, chapter and extract
if 'parts' in json:
for part_json in json['parts']:
# If inside of parts we have chapters, load it
GetPublished.load_chapters(part_json)
GetPublished.load_extracts(part_json)
GetPublished.published_part.append(part_json['pk'])
GetPublished.load_chapters(json)
GetPublished.load_extracts(json)
@classmethod
def load_chapters(cls, json):
if 'chapters' in json:
for chapters_json in json['chapters']:
GetPublished.published_chapter.append(chapters_json['pk'])
GetPublished.load_extracts(chapters_json)
return GetPublished.published_chapter
@classmethod
def load_extracts(cls, json):
if 'extracts' in json:
for extract_json in json['extracts']:
GetPublished.published_extract.append(extract_json['pk'])
return GetPublished.published_extract
def get_blob(tree, chemin):
for blob in tree.blobs:
try:
if os.path.abspath(blob.path) == os.path.abspath(chemin):
data = blob.data_stream.read()
return data.decode('utf-8')
except (OSError, IOError):
return ""
if len(tree.trees) > 0:
for atree in tree.trees:
result = get_blob(atree, chemin)
if result is not None:
return result
return None
else:
return None
| gpl-3.0 |
piyushroshan/xen-4.3.2 | tools/python/xen/xm/help.py | 52 | 3242 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <[email protected]>
#============================================================================
"""Variable definition and help support for Python defconfig files.
"""
import sys
class Vars:
"""A set of configuration variables.
"""
def __init__(self, name, help, env):
"""Create a variable set.
name name of the defconfig file
help help flag
env local environment
"""
self.name = name
self.help = help
self.env = env
self.vars = []
def var(self, name, use=None, check=None):
"""Define a configuration variable.
If provided, the check function will be called as check(var, val)
where var is the variable name and val is its value (string).
It should return a new value for the variable, or raise ValueError if
the value is not acceptable.
name variable name
use variable usage string
check variable check function
"""
self.vars.append(Var(name, use, check))
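    # Illustrative use from a defconfig file (hypothetical variable name and
    # check function; `vars` stands for a Vars instance):
    #
    #   def check_mem(var, val):
    #       if int(val) < 16:
    #           raise ValueError("%s must be at least 16" % var)
    #       return val
    #
    #   vars.var('memory', use='domain memory in MB', check=check_mem)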
def check(self):
"""Execute the variable checks or print help, depending on the value
of the help flag passed to the constructor.
"""
if self.help:
self.doHelp()
else:
for v in self.vars:
v.doCheck(self.env)
def doHelp(self, out=sys.stderr):
"""Print help for the variables.
"""
if self.vars:
print >>out, "\nConfiguration variables for %s:\n" % self.name
for v in self.vars:
v.doHelp(out)
print >>out
class Var:
"""A single variable.
"""
def __init__(self, name, use, check):
"""Create a variable.
name variable name
use variable use string
check variable value check function
"""
self.name = name
self.use = use or ''
self.check = check
def doCheck(self, env):
"""Execute the check and set the variable to the new value.
"""
if not self.check: return
try:
env[self.name] = self.check(self.name, env.get(self.name))
except StandardError, ex:
raise sys.exc_type, self.name + " - " + str(ex)
def doHelp(self, out):
"""Print help for the variable.
"""
print >>out, "%-12s" % self.name, self.use
| gpl-2.0 |
HPPTECH/hpp_IOSTressTest | Refer/IOST_OLD_SRC/IOST_0.18/IOST.py | 1 | 8248 | #!/usr/bin/env python
#======================================================================
#
# Project : hpp_IOStressTest
# File : IOST.py
# Date : Sep 21, 2016
# Author : HuuHoang Nguyen
# Contact : [email protected]
# : [email protected]
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import operator
import sys
import base64
import time
# from Libs.IOST_Prepare import *
# from Libs.IOST_Config import *
# from Libs.IOST_WMain import *
# from Libs.IOST_Basic import *
sys.path.append("Libs")
sys.path.append("../Libs")
from Libs import IOST_Basic
from Libs import IOST_Config
from Libs import IOST_WMain
from Libs.IOST_WMain import *
from Libs import IOST_Prepare
import gtk
import gtk.glade
import gobject
# from Libs import *
# from Libs import *
# from Libs import *
#======================================================================
IOST_Debug_Enable = 0
#======================================================================
# argv_number = len(sys.argv)
# for i in range(0, argv_number):
# print sys.argv[i]
#======================================================================
IOST_WMAIN = "IOST_WMain"
IOST_CHIP = "Skylark"
IOST_OBJECT = "_Skylark"
IOST_CONFIG_DATA_DEFAULTE_FILE = "IOST_DataDefault.json"
IOST_CONFIG_OBJS_DEFAULTE_FILE = "IOST_ObjsDefault.json"
IOST_PROGRAM_PATH=os.path.dirname(os.path.abspath(sys.argv[0]))
if IOST_Debug_Enable:
print IOST_PROGRAM_PATH
IOST_SSH_BIN = "ssh"
IOST_TELNET_BIN = "telnet"
IOST_SHELL_BIN = os.environ["SHELL"]
IOST_CONGIG_DATA_PATH = IOST_PROGRAM_PATH + "/" + IOST_CHIP + "/" + IOST_CONFIG_DATA_DEFAULTE_FILE
if IOST_Debug_Enable:
print IOST_CONGIG_DATA_PATH
IOST_CONGIG_OBJS_PATH = IOST_PROGRAM_PATH + "/" + IOST_CHIP + "/" + IOST_CONFIG_OBJS_DEFAULTE_FILE
if IOST_Debug_Enable:
print IOST_CONGIG_OBJS_PATH
#======================================================================
class IOST(IOST_WMain):
"""
    This is the main class of the program.
"""
#----------------------------------------------------------------------
def __init__(self, glade_filename = "",
window_name = "",
object_name = "",
iost_data = None,
iost_objs = None):
"The function is main function to start IOST program"
IOST_WMain.__init__(self, glade_filename, window_name, object_name, iost_data, iost_objs)
#----------------------------------------------------------------------
def IOST_Main(self):
gtk.main()
#======================================================================
# MAIN FUNCTION
#======================================================================
if __name__ == "__main__":
"The main function of IOST"
IOST_Config=IOST_Config()
#-------------------------------------------------------------------------
IOST_Config.IOST_Data = IOST_Config.ReadFile(file_name=IOST_CONGIG_DATA_PATH)
#-------------------------------------------------------------------------
IOST_Config.IOST_Objs = IOST_Config.ReadFile(file_name=IOST_CONGIG_OBJS_PATH)
IOST_Config.IOST_Data["GladeFileName"] = IOST_PROGRAM_PATH + "/" + IOST_CHIP+ '/'+ IOST_Config.IOST_Data["GladeFileName"] + '_'+ IOST_Config.IOST_Data["ProjectVersion"] + '.glade'
# print IOST_Config.IOST_Data["GladeFileName"]
# print "=================================================================="
# pprint (IOST_Config.IOST_Data.keys())
# print "=================================================================="
# pprint (IOST_Config.IOST_Objs["IOST_WMain"].keys())
# print "=================================================================="
argv_number = len(sys.argv)
if IOST_Debug_Enable:
print "=================================================================="
print "Number of arg have entered is : ", argv_number
for i in range(0, argv_number):
print "========== argv[%s] = : " %(i, sys.argv[i])
#Add config file to a Files list
for i in range(1, argv_number):
# print i
# IOST_Config.IOST_Files.append(sys.argv[1]+'/'+sys.argv[i])
if os.path.isfile(sys.argv[i]):
IOST_Config.AddFileConfig2List(IOST_Config.IOST_Files, sys.argv[i])
else:
IOST_Config.AddFileConfig2List(IOST_Config.IOST_Files, IOST_PROGRAM_PATH +'/'+sys.argv[i])
# Print to debug name of all file config have inputed
if IOST_Debug_Enable:
print "=========================The list config files have entered==========================="
print "Number of config Files is %s" % (len (IOST_Config.IOST_Files))
print "Number of config files is: "
for i in range(0, len (IOST_Config.IOST_Files)):
pprint (IOST_Config.IOST_Files[i])
#Read file and store in Files Dist type at location (2n+1)
if argv_number > 1:
IOST_Config.AddObjConfig2List(IOST_Config.IOST_Files)
if IOST_Debug_Enable:
for i in range(0, len (IOST_Config.IOST_Files)):
print "================================= %s =================================" %i
pprint (IOST_Config.IOST_Files[i])
for i in range(0, len (IOST_Config.IOST_Files), 2):
IOST_Config.ModifyIOST_Objs(IOST_Config.IOST_Data, IOST_Config.IOST_Files[i+1] )
if IOST_Debug_Enable:
print "IOST_Config.IOST_Data is : "
pprint (IOST_Config.IOST_Data)
print "IOST_Config.IOST_Data['I2C0'] is : "
pprint (IOST_Config.IOST_Data["I2C0"])
IOST_Config.IOST_Data["IOST_Path"] = IOST_PROGRAM_PATH
IOST_Config.IOST_Data["IOST_RunPath"] = os.getcwd()
IOST_Config.IOST_Data["ConfigFile"]["CfgDataPath"] = IOST_CONGIG_DATA_PATH
IOST_Config.IOST_Data["ConfigFile"]["CfgObjsPath"] = IOST_CONGIG_OBJS_PATH
#-------------------------------------------------------------------------
IOST_Config.WriteFile(IOST_PROGRAM_PATH+"/Temp_Configs/Config_Data.json", IOST_Config.IOST_Data)
IOST_Config.WriteFile(IOST_PROGRAM_PATH+"/Temp_Configs/Config_Objects.json", IOST_Config.IOST_Objs)
# Some debug code here
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
if False:
for key, value in IOST_Config.IOST_Objs["IOST_WSetupTestcase"].iteritems():
print key, value
if False:
len__temp = len(IOST_Config.IOST_Objs["IOST_WSetupTestcase"])
print "=============================================="
print "Len of IOST_WSetupTestcase object is", len__temp
print "=============================================="
print IOST_Config.IOST_Objs["IOST_WSetupTestcase"].keys()
for i in range(0, len__temp, 2 ):
print "=============================================="
print i
print "----------------------------------------------"
print IOST_Config.IOST_Objs["IOST_WSetupTestcase"].keys()[i]
print "----------------------------------------------"
print IOST_Config.IOST_Objs["IOST_WSetupTestcase"].keys()[i+1]
print "----------------------------------------------"
print IOST_Config.IOST_Objs["IOST_WSetupTestcase"][IOST_Config.IOST_Objs["IOST_WSetupTestcase"].keys()[(i+1)]]
if False:
exit(1)
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#-------------------------------------------------------------------------
main = IOST(glade_filename=IOST_Config.IOST_Data["GladeFileName"],
window_name=IOST_WMAIN,
object_name=IOST_OBJECT,
iost_data=IOST_Config.IOST_Data, iost_objs=IOST_Config.IOST_Objs)
main.IOST_Main()
| mit |
yuanguo8/nubiaz5s_kernel | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
LUTAN/tensorflow | tensorflow/examples/learn/text_classification_cnn.py | 53 | 4430 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
n_words = 0
def cnn_model(features, target):
"""2 layer ConvNet to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
target = tf.one_hot(target, 15, 1, 0)
word_vectors = tf.contrib.layers.embed_sequence(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
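  # expand_dims adds a trailing channel axis so the tensor becomes
  # [batch_size, sequence_length, EMBEDDING_SIZE, 1], the 4-D input layout
  # expected by convolution2d below.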
word_vectors = tf.expand_dims(word_vectors, 3)
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(
word_vectors, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(
conv1,
ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1],
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.contrib.layers.convolution2d(
pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
classifier = learn.Estimator(model_fn=cnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
gkarlin/django-jenkins | build/pylint/reporters/html.py | 5 | 2481 | # Copyright (c) 2003-2006 Sylvain Thenault ([email protected]).
# Copyright (c) 2003-2011 LOGILAB S.A. (Paris, FRANCE).
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""HTML reporter"""
import sys
from cgi import escape
from logilab.common.ureports import HTMLWriter, Section, Table
from pylint.interfaces import IReporter
from pylint.reporters import BaseReporter
class HTMLReporter(BaseReporter):
"""report messages and layouts in HTML"""
__implements__ = IReporter
extension = 'html'
def __init__(self, output=sys.stdout):
BaseReporter.__init__(self, output)
self.msgs = []
def add_message(self, msg_id, location, msg):
"""manage message of different type and in the context of path"""
module, obj, line, col_offset = location[1:]
sigle = self.make_sigle(msg_id)
self.msgs += [sigle, module, obj, str(line), str(col_offset), escape(msg)]
def set_output(self, output=None):
"""set output stream
messages buffered for old output is processed first"""
if self.out and self.msgs:
self._display(Section())
BaseReporter.set_output(self, output)
def _display(self, layout):
"""launch layouts display
overridden from BaseReporter to add insert the messages section
(in add_message, message is not displayed, just collected so it
can be displayed in an html table)
"""
if self.msgs:
# add stored messages to the layout
msgs = ['type', 'module', 'object', 'line', 'col_offset', 'message']
msgs += self.msgs
sect = Section('Messages')
layout.append(sect)
sect.append(Table(cols=6, children=msgs, rheaders=1))
self.msgs = []
HTMLWriter().format(layout, self.out)
| lgpl-3.0 |
Antiun/odoomrp-wip | quality_control_force_valid/__openerp__.py | 19 | 1379 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c)
# 2014 Serv. Tec. Avanzados - Pedro M. Baeza (http://www.serviciosbaeza.com)
# 2014 AvanzOsc (http://www.avanzosc.es)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Quality control - Manual validation",
"version": "1.0",
"depends": [
"quality_control",
],
"author": "OdooMRP team,"
"AvanzOSC,"
"Serv. Tecnol. Avanzados - Pedro M. Baeza",
"category": "Quality control",
'data': [
'views/qc_inspection_view.xml',
],
'installable': True,
}
| agpl-3.0 |
zeroincombenze/tools | zar/restdb.py | 2 | 29848 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) SHS-AV s.r.l. (<http://www.zeroincombenze.it>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
r"""Restore database files from Production Machine to Development Machine
Make 2 servers quite identical, ready to use
May be used to create a mirror server of Zeroincombenze®
Translation file rules (restconf.ini).
every line has the following format: filename \t src \t tgt
where filename may be:
#
    every line beginning with '#' is a remark
realfilename (i.e. http.conf)
    every 'src' text is replaced by 'tgt' text
sqlname->wp (i.e. mysite.sql->wp)
    every 'src' is a wp param and 'tgt' is its value
sqlname->wiki (i.e. mysite.sql->wiki)
    every 'src' is a wikimedia param and 'tgt' is its value
sqlname/ (i.e. mysite.sql/)
    every line is an SQL statement to execute at the end;
    spaces are written with the escape \ character (i.e. update\ table ...)
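
Illustrative restconf.ini content (hypothetical host and database names;
fields are whitespace-separated, multi-word values escaped with '\ '):

    # lines starting with '#' are ignored
    httpd.conf      www.example.com    www1.example.com
    mysite.sql      www.example.com    www1.example.com
    mysite.sql->wp  .prefix            wp_
    mysite.sql->wp  .siteURL           http://www.example.com
    mysite.sql/     update\ wp_options\ set\ option_value='0'  where\ option_name='blog_public'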
"""
# import pdb
import os
import os.path
import sys
import glob
from datetime import date, datetime, timedelta
import time
import string
import re
from . import zarlib
try:
from os0 import os0
except ImportError:
import os0
__version__ = "1.3.35.1"
def version():
return __version__
class Restore_Image:
def __init__(self, ctx):
self.hostname = ctx['hostname']
os0.set_debug_mode(ctx['dbg_mode'])
self.prodhost = ctx['production_host']
self.devhost = ctx['development_host']
self.mirrorhost = ctx['mirror_host']
self.pgdir = ctx['pg_dir']
self.mysqldir = ctx['mysql_dir']
homedir = os.path.expanduser("~")
self.ftp_cfn = homedir + "/" + ctx['ftp_script']
self.flist = homedir + "/" + ctx['list_file']
os0.set_tlog_file(ctx['logfn'])
# Log begin execution
os0.wlog("Restore database files", __version__)
# Simulate backup
self.dry_run = ctx['dry_run']
if ctx['saveset'] == "bckdb" or \
ctx['saveset'] == "bckconf" or \
ctx['saveset'] == "bckwww":
if self.hostname == self.prodhost:
os0.wlog("Running on production machine")
if ctx['alt']:
self.bck_host = self.mirrorhost
self.fconf = homedir + "/" + \
ctx['no_translation']
else:
self.bck_host = self.devhost
self.fconf = homedir + "/" + \
ctx['data_translation']
elif self.hostname == self.mirrorhost:
os0.wlog("Running on mirror machine")
if ctx['alt']:
self.bck_host = self.prodhost
self.fconf = homedir + "/" + \
ctx['no_translation']
else:
self.bck_host = self.devhost
self.fconf = homedir + "/" + \
ctx['data_translation']
elif self.hostname == self.devhost:
os0.wlog("This command cannot run on development machine")
if not ctx['dry_run']:
raise Exception("Command aborted due invalid machine")
else:
os0.wlog("Unknown machine - Command aborted")
if not ctx['dry_run']:
raise Exception("Command aborted due unknown machine")
elif ctx['saveset'] == "restdb" or \
ctx['saveset'] == "restconf" or \
ctx['saveset'] == "restwww":
if self.hostname == self.prodhost:
os0.wlog("This command cannot run on production machine")
if not ctx['dry_run']:
raise Exception("Command aborted due production machine")
elif self.hostname == self.mirrorhost:
os0.wlog("Running on mirror machine")
if ctx['alt']:
self.bck_host = self.prodhost
self.fconf = homedir + "/" + \
ctx['no_translation']
else:
self.bck_host = self.devhost
self.fconf = homedir + "/" + \
ctx['data_translation']
elif self.hostname == self.devhost:
os0.wlog("Running on development machine")
if ctx['alt']:
self.bck_host = self.mirrorhost
self.fconf = homedir + "/" + \
ctx['data_translation']
else:
self.bck_host = self.devhost
self.fconf = homedir + "/" + \
ctx['data_translation']
else:
os0.wlog("Unknown machine - Command aborted")
if not ctx['dry_run']:
raise Exception("Command aborted due unknown machine")
# May be (.gz or .bz2)
self.tar_ext = ctx['tar_ext']
# May be (z or j)
self.tar_opt = ctx['tar_opt']
# May be (null or .sql)
self.pre_ext = ctx['pre_ext']
# May be (null or .sql)
self.sql_ext = ctx['sql_ext']
self.psql_uu = ctx['pgsql_user']
self.psql_db = ctx['pgsql_def_db']
self.mysql_uu = ctx['mysql_user']
self.mysql_db = ctx['mysql_def_db']
self.pid = os.getpid()
self.ftp_rootdir = ""
self.ftp_dir = ""
self.dbtype = ""
self.create_dict()
def create_dict(self):
self.dict = {}
self.xtl = {}
self.seed = 0
try:
cnf_fd = open(self.fconf, "r")
line = cnf_fd.readline()
while line != "":
i = line.rfind('\n')
if i >= 0 and line[0:1] != "#":
line = line.replace("\\ ", "\\b")
line = re.sub('\\s+', ' ', line).strip()
f = string.split(line, ' ')
self.add_dict_entr(f[0], f[1], f[2])
line = cnf_fd.readline()
cnf_fd.close()
except:
os0.wlog("No dictionary file", self.fconf, "found!")
def add_dict_entr(self, name, src, tgt):
self.seed = self.seed + 1
key = "{0:06d}".format(self.seed)
val = (src, tgt)
if name in self.dict:
self.dict[name].append(key)
else:
self.dict[name] = [key]
self.xtl[key] = val
# os0.wlog("> s|{0}|{1}|g {2}!".format(src, tgt, name))
def search4item(self, item):
if item in self.dict:
return self.dict[item]
else:
return None
def restore_file(self, fqn):
# pdb.set_trace()
dbtype = ""
# Extract dir if supplied
p = os.path.dirname(fqn)
f = os.path.basename(fqn) # Just filename
# No dir supplied
if p == "":
p = self.ftp_dir
elif p == "/var/lib/pgsql/backups":
dbtype = "psql"
elif p == "/var/lib/mysql/backups":
dbtype = "mysql"
if dbtype != self.dbtype:
if dbtype == "psql":
cmd = "service postgresql restart"
os0.trace_debug("$", cmd)
os0.muteshell(cmd,
simulate=self.dry_run,
keepout=os0.debug_mode)
elif dbtype == "mysql":
cmd = "service mysqld restart"
os0.trace_debug("$", cmd)
os0.muteshell(cmd,
simulate=self.dry_run,
keepout=os0.debug_mode)
if p != self.ftp_dir: # Change dir
self.chdir(p) # Set directory
llen = len(self.sql_ext) + 9
# i = len(f) - llen
# Extract dbname from XXXXX-YYYYMMDD.SQL
dbname = f[0:-llen]
# if dbname == "wp-zi-it":
# os0.wlog(" db", dbname, "not upgradable!!!")
if os.path.isfile(f):
self.restore_db(dbtype, dbname, fqn)
else:
os0.wlog(" file", f, "not found!!!")
def get_params(self, f):
ctx = {}
ctx['prefix'] = ""
ctx['siteURL'] = ""
ctx['testURL'] = ""
ctx['siteURI'] = ""
ctx['testURI'] = ""
ctx['admin_email'] = ""
ctx['conf_file'] = ""
ctx['conf_file2'] = ""
ctx['conf_file3'] = ""
ctx['index_html'] = ""
key_ids = self.search4item(f)
if key_ids:
# fxch = True
# Text couples for substitution
for key in key_ids:
src = self.xtl[key][0]
src = src.replace("\\b", " ")
tgt = self.xtl[key][1]
tgt = tgt.replace("\\b", " ")
if src == ".prefix":
ctx['prefix'] = tgt
elif src == ".siteURL":
ctx['siteURL'] = tgt
i = ctx['siteURL'].find(".")
if i < 0:
ctx['siteURL'] = "http://www." + ctx['siteURL']
i = ctx['siteURL'].find(":")
if i < 0:
ctx['siteURL'] = "http://" + ctx['siteURL']
i = ctx['siteURL'].find(".")
if ctx['admin_email'] == "":
ctx['admin_email'] = "postmaster@" + \
ctx['siteURL'][i + 1:]
if ctx['testURL'] == "":
ctx['testURL'] = ctx['siteURL'][0:i] + \
"1" + ctx['siteURL'][i:]
if ctx['siteURI'] == "":
x = ctx['siteURL'].split("://")
ctx['siteURI'] = x[1]
if ctx['testURI'] == "":
x = ctx['testURL'].split("://")
ctx['testURI'] = x[1]
elif src == ".testURL":
ctx['testURL'] = tgt
x = ctx['testURL'].split("://")
ctx['testURI'] = x[1]
elif src == ".siteURI":
ctx['siteURI'] = tgt
elif src == ".testURI":
ctx['testURI'] = tgt
elif src == ".admin_email":
ctx['admin_email'] = tgt
elif src == ".conf_file":
ctx['conf_file'] = tgt
elif src == ".conf_file2":
ctx['conf_file2'] = tgt
elif src == ".conf_file3":
ctx['conf_file3'] = tgt
elif src == ".index_html":
ctx['index_html'] = tgt
else:
raise ValueError('Invalid param {0}!'.format(src))
return ctx
def repl_data_wp(self, ctx, fqn_str):
os0.trace_debug(
"> update URL (wp) {0}->{1}"
.format(ctx['siteURL'], ctx['testURL']))
stmt = "update {0}options set option_value='{1}'"\
" where option_name='{2}'"\
.format(ctx['prefix'], ctx['testURL'], "siteurl")
fqn_str = fqn_str + stmt + ";\n"
stmt = "update {0}options set option_value='{1}'"\
" where option_name='{2}'"\
.format(ctx['prefix'], ctx['testURL'], "home")
fqn_str = fqn_str + stmt + ";\n"
stmt = "update {0}options set option_value='{1}/'"\
" where option_name='{2}'"\
.format(ctx['prefix'], ctx['testURL'], "ga_default_domain")
fqn_str = fqn_str + stmt + ";\n"
stmt = "update {0}options set option_value='{1}'"\
" where option_name='{2}'"\
.format(ctx['prefix'], ctx['siteURI'], "ga_root_domain")
fqn_str = fqn_str + stmt + ";\n"
stmt = "update {0}options set option_value='{1}'"\
" where option_name='{2}'"\
.format(ctx['prefix'], ctx['admin_email'], "admin_email")
fqn_str = fqn_str + stmt + ";\n"
stmt = "update {0}options set option_value='{1}'"\
" where option_name='{2}'"\
.format(ctx['prefix'], "0", "blog_public")
fqn_str = fqn_str + stmt + ";\n"
src_str = ctx['siteURL']
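        # The scan below also patches PHP-serialized values embedded in the
        # dump (s:<len>:"..." entries): whenever siteURL is replaced by
        # testURL, the serialized length prefix is recomputed to match the
        # new string length.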
ix = fqn_str.find(src_str)
while ix >= 0:
llen = len(ctx['siteURL'])
j = ix - 1
sep = ' '
while sep == ' ':
while fqn_str[j] != '\"' and fqn_str[j] != '\'':
j = j - 1
sep = fqn_str[j]
j = j - 1
if fqn_str[j] == '\\':
if sep == '\'':
sep = ' '
else:
j = j - 1
if fqn_str[j] != ':':
sep = ' '
else:
j = j - 1
if sep == '\"':
ix1 = j + 1
while fqn_str[j].isdigit():
j = j - 1
n = fqn_str[j + 1:ix1]
i = int(n)
if i >= llen:
src = fqn_str[j + 1:ix] + ctx['siteURL']
j = len(ctx['testURL'])
n = str(i + j - llen)
tgt = n + fqn_str[ix1:ix] + ctx['testURL']
os0.trace_debug(
"> sed|{0}|{1}|".format(src, tgt))
fqn_str = fqn_str.replace(src, tgt)
ix = fqn_str.find(src_str, ix + 1)
return fqn_str
def repl_data(self, dbname, fqn):
fzero = False
try:
fqn_fd = open(fqn, 'r')
# Go to end of file
fqn_fd.seek(0, os.SEEK_END)
# File len = 0 ?
if fqn_fd.tell() == 0:
fzero = True
# Go to begin of file
fqn_fd.seek(0, 0)
# Read entire file
fqn_str = fqn_fd.read()
fqn_fd.close()
except:
fzero = True
if fzero:
os0.wlog(" file", fqn, "empty!!!")
else:
fxch = False
# Search for text substitution (Wordpress)
f = dbname + "->wp"
ctx = self.get_params(f)
if ctx['prefix'] != "" and ctx['siteURL'] != "":
fxch = True
fqn_str = self.repl_data_wp(ctx, fqn_str)
# Search for sql command to append
f = dbname + "/"
key_ids = self.search4item(f)
if key_ids:
fxch = True
# Text couples for substitution
for key in key_ids:
src = self.xtl[key][0]
src = src.replace("\\b", " ")
tgt = self.xtl[key][1]
tgt = tgt.replace("\\b", " ")
os0.trace_debug(">", src, tgt, ";")
fqn_str = fqn_str + src + " " + tgt + ";\n"
# Search for text substitution in SQL statements
f = dbname + self.sql_ext
key_ids = self.search4item(f)
if key_ids:
fxch = True
# Text couples for substitution
for key in key_ids:
src = self.xtl[key][0]
src = src.replace("\\b", " ")
tgt = self.xtl[key][1]
tgt = tgt.replace("\\b", " ")
os0.trace_debug("> sed|{0}|{1}|".format(src, tgt))
fqn_str = fqn_str.replace(src, tgt)
if fxch:
fqn_fd = open(fqn, 'w')
fqn_fd.write(fqn_str)
fqn_fd.close()
f = dbname + "->wiki"
ctx = self.get_params(f)
if ctx['siteURL'] != "":
fqns = ctx['conf_file'].split(',')
for fqn in fqns:
self.replace_file(ctx, f, fqn)
if ctx['conf_file2']:
fqns = ctx['conf_file2'].split(',')
for fqn in fqns:
self.replace_file(ctx, f, fqn)
if ctx['conf_file3']:
fqns = ctx['conf_file3'].split(',')
for fqn in fqns:
self.replace_file(ctx, f, fqn)
fqn = ctx['index_html']
self.replace_file(ctx, f, fqn)
def replace_file(self, ctx, f, fqn):
os0.trace_debug("> replace file", fqn)
try:
fn_fd = open(fqn, 'r')
fn_str = fn_fd.read()
fn_fd.close()
key_ids = self.search4item(f)
if key_ids:
src = ctx['siteURL']
tgt = ctx['testURL']
fn_str = fn_str.replace(src, tgt)
src = ctx['siteURI']
tgt = ctx['testURI']
fn_str = fn_str.replace(src, tgt)
fn_fd = open(fqn, 'w')
fn_fd.write(fn_str)
fn_fd.close()
except:
pass
def restore_db(self, dbtype, dbname, fqn):
# pdb.set_trace()
os0.wlog(" restoring", dbname, " ({0})".format(fqn))
homedir = os.path.expanduser("~")
tar_ext = self.tar_ext
tar_opt = self.tar_opt
fzip_fn = dbname + tar_ext
if not os.path.isfile(fzip_fn):
if self.tar_ext == ".gz":
tar_ext = ".bz2"
tar_opt = "j"
fzip_fn = dbname + tar_ext
if not os.path.isfile(fzip_fn):
tar_ext = self.tar_ext
tar_opt = self.tar_opt
# No compressed file found
fzip_fn = ""
elif self.tar_ext == ".bz2":
tar_ext = ".gz"
tar_opt = "z"
fzip_fn = dbname + tar_ext
if not os.path.isfile(fzip_fn):
tar_ext = self.tar_ext
tar_opt = self.tar_opt
# No compressed file found
fzip_fn = ""
f = os.path.basename(fqn) # Just filename
llen = len(self.sql_ext) + 9
i = len(f) - llen
# Extract date (YYYYMMDD) from XXXXX-YYYYMMDD.SQL
dts = f[i + 1:i + 9]
if dbtype == "psql":
cmd = "chown " + self.psql_uu + ":" + self.psql_uu + " " + fqn
elif dbtype == "mysql":
cmd = "chown " + self.mysql_uu + ":" + self.mysql_uu + " " + fqn
os0.trace_debug("$", cmd)
os0.muteshell(cmd, simulate=self.dry_run, keepout=os0.debug_mode)
sql_fn = homedir + "/restdb.sql"
cmd = "cp " + fqn + " " + sql_fn
os0.trace_debug("$", cmd)
os0.muteshell(cmd, simulate=self.dry_run, keepout=os0.debug_mode)
# cmd = "sed -i -e \"s|Owner: openerp|Owner: odoo|g\""\
# " -e \"s|OWNER TO openerp|OWNER TO odoo|g\" ~/restdb.sql"
# os0.trace_debug("$", cmd)
# os0.muteshell(cmd, simulate=self.dry_run, keepout=os0.debug_mode)
if dbtype == "psql":
cmd = "chown " + self.psql_uu + ":" + self.psql_uu + " " + sql_fn
elif dbtype == "mysql":
cmd = "chown " + self.mysql_uu + ":" + self.mysql_uu + " " + sql_fn
os0.trace_debug("$", cmd)
os0.muteshell(cmd, simulate=self.dry_run, keepout=os0.debug_mode)
self.repl_data(dbname, sql_fn)
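        # Build a small helper script (restdb.psh): for PostgreSQL a psql
        # script that drops any old copy of the database, recreates it and
        # sources the dump; for MySQL a shell script doing the same with
        # mysqladmin/mysql.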
psh_fn = homedir + "/restdb.psh"
psh_fd = open(psh_fn, "w")
if dbtype == "psql":
user = self.psql_uu
defdb = self.psql_db
psh_fd.write("\\c {0}\n".format(defdb))
psh_fd.write(
"DROP DATABASE IF EXISTS \"{0}-{1}\";\n".format(dbname, dts))
psh_fd.write("DROP DATABASE IF EXISTS \"{0}\";\n".format(dbname))
psh_fd.write(
"CREATE DATABASE \"{0}\" TEMPLATE template1;\n".format(dbname))
psh_fd.write("\\c \"{0}\"\n".format(dbname))
psh_fd.write("\\i {0}\n".format(sql_fn))
psh_fd.write(
"ALTER DATABASE \"{0}\" OWNER TO odoo;\n".format(dbname))
cmd = "psql -f " + psh_fn + " -U" + user + " " + defdb
psh_fd.close()
os0.trace_debug("$", cmd)
os0.muteshell(cmd, simulate=self.dry_run, keepout=os0.debug_mode)
elif dbtype == "mysql":
user = "root"
pwd = "SHS13mgr"
# defdb = self.psql_db
psh_fd.write(
"mysqladmin -u{0} --password={1} -f drop \"{2}-{3}\" ||true\n"
.format(user, pwd, dbname, dts))
psh_fd.write(
"mysqladmin -u{0} --password={1} -f drop \"{2}\" || true\n"
.format(user, pwd, dbname))
psh_fd.write(
"mysqladmin -u{0} --password={1} -f create \"{2}\"\n"
.format(user, pwd, dbname))
psh_fd.write(
"mysql -u{0} --password=SHS13mgr -G -e \"source {1}\" {2}\n"
.format(user, sql_fn, dbname))
psh_fd.close()
cmd = "chmod +x " + psh_fn
os0.muteshell(cmd, simulate=self.dry_run, keepout=os0.debug_mode)
cmd = psh_fn
os0.trace_debug("$", cmd)
os0.muteshell(cmd, simulate=self.dry_run, keepout=os0.debug_mode)
else:
os0.wlog(" unknown", dbname, "database type!!!")
cmd = "echo Error"
# Compressed file found
if fzip_fn != "":
if dbtype == "psql":
cmd = "chown " + self.psql_uu + \
":" + self.psql_uu + " " + fzip_fn
elif dbtype == "mysql":
cmd = "chown " + self.mysql_uu + \
":" + self.mysql_uu + " " + fzip_fn
os0.muteshell(cmd, simulate=self.dry_run, keepout=os0.debug_mode)
cmd = "tar --keep-newer-files -x" + tar_opt + "f " + fzip_fn
os0.muteshell(cmd, simulate=self.dry_run, keepout=os0.debug_mode)
if not self.dry_run:
os.remove(fzip_fn)
self.purge_db(dbtype, dbname)
def purge_db(self, dbtype, f):
# pdb.set_trace()
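        # Sketch of the retention policy implemented here: daily dumps older
        # than ~120 days are deleted unless dated the 1st of the month, then
        # the remaining <db>-YYYYMMDD.sql dumps are packed into one compressed
        # tar archive and the loose .sql files are removed.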
if self.sql_ext != self.pre_ext:
self.change_file_ext(f)
dtc = date.today() - timedelta(90)
os0.wlog(" removing file older than", dtc.strftime("%Y-%m-%d"))
fzip_fn = f + self.tar_ext
force_change_ext = False
for i in range(180, 120, -1):
dtc = datetime.today() - timedelta(i)
dts = dtc.strftime("%Y%m%d")
fsql = f + "-" + dts + self.sql_ext
if not os.path.isfile(fsql) and self.sql_ext != self.pre_ext:
ftmp = f + "-" + dts + self.pre_ext
if os.path.isfile(ftmp):
try:
os0.wlog("$ mv", ftmp, fsql)
if not self.dry_run:
# Rename old ext -> nex ext
os.rename(ftmp, fsql)
# Force change sql file extension
force_change_ext = True
except:
pass
if dtc.day != 1:
if not self.remove_sql_file(fsql) \
and self.sql_ext != self.pre_ext:
fsql = f + "-" + dts + self.pre_ext
self.remove_sql_file(fsql)
if force_change_ext:
self.change_file_ext(f)
fsql = f + "-????????" + self.sql_ext
if dbtype == "psql":
cmd = "chown " + self.psql_uu + ":" + self.psql_uu + " " + fsql
elif dbtype == "mysql":
cmd = "chown " + self.mysql_uu + ":" + self.mysql_uu + " " + fsql
os0.trace_debug("$ ", cmd)
os0.muteshell(cmd, simulate=self.dry_run)
cmd = "tar --remove-files -c" + \
self.tar_opt + "f " + fzip_fn + " " + fsql
os0.trace_debug("$ ", cmd)
os0.muteshell(cmd, simulate=self.dry_run)
if dbtype == "psql":
cmd = "chown " + self.psql_uu + ":" + self.psql_uu + " " + fzip_fn
elif dbtype == "mysql":
cmd = "chown " + self.mysql_uu + \
":" + self.mysql_uu + " " + fzip_fn
os0.trace_debug("$ ", cmd)
os0.muteshell(cmd, simulate=self.dry_run)
os0.wlog(" removing archived files")
fsql = f + "-????????" + self.sql_ext
f_ids = sorted(glob.glob(fsql))
for fsql in f_ids:
self.remove_sql_file(fsql)
def change_file_ext(self, f):
os0.wlog(" changing extension files")
fsql = f + "-????????" + self.pre_ext
f_ids = glob.glob(fsql)
for f in f_ids:
llen = len(f) - len(self.pre_ext)
fsql = f[0:llen] + self.sql_ext
if not os.path.isfile(fsql):
ftmp = f
if os.path.isfile(ftmp):
try:
os0.wlog("$ mv", ftmp, fsql)
if not self.dry_run:
# Rename old ext -> nex ext
os.rename(ftmp, fsql)
except:
pass
def remove_sql_file(self, fsql):
try:
fzip_fd = open(fsql, "r")
fzip_fd.close()
os0.trace_debug("$ rm", fsql)
if not self.dry_run:
os.remove(fsql)
sts = True
except:
sts = False
return sts
def extract_fn_2_restore(self):
file_2_restore = ""
ls_fd = open(self.flist, "r+")
p = ls_fd.tell()
fl = ls_fd.readline()
# f_copy = False
while fl != "":
i = fl.rfind('\n')
if file_2_restore == "" and i >= 0 and fl[0:1] != '#':
f = fl[0:i]
file_2_restore = f
f = "#" + f[1:]
ls_fd.seek(p, os.SEEK_SET)
ls_fd.write(f)
p = ls_fd.tell()
fl = ls_fd.readline()
ls_fd.close()
return file_2_restore
def commit_fn_restored(self):
ftmp = self.flist + ".lst"
fbak = self.flist + ".bak"
if os.path.isfile(ftmp):
fn_fd = open(ftmp, 'r')
fzero = True
fl = fn_fd.readline()
while fl != "" and fzero:
i = fl.rfind('\n')
if i >= 0:
fzero = False
fl = fn_fd.readline()
fn_fd.close()
if not fzero:
cmd = "rm -f {2}; mv {0} {2}; mv {1} {0}".format(
self.flist, ftmp, fbak)
os0.trace_debug("$ ", cmd)
os0.muteshell(cmd, simulate=self.dry_run)
else:
if not self.dry_run:
os.remove(ftmp)
def chdir(self, path):
# Change root dir
lpath = os0.setlfilename(path)
os0.wlog(" [{0}]".format(lpath))
self.set_chdir(lpath)
self.ftp_dir = path # Remember dir
def set_chdir(self, path):
# Exec chdir and store into ftp script
os.chdir(path)
def main():
"""Tool main"""
sts = 0
# pdb.set_trace()
ctx = zarlib.parse_args(sys.argv[1:],
version=version(),
doc=__doc__)
if ctx['do_list']:
print ctx['saveset_list']
return sts
RI = Restore_Image(ctx)
f_alrdy_run = zarlib.check_if_running(ctx, RI.pid)
if f_alrdy_run:
os0.wlog("({0}) ***Another instance is running!!!".format(RI.pid))
# Restore files
file_r_ctr = 0
file_u_ctr = 0
time_wait = 60
wait_loop = 3
if not f_alrdy_run:
fl = RI.extract_fn_2_restore()
loop_ctr = wait_loop
while loop_ctr > 0:
if fl != "":
file_r_ctr = file_r_ctr + 1
if os.path.isfile(fl):
RI.restore_file(fl)
file_u_ctr += 1
if file_u_ctr > 1:
wait_loop = 60
loop_ctr = wait_loop
else:
os0.wlog(" file", fl, "not found!!!")
RI.commit_fn_restored()
fl = RI.extract_fn_2_restore()
if fl == "":
os0.wlog(" wait for next db")
time.sleep(time_wait)
loop_ctr -= 1
if not ctx['dbg_mode'] and os.path.isfile(os0.setlfilename(os0.bgout_fn)):
os.remove(os0.setlfilename(os0.bgout_fn))
if not f_alrdy_run:
os0.wlog("Restore DB ended."
" {0} DB to restore, {1} DB restored ({2})."
                 .format(file_r_ctr, file_u_ctr, RI.pid))
return sts
if __name__ == "__main__":
sts = main()
sys.exit(sts)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
iruga090/python-social-auth | social/pipeline/disconnect.py | 88 | 1082 | from social.exceptions import NotAllowedToDisconnect
def allowed_to_disconnect(strategy, user, name, user_storage,
association_id=None, *args, **kwargs):
if not user_storage.allowed_to_disconnect(user, name, association_id):
raise NotAllowedToDisconnect()
def get_entries(strategy, user, name, user_storage, association_id=None,
*args, **kwargs):
return {
'entries': user_storage.get_social_auth_for_user(
user, name, association_id
)
}
def revoke_tokens(strategy, entries, *args, **kwargs):
revoke_tokens = strategy.setting('REVOKE_TOKENS_ON_DISCONNECT', False)
if revoke_tokens:
for entry in entries:
if 'access_token' in entry.extra_data:
backend = entry.get_backend(strategy)(strategy)
backend.revoke_token(entry.extra_data['access_token'],
entry.uid)
def disconnect(strategy, entries, user_storage, *args, **kwargs):
for entry in entries:
user_storage.disconnect(entry)
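

# Added usage sketch (not part of the original module): these four functions are
# meant to run in sequence as a disconnect pipeline. In a Django-style
# integration they would typically be listed, in order, in a settings tuple
# along the lines of the one below; the setting name and exact dotted paths are
# assumptions and may differ for other framework integrations.
#
#   SOCIAL_AUTH_DISCONNECT_PIPELINE = (
#       'social.pipeline.disconnect.allowed_to_disconnect',
#       'social.pipeline.disconnect.get_entries',
#       'social.pipeline.disconnect.revoke_tokens',
#       'social.pipeline.disconnect.disconnect',
#   )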
| bsd-3-clause |
Hubert51/AutoGrading | learning/number_recognization/test.py | 1 | 1250 | from pytesseract import image_to_string
from PIL import Image
import cv2
import numpy
import sys
if __name__ == '__main__':
f = open("test1.txt")
f = f.read()
for element in f:
str1 = element
position = ((712, 571), (725, 587))
dh = position[1][1] - position[0][1]
upper = position[0][1] - 2 * dh
lower = position[1][1] + int(3.5 * dh)
left = position[1][0]
print(upper,lower, left)
img = cv2.imread('answerSheet_with_name.png')
#image = Image.open('answerSheet_with_name.png')
    img = img[upper:lower, left:img.shape[1]]  # crop rows upper:lower, columns from left to the right edge
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
thresh = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,6)
cv2.imshow("hello", img)
################# Now finding Contours ###################
img,contours,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, -1, (0, 0, 255),1)
im = Image.fromarray(img, 'RGB')
file = open("image_to_string.txt", "w")
# box = image_to_string(image).split('\n')
file.write(image_to_string(im))
#file.write(image_to_string(image))
file.close()
| mit |
bblais/Tech-SIE | Estimating_Proportion/Estimating_Proportion.py | 1 | 4755 |
# coding: utf-8
# #Statistical Inference for Everyone: Technical Supplement
#
#
#
# This document is the technical supplement, for instructors, for [Statistical Inference for Everyone], the introductory statistical inference textbook from the perspective of "probability theory as logic".
#
# <img src="http://web.bryant.edu/~bblais/images/Saturn_with_Dice.png" align=center width = 250px />
#
# [Statistical Inference for Everyone]: http://web.bryant.edu/~bblais/statistical-inference-for-everyone-sie.html
#
# ## Estimating a Proportion
#
# $$\newcommand{\twocvec}[2]{\left(\begin{array}{c}
# #1 \\\\ #2
# \end{array}\right)}
# \newcommand{\nchoosek}[2]{\twocvec{#1}{#2}}
# $$
#
# If $\theta$ is the model representing the probability, $\theta$, of the coin
# landing on heads (and $1-\theta$ is the probability of landing on tails), we
# need to make an estimate of probability of model $\theta$ being true given the
# data, which will consist of $N$ flips of which $h$ are heads.
#
# Bayes rule is:
# \begin{eqnarray}
# p(\theta|D,I) &=& \frac{p(D|\theta,I)p(\theta|I)}{p(D|I)} =
# \frac{p(D|\theta,I)p(\theta|I)}{\sum_\theta p(D|\theta,I)p(\theta|I)}
# \end{eqnarray}
#
# Thus, the probability of a particular model $\theta$ being true is the product
# of the probability of the observed data ($h$ heads in $N$ flips) given the
# model $\theta$ and the prior probability of the model $\theta$ being true
# before we even look at the data, divided by the probability of the data itself
# over all models.
#
# The prior probability of model $\theta$ will be assumed to be uniform (from
# maximum entropy considerations). The probability, $\theta$, ranges from 0 to
# 1, so the prior is
# \begin{eqnarray}
# p(\theta|I) = 1
# \end{eqnarray}
#
# The probability of the data given the model $\theta$ is just the binomial
# distribution:
#
# \begin{eqnarray}
# p(D|\theta)=\nchoosek{N}{h} \theta^h (1-\theta)^{N-h}
# \end{eqnarray}
#
# The probability of the data, $p(D|I)$, is found by summing (or in this case
# integrating) $p(D|\theta,I)p(\theta|I)$ for all $\theta$:
#
# \begin{eqnarray}
# p(D|I) &=& \int_0^1 \nchoosek{N}{h} \theta^h (1-\theta)^{N-h} \cdot 1 d\theta
# \\\\
# &=&\frac{N!}{h!(N-h)!} \frac{h!(N-h)!}{(N+1)!} = \frac{1}{N+1}
# \end{eqnarray}
#
# Now the probability of model $\theta$ being true, given the data, is just
#
# \begin{eqnarray}
# p(\theta|D,I)&=& (N+1) \cdot \nchoosek{N}{h} \theta^h (1-\theta)^{N-h} \\
# &=& \frac{(N+1)!}{h!(N-h)!} \theta^h (1-\theta)^{N-h}
# \end{eqnarray}
#
#
# ### Max, Mean, Variance
#
# The model with the maximum probability is found by maximizing $p(\theta|D,I)$
# w.r.t. $\theta$:
#
# \begin{eqnarray}
# \frac{dP(\theta|D,I)}{d\theta} &=& 0 = \frac{(N+1)!}{h!(N-h)!} \left(
# -(N-h) \theta^h (1-\theta)^{N-h-1} + h \theta^{h-1} (1-\theta)^{N-h} \right) \\\\
# (N-h) \theta^h (1-\theta)^{N-h-1} &=& h \theta^{h-1} (1-\theta)^{N-h} \\\\
# \theta(N-h) &=& (1-\theta) h = h-\theta h = N\theta-\theta h \\\\
# \theta&=&\frac{h}{N} \;\;\;\;\;\surd
# \end{eqnarray}
#
# The average and the standard deviation is also straightforward.
#
#
# \begin{eqnarray}
# \bar{\theta} &=& \int_0^1 \theta \cdot \frac{(N+1)!}{h!(N-h)!} \theta^h (1-\theta)^{N-h} \\\\
# &=& \frac{(N+1)!}{h!(N-h)!} \int_0^1 \theta^{h+1} (1-\theta)^{N-h} \\\\
# &=&\frac{(N+1)!}{h!(N-h)!} \frac{(h+1)!(N-h)!}{(N+2)!} \\\\
# &=&\frac{h+1}{N+2} \\\\
# \bar{\theta^2} &=& \int_0^1 \theta^2 \cdot \frac{(N+1)!}{h!(N-h)!} \theta^h (1-\theta)^{N-h} \\\\
# &=&\frac{(N+1)!}{h!(N-h)!} \frac{(h+2)!(N-h)!}{(N+3)!} \\\\
# &=&\frac{(h+1)(h+2)}{(N+2)(N+3)} \\\\
# \sigma^2 &=& \bar{\theta^2} - \bar{\theta}^2 = \frac{(h+1)(h+2)}{(N+2)(N+3)} -
# \frac{(h+1)(h+1)}{(N+2)(N+2)} \\\\
# &=&\frac{(h+1)(N-h+1)}{(N+2)^2(N+3)} \\\\
# &=& \frac{(h+1)}{(N+2)}\left( \frac{N+2}{N+2} - \frac{h+1}{N+2}\right)
# \frac{1}{N+3} \\\\
# &=& \bar{\theta}(1-\bar{\theta})\frac{1}{N+3}
# \end{eqnarray}
#
# ### An Approximation for the Variance
#
# If $f=h/N$ is the actual fraction of heads observed, then the variance above
# can be written as
# \begin{eqnarray}
# \sigma^2 &=&\frac{(fN+1)(N-fN+1)}{(N+2)^2(N+3)} \\\\
# \mbox{(for large $N$)}&\approx& \frac{(fN+1)(N-fN)}{N^3}
# =\frac{(fN+1)(1-f)}{N^2} \\\\
# \mbox{(for large $fN$)}&\approx& \frac{(fN)(N-fN)}{N^3} = \frac{f(1-f)}{N} \\\\
# \sigma^2&\approx& \frac{f(1-f)}{N}
# \end{eqnarray}
#
# In this limit, the distribution (beta distribution) can be approximated with a
# Gaussian.
#
# In[11]:
# ---------------------
# In[8]:
from IPython.core.display import HTML
def css_styling():
styles = open("../styles/custom.css", "r").read()
return HTML(styles)
css_styling()
| mit |
gpndata/grpc | src/python/grpcio_test/grpc_test/framework/interfaces/face/test_cases.py | 14 | 3176 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tools for creating tests of implementations of the Face layer."""
# unittest is referenced from specification in this module.
import unittest # pylint: disable=unused-import
# test_interfaces is referenced from specification in this module.
from grpc_test.framework.interfaces.face import _blocking_invocation_inline_service
from grpc_test.framework.interfaces.face import _event_invocation_synchronous_event_service
from grpc_test.framework.interfaces.face import _future_invocation_asynchronous_event_service
from grpc_test.framework.interfaces.face import _invocation
from grpc_test.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
_TEST_CASE_SUPERCLASSES = (
_blocking_invocation_inline_service.TestCase,
_event_invocation_synchronous_event_service.TestCase,
_future_invocation_asynchronous_event_service.TestCase,
)
def test_cases(implementation):
"""Creates unittest.TestCase classes for a given Face layer implementation.
Args:
implementation: A test_interfaces.Implementation specifying creation and
destruction of a given Face layer implementation.
Returns:
A sequence of subclasses of unittest.TestCase defining tests of the
specified Face layer implementation.
"""
test_case_classes = []
for invoker_constructor in _invocation.invoker_constructors():
for super_class in _TEST_CASE_SUPERCLASSES:
test_case_classes.append(
type(invoker_constructor.name() + super_class.NAME, (super_class,),
{'implementation': implementation,
'invoker_constructor': invoker_constructor}))
return test_case_classes
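

# Added usage sketch (not part of the original module): the returned classes are
# plain unittest.TestCase subclasses, so they can be collected into a suite with
# the standard loader. `MyImplementation` below is a hypothetical
# test_interfaces.Implementation subclass, not something defined here.
#
#   loader = unittest.TestLoader()
#   suite = unittest.TestSuite(
#       loader.loadTestsFromTestCase(klass)
#       for klass in test_cases(MyImplementation()))
#   unittest.TextTestRunner().run(suite)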
| bsd-3-clause |
openaire/iis | iis-3rdparty-madis/src/main/resources/eu/dnetlib/iis/3rdparty/scripts/madis/functions/row/util.py | 4 | 10746 | # coding: utf-8
import setpath
from gzip import zlib
import subprocess
import functions
import time
import urllib2
import urllib
from lib import jopts
from functions.conf import domainExtraHeaders
import lib.gzip32 as gzip
try:
from collections import OrderedDict
except ImportError:
# Python 2.6
from lib.collections26 import OrderedDict
def gz(*args):
"""
.. function:: gz(text) -> gzip compressed blob
Function *gz* compresses its input with gzip's maximum compression level.
Examples:
>>> table1('''
... "qwerqewrqwerqwerqwerqwerqwer"
... "asdfasdfasdfasdfasdfasdfsadf"
... ''')
>>> sql("select length(a), length(gz(a)) from table1")
length(a) | length(gz(a))
-------------------------
28 | 20
28 | 18
"""
return buffer(zlib.compress(args[0], 9))
gz.registered=True
def ungz(*args):
"""
.. function:: ungz(blob) -> text
Function *ungz* decompresses gzip blobs. If the input blobs aren't gzip
compressed, then it just returns them as they are.
Examples:
>>> table1('''
... "qwerqwerqwer"
... "asdfasdfasdf"
... ''')
>>> sql("select ungz(gz(a)) from table1")
ungz(gz(a))
------------
qwerqwerqwer
asdfasdfasdf
>>> sql("select ungz('string'), ungz(123)")
ungz('string') | ungz(123)
--------------------------
string | 123
"""
try:
return zlib.decompress(args[0])
except KeyboardInterrupt:
raise
except:
return args[0]
ungz.registered=True
def urlrequest(*args):
"""
.. function:: urlrequest([null], url) -> response
This functions connects to the *url* (via GET HTTP method) and returns the request's result. If first
parameter is *null*, then in case of errors *null* will be returned.
Examples:
>>> sql("select urlrequest('http://www.google.com/not_existing')")
Traceback (most recent call last):
...
HTTPError: HTTP Error 404: Not Found
>>> sql("select urlrequest(null, 'http://www.google.com/not_existing') as result")
result
------
None
"""
try:
req = urllib2.Request(''.join((x for x in args if x != None)), None, domainExtraHeaders)
hreq = urllib2.urlopen(req)
if [1 for x,y in hreq.headers.items() if x.lower() in ('content-encoding', 'content-type') and y.lower().find('gzip')!=-1]:
hreq = gzip.GzipFile(fileobj=hreq)
return unicode(hreq.read(), 'utf-8', errors = 'replace')
except urllib2.HTTPError,e:
if args[0] == None:
return None
else:
raise e
urlrequest.registered=True
def urlrequestpost(*args):
"""
.. function:: urlrequestpost(data_jdict, [null], url) -> response
This functions connects to the *url* (via POST HTTP method), submits the *data_jdict*, and returns the request's result. If second
parameter is *null*, then in case of errors *null* will be returned.
Examples:
>>> sql('''select urlrequestpost('{"POST_param_name":"data"}', 'http://www.google.com/not_existing')''')
Traceback (most recent call last):
...
HTTPError: HTTP Error 404: Not Found
>>> sql('''select urlrequestpost('["POST_param_name","data"]', null, 'http://www.google.com/not_existing') as result''')
result
------
None
>>> sql("select urlrequestpost(jdict('param1','value1'), null, 'http://www.google.com/not_existing') as result")
result
------
None
>>> sql("select urlrequestpost(jpack('param1','value1'), null, 'http://www.google.com/not_existing') as result")
result
------
None
"""
try:
req = urllib2.Request(''.join((x for x in args[1:] if x != None)), None, domainExtraHeaders)
datain = jopts.fromjsingle(args[0])
dataout = []
if type(datain) == list:
for i in xrange(0, len(datain), 2):
dataout.append((datain[i].encode('utf_8'), datain[i+1].encode('utf_8')))
else:
dataout = [( x.encode('utf_8'), y.encode('utf_8') ) for x,y in datain.items()]
if dataout == []:
raise functions.OperatorError('urlrequestpost',"A list or dict should be provided")
hreq = urllib2.urlopen(req, urllib.urlencode(dataout))
if [1 for x,y in hreq.headers.items() if x.lower() in ('content-encoding', 'content-type') and y.lower().find('gzip')!=-1]:
hreq = gzip.GzipFile(fileobj=hreq)
return unicode(hreq.read(), 'utf-8', errors = 'replace')
except urllib2.HTTPError,e:
if args[1] == None:
return None
else:
raise e
urlrequestpost.registered=True
def failif(*args):
"""
.. function:: failif(condition [, messsage])
If condition is true, raises an error. If message is provided, the message is included in
raised error.
Examples:
>>> sql("select failif(1=1,'exception') as answer") #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
OperatorError: Madis SQLError:
Operator FAILIF: exception
>>> sql("select failif(1=0,'exception') as answer") #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
answer
------
0
>>> sql("select failif(1=1) as answer") #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
OperatorError: Madis SQLError:
Operator FAILIF: an error was found
"""
    if len(args)>2:
        raise functions.OperatorError('failif','operator needs one or two inputs')
if args[0]:
if len(args)==2:
raise functions.OperatorError('failif', args[1])
else:
raise functions.OperatorError('failif', 'an error was found')
return args[0]
failif.registered=True
def execprogram(*args):
"""
.. function:: execprogram(stdin=null, program_name, parameters, [raise_error]) -> text or blob
Function *execprogram* executes a shell command and returns its output. If the
value of the first argument is not *null*, the arguments value will be pushed in program's Standard Input.
If the program doesn't return a *0* return code, then a madIS error will be raised, containing
the contents of the program's error stream.
If the last argument of *execprogram* is set to *null*, then all program errors will be returned as *null*
(see "cat non_existent_file" examples below).
Every one of the program's parameters must be provided as different arguments of the *execprogram* call
(see "cat -n" example below).
.. note::
Function *execprogram* tries by default to convert the program's output to UTF-8. If the conversion
isn't succesfull, then it returns the output as a binary blob.
Examples:
>>> table1('''
... echo test
... echo 1
... ''')
>>> sql("select execprogram(null, a, b) from table1")
execprogram(null, a, b)
-----------------------
test
1
>>> sql("select execprogram(null, null, '-l')") #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
OperatorError: Madis SQLError:
Operator EXECPROGRAM: Second parameter should be the name of the program to run
>>> sql("select execprogram(null, null, '-l', null)") #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
execprogram(null, null, '-l', null)
-----------------------------------
None
>>> sql("select execprogram('test', 'cat')")
execprogram('test', 'cat')
--------------------------
test
>>> sql('''select execprogram('test', 'cat', '-n')''') #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
execprogram('test', 'cat', '-n')
--------------------------------
1 test
>>> sql("select execprogram(null, 'NON_EXISTENT_PROGRAM')") #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
OperatorError: Madis SQLError:
Operator EXECPROGRAM: [Errno 2] No such file or directory
>>> sql("select execprogram(null, 'cat', 'non_existent_file')") #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
OperatorError: Madis SQLError:
Operator EXECPROGRAM: cat: non_existent_file: No such file or directory
>>> sql("select execprogram(null, 'cat', 'non_existent_file', null)") #doctest:+ELLIPSIS +NORMALIZE_WHITESPACE
execprogram(null, 'cat', 'non_existent_file', null)
---------------------------------------------------
None
"""
if len(args)<2:
raise functions.OperatorError('execprogram', "First parameter should be data to provide to program's STDIN, or null")
raise_error=False
if len(args)>2 and args[-1]==None:
raise_error=True
if args[1]==None:
if raise_error:
return None
else:
raise functions.OperatorError('execprogram', "Second parameter should be the name of the program to run")
outtext=errtext=''
try:
p=subprocess.Popen([unicode(x) for x in args[1:] if x!=None], stdin=subprocess.PIPE if args[0]!=None else None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if args[0]==None:
outtext, errtext=p.communicate()
else:
val = args[0]
valtype = type(val)
if valtype == unicode:
val = val.encode('utf-8')
if valtype in (int,float):
val = str(val)
outtext, errtext=p.communicate( val )
except Exception,e:
raise functions.OperatorError('execprogram', functions.mstr(e))
if p.returncode!=0:
if raise_error:
return None
else:
raise functions.OperatorError('execprogram', functions.mstr(errtext).strip())
try:
outtext=unicode(outtext, 'utf-8')
except KeyboardInterrupt:
raise
except:
return buffer(outtext)
return outtext
execprogram.registered=True
def sleep(*args):
"""
.. function:: sleep(seconds)
This function waits for the given number of seconds before returning. The *seconds* parameters can
be fractional (e.g. *0.1* will sleep for 100 milliseconds).
Examples:
>>> sql("select sleep(0.1)")
sleep(0.1)
----------
0.1
"""
t = args[0]
if t<0:
t=0
time.sleep(t)
return t
sleep.registered=True
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
import setpath
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
| apache-2.0 |
guymakam/Kodi-Israel | plugin.video.reshet.video/resources/appCaster/APEpgLoader.py | 3 | 1230 | # -*- coding: utf-8 -*-
'''
Created on 21/01/2012
Copyright (c) 2010-2012 Shai Bentin.
All rights reserved. Unpublished -- rights reserved
Use of a copyright notice is precautionary only, and does
not imply publication or disclosure.
Licensed under Eclipse Public License, Version 1.0
Initial Developer: Shai Bentin.
@author: shai
'''
from APLoader import APLoader
class APEpgLoader(APLoader):
'''
classdocs
'''
EPG_URI = "v{{api_version}}/accounts/{{account_id}}/broadcasters/{{broadcaster_id}}/vod_items/{{item_id}}/epg"
def __init__(self, settings, itemId = ''):
'''
Constructor
'''
super(APEpgLoader, self).__init__(settings) # call the parent constructor with the settings object
self.queryUrl = self.URL + self.EPG_URI
self.queryUrl = self.queryUrl.replace("{{api_version}}", "1" + "2")
self.queryUrl = self.queryUrl.replace("{{account_id}}", self.accountId)
self.queryUrl = self.queryUrl.replace("{{broadcaster_id}}", self.broadcasterId)
self.queryUrl = self.queryUrl.replace("{{item_id}}", itemId);
self.queryUrl = self.prepareQueryURL(self.queryUrl, None); | gpl-2.0 |
vimagick/youtube-dl | youtube_dl/extractor/thvideo.py | 151 | 3033 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
unified_strdate
)
class THVideoIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?thvideo\.tv/(?:v/th|mobile\.php\?cid=)(?P<id>[0-9]+)'
_TEST = {
'url': 'http://thvideo.tv/v/th1987/',
'md5': 'fa107b1f73817e325e9433505a70db50',
'info_dict': {
'id': '1987',
'ext': 'mp4',
'title': '【动画】秘封活动记录 ~ The Sealed Esoteric History.分镜稿预览',
'display_id': 'th1987',
'thumbnail': 'http://thvideo.tv/uploadfile/2014/0722/20140722013459856.jpg',
'description': '社团京都幻想剧团的第一个东方二次同人动画作品「秘封活动记录 ~ The Sealed Esoteric History.」 本视频是该动画第一期的分镜草稿...',
'upload_date': '20140722'
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
# extract download link from mobile player page
webpage_player = self._download_webpage(
'http://thvideo.tv/mobile.php?cid=%s-0' % (video_id),
video_id, note='Downloading video source page')
video_url = self._html_search_regex(
r'<source src="(.*?)" type', webpage_player, 'video url')
# extract video info from main page
webpage = self._download_webpage(
'http://thvideo.tv/v/th%s' % (video_id), video_id)
title = self._og_search_title(webpage)
display_id = 'th%s' % video_id
thumbnail = self._og_search_thumbnail(webpage)
description = self._og_search_description(webpage)
upload_date = unified_strdate(self._html_search_regex(
r'span itemprop="datePublished" content="(.*?)">', webpage,
'upload date', fatal=False))
return {
'id': video_id,
'ext': 'mp4',
'url': video_url,
'title': title,
'display_id': display_id,
'thumbnail': thumbnail,
'description': description,
'upload_date': upload_date
}
class THVideoPlaylistIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?thvideo\.tv/mylist(?P<id>[0-9]+)'
_TEST = {
'url': 'http://thvideo.tv/mylist2',
'info_dict': {
'id': '2',
'title': '幻想万華鏡',
},
'playlist_mincount': 23,
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
list_title = self._html_search_regex(
r'<h1 class="show_title">(.*?)<b id', webpage, 'playlist title',
fatal=False)
entries = [
self.url_result('http://thvideo.tv/v/th' + id, 'THVideo')
for id in re.findall(r'<dd><a href="http://thvideo.tv/v/th(\d+)/" target=', webpage)]
return self.playlist_result(entries, playlist_id, list_title)
| unlicense |
Giftingnation/GN-Oscar-Custom | sites/demo/apps/order/migrations/0005_auto__add_field_orderdiscount_offer_name.py | 16 | 32848 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'OrderDiscount.offer_name'
db.add_column('order_orderdiscount', 'offer_name', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, db_index=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'OrderDiscount.offer_name'
db.delete_column('order_orderdiscount', 'offer_name')
models = {
'address.country': {
'Meta': {'ordering': "('-is_highlighted', 'name')", 'object_name': 'Country'},
'is_highlighted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_shipping_country': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'iso_3166_1_a2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso_3166_1_a3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'db_index': 'True'}),
'iso_3166_1_numeric': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 10, 11, 14, 42, 12, 984329)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 10, 11, 14, 42, 12, 984227)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1024', 'db_index': 'True'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'customer.communicationeventtype': {
'Meta': {'object_name': 'CommunicationEventType'},
'category': ('django.db.models.fields.CharField', [], {'default': "u'Order related'", 'max_length': '255'}),
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email_body_html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_body_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_subject_template': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sms_template': ('django.db.models.fields.CharField', [], {'max_length': '170', 'blank': 'True'})
},
'order.billingaddress': {
'Meta': {'object_name': 'BillingAddress'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['address.Country']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'order.communicationevent': {
'Meta': {'object_name': 'CommunicationEvent'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['customer.CommunicationEventType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'communication_events'", 'to': "orm['order.Order']"})
},
'order.line': {
'Meta': {'object_name': 'Line'},
'est_dispatch_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_price_before_discounts_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_before_discounts_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'line_price_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'lines'", 'to': "orm['order.Order']"}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'order_lines'", 'null': 'True', 'to': "orm['partner.Partner']"}),
'partner_line_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'partner_line_reference': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'partner_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'partner_sku': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']", 'null': 'True', 'blank': 'True'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit_cost_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_price_incl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'unit_retail_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'order.lineattribute': {
'Meta': {'object_name': 'LineAttribute'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attributes'", 'to': "orm['order.Line']"}),
'option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_attributes'", 'null': 'True', 'to': "orm['catalogue.Option']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'order.lineprice': {
'Meta': {'ordering': "('id',)", 'object_name': 'LinePrice'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'prices'", 'to': "orm['order.Line']"}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_prices'", 'to': "orm['order.Order']"}),
'price_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'price_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'shipping_excl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_incl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'})
},
'order.order': {
'Meta': {'ordering': "['-date_placed']", 'object_name': 'Order'},
'basket_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'billing_address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.BillingAddress']", 'null': 'True', 'blank': 'True'}),
'date_placed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'guest_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'shipping_address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.ShippingAddress']", 'null': 'True', 'blank': 'True'}),
'shipping_excl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_incl_tax': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'shipping_method': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'total_excl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'total_incl_tax': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'orders'", 'null': 'True', 'to': "orm['auth.User']"})
},
'order.orderdiscount': {
'Meta': {'object_name': 'OrderDiscount'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'offer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'offer_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'discounts'", 'to': "orm['order.Order']"}),
'voucher_code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'voucher_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'order.ordernote': {
'Meta': {'object_name': 'OrderNote'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'note_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notes'", 'to': "orm['order.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'order.paymentevent': {
'Meta': {'object_name': 'PaymentEvent'},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.PaymentEventType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['order.Line']", 'through': "orm['order.PaymentEventQuantity']", 'symmetrical': 'False'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payment_events'", 'to': "orm['order.Order']"})
},
'order.paymenteventquantity': {
'Meta': {'object_name': 'PaymentEventQuantity'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_quantities'", 'to': "orm['order.PaymentEvent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.Line']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'order.paymenteventtype': {
'Meta': {'ordering': "('sequence_number',)", 'object_name': 'PaymentEventType'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'sequence_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'order.shippingaddress': {
'Meta': {'object_name': 'ShippingAddress'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['address.Country']"}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'order.shippingevent': {
'Meta': {'ordering': "['-date']", 'object_name': 'ShippingEvent'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.ShippingEventType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['order.Line']", 'through': "orm['order.ShippingEventQuantity']", 'symmetrical': 'False'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shipping_events'", 'to': "orm['order.Order']"})
},
'order.shippingeventquantity': {
'Meta': {'object_name': 'ShippingEventQuantity'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'line_quantities'", 'to': "orm['order.ShippingEvent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['order.Line']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'order.shippingeventtype': {
'Meta': {'ordering': "('sequence_number',)", 'object_name': 'ShippingEventType'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'sequence_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'partner.partner': {
'Meta': {'object_name': 'Partner'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'partners'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.User']"})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['order']
| bsd-3-clause |
peterfpeterson/mantid | scripts/Interface/reduction_gui/widgets/sans/hfir_background.py | 3 | 12637 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=invalid-name
from qtpy.QtWidgets import (QFrame) # noqa
from qtpy.QtGui import (QDoubleValidator) # noqa
import reduction_gui.widgets.util as util
from reduction_gui.reduction.sans.hfir_background_script import Background
from reduction_gui.widgets.base_widget import BaseWidget
from reduction_gui.widgets.sans.hfir_sample_data import BeamSpreader, DirectBeam
try:
from mantidqt.utils.qt import load_ui
except ImportError:
from mantid.kernel import Logger
Logger("BckDirectBeam").information('Using legacy ui importer')
from mantidplot import load_ui
class BckDirectBeam(DirectBeam):
def __init__(self, parent=None, state=None, settings=None, data_type=None, data_proxy=None):
super(BckDirectBeam, self).__init__(parent, state, settings, data_type, data_proxy=data_proxy)
if state is None:
self.set_state(Background.DirectBeam())
def get_state(self):
direct_beam = super(BckDirectBeam, self).get_state()
m = Background.DirectBeam(direct_beam)
return m
def set_state(self, state):
super(BckDirectBeam, self).set_state(state)
class BckBeamSpreader(BeamSpreader):
def __init__(self, parent=None, state=None, settings=None, data_type=None, data_proxy=None):
super(BckBeamSpreader, self).__init__(parent, state, settings, data_type, data_proxy=data_proxy)
if state is None:
self.set_state(Background.BeamSpreader())
def get_state(self):
direct_beam = super(BckBeamSpreader, self).get_state()
m = Background.BeamSpreader(direct_beam)
return m
def set_state(self, state):
super(BckBeamSpreader, self).set_state(state)
class BackgroundWidget(BaseWidget):
"""
Widget that presents the transmission options to the user
"""
_method_box = None
## Widget name
name = "Background"
def __init__(self, parent=None, state=None, settings=None, show_transmission=True, data_type=None, data_proxy=None):
super(BackgroundWidget, self).__init__(parent, state, settings, data_type, data_proxy=data_proxy)
class BckFrame(QFrame):
def __init__(self, parent=None):
QFrame.__init__(self, parent)
self.ui = load_ui(__file__, '../../../ui/sans/hfir_background.ui', baseinstance=self)
self._content = BckFrame(self)
self._layout.addWidget(self._content)
# Flag to show transmission options or not
self.show_transmission = show_transmission
self.initialize_content()
if state is not None:
self.set_state(state)
else:
m = Background()
self.set_state(m)
self._last_direct_state = None
self._last_spreader_state = None
def initialize_content(self):
"""
Declare the validators and event connections for the
widgets loaded through the .ui file.
"""
# Validators
self._content.transmission_edit.setValidator(QDoubleValidator(self._content.transmission_edit))
self._content.dtransmission_edit.setValidator(QDoubleValidator(self._content.dtransmission_edit))
#self._content.thickness_edit.setValidator(QDoubleValidator(self._content.thickness_edit))
# Connections
self._content.calculate_trans_chk.clicked.connect(self._calculate_clicked)
self._content.trans_direct_chk.clicked.connect(self._direct_beam)
self._content.trans_spreader_chk.clicked.connect(self._beam_spreader)
self._content.background_chk.clicked.connect(self._background_clicked)
self._content.background_browse.clicked.connect(self._background_browse)
self._content.trans_dark_current_button.clicked.connect(self._trans_dark_current_browse)
self._content.background_plot_button.clicked.connect(self._background_plot_clicked)
self._content.trans_dark_current_plot_button.clicked.connect(self._trans_dark_current_plot_clicked)
# Process transmission option
if not self.show_transmission:
self._content.calculate_trans_chk.hide()
self._content.bck_trans_label.hide()
self._content.bck_trans_err_label.hide()
self._content.transmission_edit.hide()
self._content.dtransmission_edit.hide()
self._content.calculate_trans_chk.hide()
self._content.theta_dep_chk.hide()
self._content.trans_direct_chk.hide()
self._content.trans_spreader_chk.hide()
self._content.trans_dark_current_label.hide()
self._content.trans_dark_current_edit.hide()
self._content.trans_dark_current_button.hide()
if not self._has_instrument_view:
self._content.background_plot_button.hide()
self._content.trans_dark_current_plot_button.hide()
def _background_plot_clicked(self):
self.show_instrument(file_name=self._content.background_edit.text)
def _trans_dark_current_plot_clicked(self):
self.show_instrument(file_name=self._content.trans_dark_current_edit.text)
def set_state(self, state):
"""
Populate the UI elements with the data from the given state.
@param state: Transmission object
"""
bck_file = str(self._content.background_edit.text()).strip()
self._content.background_chk.setChecked(state.background_corr)
self._content.background_edit.setText(state.background_file)
if state.background_file.strip() != bck_file:
self.get_data_info()
self._background_clicked(state.background_corr)
if self.show_transmission:
self._content.transmission_edit.setText(str("%6.4f" % state.bck_transmission))
self._content.dtransmission_edit.setText(str("%6.4f" % state.bck_transmission_spread))
#self._content.thickness_edit.setText("%6.4f" % state.sample_thickness)
if isinstance(state.trans_calculation_method, state.DirectBeam):
self._content.trans_direct_chk.setChecked(True)
self._direct_beam(state=state.trans_calculation_method)
else:
self._content.trans_spreader_chk.setChecked(True)
self._beam_spreader(state=state.trans_calculation_method)
self._content.calculate_trans_chk.setChecked(state.calculate_transmission)
self._content.theta_dep_chk.setChecked(state.theta_dependent)
self._content.trans_dark_current_edit.setText(str(state.trans_dark_current))
self._calculate_clicked(state.calculate_transmission)
def get_state(self):
"""
Returns an object with the state of the interface
"""
m = Background()
m.background_corr = self._content.background_chk.isChecked()
m.background_file = str(self._content.background_edit.text())
m.bck_transmission_enabled = self.show_transmission
if self.show_transmission:
#m.sample_thickness = util._check_and_get_float_line_edit(self._content.thickness_edit)
m.bck_transmission = util._check_and_get_float_line_edit(self._content.transmission_edit)
m.bck_transmission_spread = util._check_and_get_float_line_edit(self._content.dtransmission_edit)
m.calculate_transmission = self._content.calculate_trans_chk.isChecked()
m.theta_dependent = self._content.theta_dep_chk.isChecked()
m.trans_dark_current = self._content.trans_dark_current_edit.text()
if self._method_box is not None:
m.trans_calculation_method=self._method_box.get_state()
return m
def _trans_dark_current_browse(self):
fname = self.data_browse_dialog()
if fname:
self._content.trans_dark_current_edit.setText(fname)
def _direct_beam(self, state=None):
if state is None:
state = self._last_direct_state
if isinstance(self._method_box, BckBeamSpreader):
self._last_spreader_state = self._method_box.get_state()
if self.show_transmission:
self._replace_method(BckDirectBeam(self, state=state, settings=self._settings,
data_type=self._data_type, data_proxy=self._data_proxy))
def _beam_spreader(self, state=None):
if state is None:
state = self._last_spreader_state
if isinstance(self._method_box, BckDirectBeam):
self._last_direct_state = self._method_box.get_state()
if self.show_transmission:
self._replace_method(BckBeamSpreader(self, state=state, settings=self._settings,
data_type=self._data_type, data_proxy=self._data_proxy))
def _replace_method(self, widget):
if self._method_box is not None:
for i in range(0, self._content.widget_placeholder.count()):
item = self._content.widget_placeholder.itemAt(i)
self._content.widget_placeholder.removeItem(self._content.widget_placeholder.itemAt(i))
item.widget().deleteLater()
self._method_box = widget
self._content.widget_placeholder.addWidget(self._method_box)
def _background_clicked(self, is_checked):
self._content.background_edit.setEnabled(is_checked)
#self._content.thickness_edit.setEnabled(is_checked)
#self._content.thickness_label.setEnabled(is_checked)
self._content.geometry_options_groupbox.setEnabled(is_checked)
self._content.background_browse.setEnabled(is_checked)
self._content.background_plot_button.setEnabled(is_checked)
self._content.calculate_trans_chk.setEnabled(is_checked)
self._content.theta_dep_chk.setEnabled(is_checked)
self._content.bck_trans_label.setEnabled(is_checked)
self._content.bck_trans_err_label.setEnabled(is_checked)
self._content.transmission_grpbox.setEnabled(is_checked)
self._calculate_clicked(is_checked and self._content.calculate_trans_chk.isChecked())
def _background_browse(self):
fname = self.data_browse_dialog()
if fname:
bck_file = str(self._content.background_edit.text()).strip()
self._content.background_edit.setText(fname)
if str(fname).strip() != bck_file:
self.get_data_info()
def _calculate_clicked(self, is_checked):
self._content.trans_direct_chk.setEnabled(is_checked)
self._content.trans_spreader_chk.setEnabled(is_checked)
if self._method_box is not None:
self._method_box.setEnabled(is_checked)
self._content.transmission_edit.setEnabled(not is_checked and self._content.background_chk.isChecked())
self._content.dtransmission_edit.setEnabled(not is_checked and self._content.background_chk.isChecked())
self._content.trans_dark_current_label.setEnabled(is_checked)
self._content.trans_dark_current_edit.setEnabled(is_checked)
self._content.trans_dark_current_button.setEnabled(is_checked)
self._content.trans_dark_current_plot_button.setEnabled(is_checked)
def get_data_info(self):
"""
Retrieve information from the data file and update the display
"""
if self._data_proxy is None:
return
fname = str(self._content.background_edit.text())
if len(str(fname).strip())>0:
dataproxy = self._data_proxy(fname, "__background_raw")
if len(dataproxy.errors)>0:
return
self._settings.last_data_ws = dataproxy.data_ws
if dataproxy.sample_detector_distance is not None:
self._content.sample_dist_edit.setText(str(dataproxy.sample_detector_distance))
util._check_and_get_float_line_edit(self._content.sample_dist_edit, min=0.0)
if dataproxy.wavelength is not None:
self._content.wavelength_edit.setText(str(dataproxy.wavelength))
util._check_and_get_float_line_edit(self._content.wavelength_edit, min=0.0)
if dataproxy.wavelength_spread is not None:
self._content.wavelength_spread_edit.setText(str(dataproxy.wavelength_spread))
| gpl-3.0 |
enzochiau/tablib | tablib/packages/openpyxl3/reader/worksheet.py | 55 | 3839 | # file openpyxl/reader/worksheet.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Reader for a single worksheet."""
# Python stdlib imports
try:
from xml.etree.cElementTree import iterparse
except ImportError:
from xml.etree.ElementTree import iterparse
from io import StringIO
# package imports
from ..cell import Cell, coordinate_from_string
from ..worksheet import Worksheet
def _get_xml_iter(xml_source):
if not hasattr(xml_source, 'name'):
return StringIO(xml_source)
else:
xml_source.seek(0)
return xml_source
def read_dimension(xml_source):
source = _get_xml_iter(xml_source)
it = iterparse(source)
for event, element in it:
if element.tag == '{http://schemas.openxmlformats.org/spreadsheetml/2006/main}dimension':
ref = element.get('ref')
if ':' in ref:
min_range, max_range = ref.split(':')
else:
min_range = max_range = ref
min_col, min_row = coordinate_from_string(min_range)
max_col, max_row = coordinate_from_string(max_range)
return min_col, min_row, max_col, max_row
else:
element.clear()
return None
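# Hedged illustration (added; not part of the original openpyxl module): given a
# minimal worksheet XML carrying a <dimension ref="A1:C3"/> element,
# read_dimension should return the parsed bounding box ('A', 1, 'C', 3).
if __name__ == '__main__':  # illustrative only
    _demo_xml = (
        '<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">'
        '<dimension ref="A1:C3"/><sheetData/></worksheet>'
    )
    print(read_dimension(_demo_xml))  # expected: ('A', 1, 'C', 3)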
def filter_cells(item):
    (event, element) = item
    return element.tag == '{http://schemas.openxmlformats.org/spreadsheetml/2006/main}c'
def fast_parse(ws, xml_source, string_table, style_table):
source = _get_xml_iter(xml_source)
it = iterparse(source)
for event, element in filter(filter_cells, it):
value = element.findtext('{http://schemas.openxmlformats.org/spreadsheetml/2006/main}v')
if value is not None:
coordinate = element.get('r')
data_type = element.get('t', 'n')
style_id = element.get('s')
if data_type == Cell.TYPE_STRING:
value = string_table.get(int(value))
ws.cell(coordinate).value = value
if style_id is not None:
ws._styles[coordinate] = style_table.get(int(style_id))
# to avoid memory exhaustion, clear the item after use
element.clear()
from ..reader.iter_worksheet import IterableWorksheet
def read_worksheet(xml_source, parent, preset_title, string_table,
style_table, workbook_name = None, sheet_codename = None):
"""Read an xml worksheet"""
if workbook_name and sheet_codename:
ws = IterableWorksheet(parent, preset_title, workbook_name,
sheet_codename, xml_source)
else:
ws = Worksheet(parent, preset_title)
fast_parse(ws, xml_source, string_table, style_table)
return ws
| mit |
mikkylok/mikky.lu | venv/lib/python2.7/site-packages/markdown/extensions/abbr.py | 123 | 2738 | '''
Abbreviation Extension for Python-Markdown
==========================================
This extension adds abbreviation handling to Python-Markdown.
See <https://pythonhosted.org/Markdown/extensions/abbreviations.html>
for documentation.
Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/) and
[Seemant Kulleen](http://www.kulleen.org/)
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
'''
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
from ..inlinepatterns import Pattern
from ..util import etree, AtomicString
import re
# Global Vars
ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)')
class AbbrExtension(Extension):
""" Abbreviation Extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Insert AbbrPreprocessor before ReferencePreprocessor. """
md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference')
class AbbrPreprocessor(Preprocessor):
""" Abbreviation Preprocessor - parse text for abbr references. """
def run(self, lines):
'''
Find and remove all Abbreviation references from the text.
Each reference is set as a new AbbrPattern in the markdown instance.
'''
new_text = []
for line in lines:
m = ABBR_REF_RE.match(line)
if m:
abbr = m.group('abbr').strip()
title = m.group('title').strip()
self.markdown.inlinePatterns['abbr-%s' % abbr] = \
AbbrPattern(self._generate_pattern(abbr), title)
else:
new_text.append(line)
return new_text
def _generate_pattern(self, text):
'''
        Given a string, returns a regex pattern to match that string.
        'HTML' -> r'(?P<abbr>\b[H][T][M][L]\b)'
Note: we force each char as a literal match (in brackets) as we don't
know what they will be beforehand.
'''
chars = list(text)
for i in range(len(chars)):
chars[i] = r'[%s]' % chars[i]
return r'(?P<abbr>\b%s\b)' % (r''.join(chars))
class AbbrPattern(Pattern):
""" Abbreviation inline pattern. """
def __init__(self, pattern, title):
super(AbbrPattern, self).__init__(pattern)
self.title = title
def handleMatch(self, m):
abbr = etree.Element('abbr')
abbr.text = AtomicString(m.group('abbr'))
abbr.set('title', self.title)
return abbr
def makeExtension(*args, **kwargs):
return AbbrExtension(*args, **kwargs)
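# Hedged usage sketch (added; not part of the original extension). It assumes the
# parent ``markdown`` package is importable; the abbreviation reference below
# should wrap occurrences of "HTML" in an <abbr title="..."> element, roughly
# '<p>The <abbr title="Hyper Text Markup Language">HTML</abbr> spec</p>'.
if __name__ == '__main__':  # illustrative only
    import markdown
    _source = "The HTML spec\n\n*[HTML]: Hyper Text Markup Language"
    print(markdown.markdown(_source, extensions=[makeExtension()]))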
| mit |
SebDieBln/QGIS | python/ext-libs/pygments/lexers/_luabuiltins.py | 275 | 6863 | # -*- coding: utf-8 -*-
"""
pygments.lexers._luabuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the names and modules of lua functions
It is able to re-generate itself, but for adding new functions you
probably have to add some callbacks (see function module_callbacks).
Do not edit the MODULES dict by hand.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
MODULES = {'basic': ['_G',
'_VERSION',
'assert',
'collectgarbage',
'dofile',
'error',
'getfenv',
'getmetatable',
'ipairs',
'load',
'loadfile',
'loadstring',
'next',
'pairs',
'pcall',
'print',
'rawequal',
'rawget',
'rawset',
'select',
'setfenv',
'setmetatable',
'tonumber',
'tostring',
'type',
'unpack',
'xpcall'],
'coroutine': ['coroutine.create',
'coroutine.resume',
'coroutine.running',
'coroutine.status',
'coroutine.wrap',
'coroutine.yield'],
'debug': ['debug.debug',
'debug.getfenv',
'debug.gethook',
'debug.getinfo',
'debug.getlocal',
'debug.getmetatable',
'debug.getregistry',
'debug.getupvalue',
'debug.setfenv',
'debug.sethook',
'debug.setlocal',
'debug.setmetatable',
'debug.setupvalue',
'debug.traceback'],
'io': ['io.close',
'io.flush',
'io.input',
'io.lines',
'io.open',
'io.output',
'io.popen',
'io.read',
'io.tmpfile',
'io.type',
'io.write'],
'math': ['math.abs',
'math.acos',
'math.asin',
'math.atan2',
'math.atan',
'math.ceil',
'math.cosh',
'math.cos',
'math.deg',
'math.exp',
'math.floor',
'math.fmod',
'math.frexp',
'math.huge',
'math.ldexp',
'math.log10',
'math.log',
'math.max',
'math.min',
'math.modf',
'math.pi',
'math.pow',
'math.rad',
'math.random',
'math.randomseed',
'math.sinh',
'math.sin',
'math.sqrt',
'math.tanh',
'math.tan'],
'modules': ['module',
'require',
'package.cpath',
'package.loaded',
'package.loadlib',
'package.path',
'package.preload',
'package.seeall'],
'os': ['os.clock',
'os.date',
'os.difftime',
'os.execute',
'os.exit',
'os.getenv',
'os.remove',
'os.rename',
'os.setlocale',
'os.time',
'os.tmpname'],
'string': ['string.byte',
'string.char',
'string.dump',
'string.find',
'string.format',
'string.gmatch',
'string.gsub',
'string.len',
'string.lower',
'string.match',
'string.rep',
'string.reverse',
'string.sub',
'string.upper'],
'table': ['table.concat',
'table.insert',
'table.maxn',
'table.remove',
'table.sort']}
if __name__ == '__main__':
import re
import urllib
import pprint
# you can't generally find out what module a function belongs to if you
# have only its name. Because of this, here are some callback functions
    # that recognize if a given function belongs to a specific module
def module_callbacks():
def is_in_coroutine_module(name):
return name.startswith('coroutine.')
def is_in_modules_module(name):
if name in ['require', 'module'] or name.startswith('package'):
return True
else:
return False
def is_in_string_module(name):
return name.startswith('string.')
def is_in_table_module(name):
return name.startswith('table.')
def is_in_math_module(name):
return name.startswith('math')
def is_in_io_module(name):
return name.startswith('io.')
def is_in_os_module(name):
return name.startswith('os.')
def is_in_debug_module(name):
return name.startswith('debug.')
return {'coroutine': is_in_coroutine_module,
'modules': is_in_modules_module,
'string': is_in_string_module,
'table': is_in_table_module,
'math': is_in_math_module,
'io': is_in_io_module,
'os': is_in_os_module,
'debug': is_in_debug_module}
def get_newest_version():
f = urllib.urlopen('http://www.lua.org/manual/')
r = re.compile(r'^<A HREF="(\d\.\d)/">Lua \1</A>')
for line in f:
m = r.match(line)
if m is not None:
return m.groups()[0]
def get_lua_functions(version):
f = urllib.urlopen('http://www.lua.org/manual/%s/' % version)
r = re.compile(r'^<A HREF="manual.html#pdf-(.+)">\1</A>')
functions = []
for line in f:
m = r.match(line)
if m is not None:
functions.append(m.groups()[0])
return functions
def get_function_module(name):
for mod, cb in module_callbacks().iteritems():
if cb(name):
return mod
if '.' in name:
return name.split('.')[0]
else:
return 'basic'
def regenerate(filename, modules):
f = open(filename)
try:
content = f.read()
finally:
f.close()
header = content[:content.find('MODULES = {')]
footer = content[content.find("if __name__ == '__main__':"):]
f = open(filename, 'w')
f.write(header)
f.write('MODULES = %s\n\n' % pprint.pformat(modules))
f.write(footer)
f.close()
def run():
version = get_newest_version()
print '> Downloading function index for Lua %s' % version
functions = get_lua_functions(version)
print '> %d functions found:' % len(functions)
modules = {}
for full_function_name in functions:
print '>> %s' % full_function_name
m = get_function_module(full_function_name)
modules.setdefault(m, []).append(full_function_name)
regenerate(__file__, modules)
run()
| gpl-2.0 |
blackye/luscan-devel | thirdparty_libs/django/db/backends/dummy/base.py | 106 | 2276 | """
Dummy database backend for Django.
Django uses this if the database ENGINE setting is empty (None or empty string).
Each of these API functions, except connection.close(), raises
ImproperlyConfigured.
"""
from django.core.exceptions import ImproperlyConfigured
from django.db.backends import *
from django.db.backends.creation import BaseDatabaseCreation
def complain(*args, **kwargs):
raise ImproperlyConfigured("settings.DATABASES is improperly configured. "
"Please supply the ENGINE value. Check "
"settings documentation for more details.")
def ignore(*args, **kwargs):
pass
class DatabaseError(Exception):
pass
class IntegrityError(DatabaseError):
pass
class DatabaseOperations(BaseDatabaseOperations):
quote_name = complain
class DatabaseClient(BaseDatabaseClient):
runshell = complain
class DatabaseCreation(BaseDatabaseCreation):
create_test_db = ignore
destroy_test_db = ignore
class DatabaseIntrospection(BaseDatabaseIntrospection):
get_table_list = complain
get_table_description = complain
get_relations = complain
get_indexes = complain
get_key_columns = complain
class DatabaseWrapper(BaseDatabaseWrapper):
operators = {}
# Override the base class implementations with null
# implementations. Anything that tries to actually
# do something raises complain; anything that tries
# to rollback or undo something raises ignore.
_commit = complain
_rollback = ignore
enter_transaction_management = complain
leave_transaction_management = ignore
set_dirty = complain
set_clean = complain
commit_unless_managed = complain
rollback_unless_managed = ignore
savepoint = ignore
savepoint_commit = complain
savepoint_rollback = ignore
close = ignore
cursor = complain
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = BaseDatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
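# Hedged illustration (added; not part of Django itself): with an empty ENGINE,
# as in the hypothetical settings fragment below, Django falls back to this
# dummy backend, so any attempt to use the connection raises
# ImproperlyConfigured via complain().
EXAMPLE_SETTINGS_DATABASES = {
    'default': {
        'ENGINE': '',  # empty ENGINE -> django.db.backends.dummy
    },
}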
| gpl-2.0 |
wylee/django-local-settings | src/local_settings/util.py | 1 | 5070 | import importlib
import io
import os
import dotenv
NO_DEFAULT = type(
"NO_DEFAULT",
(),
{
"__nonzero__": (lambda self: False), # Python 2
"__bool__": (lambda self: False), # Python 3
"__str__": (lambda self: self.__class__.__name__),
"__repr__": (lambda self: str(self)),
"__copy__": (lambda self: self),
},
)()
def get_file_name():
"""Get local settings file from environ or discover it.
If the ``LOCAL_SETTINGS_FILE`` environment variable is set, its
value is returned directly.
Otherwise, the current working directory is searched for
`local.{ext}` for each file extension handled by each loading
:mod:`strategy`. Note that the search is done in alphabetical order
so that if ``local.cfg`` and ``local.yaml`` both exist, the former
will be returned.
Returns:
str: File name if set via environ or discovered
None: File name isn't set and wasn't discovered
"""
file_name = os.environ.get("LOCAL_SETTINGS_FILE")
if file_name:
return file_name
cwd = os.getcwd()
default_file_names = get_default_file_names()
for file_name in default_file_names:
file_name = os.path.join(cwd, file_name)
if os.path.exists(file_name):
return file_name
def get_default_file_names():
"""Get default file names for all loading strategies, sorted."""
from .strategy import get_file_type_map # noqa: Avoid circular import
return sorted(f"local.{ext}" for ext in get_file_type_map())
def parse_file_name_and_section(
file_name, section=None, extender=None, extender_section=None
):
"""Parse file name and (maybe) section.
File names can be absolute paths, relative paths, or asset
specs::
/home/user/project/local.cfg
local.cfg
some.package:local.cfg
File names can also include a section::
some.package:local.cfg#dev
If a ``section`` is passed, it will take precedence over a
section parsed out of the file name.
"""
if "#" in file_name:
file_name, parsed_section = file_name.rsplit("#", 1)
else:
parsed_section = None
if ":" in file_name:
file_name = asset_path(file_name)
if extender:
if not file_name:
# Extended another section in the same file
file_name = extender
elif not os.path.isabs(file_name):
# Extended by another file in the same directory
file_name = abs_path(file_name, relative_to=os.path.dirname(extender))
if section:
pass
elif parsed_section:
section = parsed_section
elif extender_section:
section = extender_section
else:
section = None
return file_name, section
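# Hedged illustration (added; not part of the original module): a file name may
# carry a trailing "#section"; an explicitly passed section wins over the one
# parsed from the file name.
if __name__ == '__main__':  # illustrative only
    assert parse_file_name_and_section('local.cfg#dev') == ('local.cfg', 'dev')
    assert parse_file_name_and_section('local.cfg#dev', 'prod') == ('local.cfg', 'prod')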
# Path utilities
def abs_path(path, relative_to=None):
"""Make path absolute and normalize it."""
if os.path.isabs(path):
path = os.path.normpath(path)
elif ":" in path:
path = asset_path(path)
else:
path = os.path.expanduser(path)
if relative_to:
path = os.path.join(relative_to, path)
path = os.path.abspath(path)
path = os.path.normpath(path)
return path
def asset_path(path):
"""Get absolute path from asset spec and normalize it."""
if ":" in path:
package_name, rel_path = path.split(":", 1)
else:
package_name, rel_path = path, ""
try:
package = importlib.import_module(package_name)
except ImportError:
raise ValueError(
f"Could not get asset path for {path}; could not import "
f"package: {package_name}"
)
if not hasattr(package, "__file__"):
raise ValueError("Can't compute path relative to namespace package")
package_path = os.path.dirname(package.__file__)
if rel_path:
path = os.path.join(package_path, rel_path)
path = os.path.normpath(path)
return path
def dotenv_path(path=None, relative_to=None, file_name=".env"):
"""Get .env path.
If a path is specified, convert it to an absolute path. Otherwise,
use the default, "./.env".
.. note:: By default, the dotenv package discovers the default .env
file relative to the call site, so we have to tell it use CWD.
"""
if path:
path = abs_path(path, relative_to)
else:
path = dotenv.find_dotenv(filename=file_name, usecwd=True)
return path
def load_dotenv(path=None, relative_to=None, file_name=".env"):
"""Load vars from dotenv file into environ."""
path = dotenv_path(path, relative_to, file_name)
dotenv.load_dotenv(path)
# These TTY functions were copied from Invoke
def is_a_tty(stream):
if hasattr(stream, "isatty") and callable(stream.isatty):
return stream.isatty()
elif has_fileno(stream):
return os.isatty(stream.fileno())
return False
def has_fileno(stream):
try:
return isinstance(stream.fileno(), int)
except (AttributeError, io.UnsupportedOperation):
return False
| mit |
ngageoint/voxel-globe | voxel_globe/tests/tasks.py | 2 | 1124 | from voxel_globe.common_tasks import shared_task, VipTask
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@shared_task(base=VipTask, bind=True)
def success(self):
import time
time.sleep(0.5)
return 123
@shared_task(base=VipTask, bind=True)
def python_crash(self):
import time
x = 15
time.sleep(0.5)
x += 5
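  # 'ok' is intentionally undefined, so this task raises a NameError on purpose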
ok()
return -321
@shared_task(base=VipTask, bind=True)
def python_segfault(self):
import time
time.sleep(0.5)
from types import CodeType as code
#Guaranteed segfault https://wiki.python.org/moin/CrashingPython
exec code(0, 5, 8, 0, "hello moshe", (), (), (), "", "", 0, "")
return -111
@shared_task(base=VipTask, bind=True, routing_key="gpu")
def run_ocl_info(self):
import os
print os.getpid()
import boxm2_adaptor as b
b.ocl_info()
@shared_task(base=VipTask, bind=True)
def run_ocl_info2(self):
import os
print os.getpid()
import boxm2_adaptor as b
b.ocl_info()
@shared_task(base=VipTask, bind=True)
def add(self, a, b, pause=None):
if pause:
import time
time.sleep(pause)
print 'a + b = %s + %s' %(a, b)
return a+b
| mit |
ngageoint/scale | scale/data/models.py | 1 | 24039 | """Defines the database models for datasets"""
from __future__ import absolute_import, unicode_literals
import copy
import logging
from collections import namedtuple
import django.contrib.postgres.fields
from django.db import models, transaction
from django.db.models import Q, Count
from data.data import data_util
from data.data.json.data_v6 import convert_data_to_v6_json, DataV6
from data.data.exceptions import InvalidData
from data.data.value import FileValue
from data.dataset.dataset import DataSetDefinition
from data.dataset.json.dataset_v6 import convert_definition_to_v6_json, DataSetDefinitionV6
from data.exceptions import InvalidDataSetDefinition, InvalidDataSetMember
from data.serializers import DataSetFileSerializerV6, DataSetMemberSerializerV6
from storage.models import ScaleFile
from util import rest as rest_utils
from util.database import alphabetize
logger = logging.getLogger(__name__)
DataSetValidation = namedtuple('DataSetValidation', ['is_valid', 'errors', 'warnings'])
# DataSetKey = namedtuple('DataSetKey', ['name', 'version'])
class DataSetManager(models.Manager):
"""Provides additional methods for handling datasets"""
def create_dataset_v6(self, definition, title=None, description=None):
        Creates and returns a new dataset with the given definition, title, and description
:param definition: Parameter definition of the dataset
:type definition: :class:`data.dataset.dataset.DataSetDefinition`
:param title: Optional title of the dataset
:type title: string
:param description: Optional description of the dataset
:type description: string
:returns: The new dataset
:rtype: :class:`data.models.DataSet`
        :raises :class:`data.exceptions.InvalidDataSet`: If a given dataset has an invalid value
"""
if not definition:
definition = DataSetDefinition(definition={})
dataset = DataSet()
dataset.title = title
dataset.description = description
dataset.definition = definition.get_dict()
dataset.save()
return dataset
def get_details_v6(self, dataset_id):
"""Gets additional details for the given dataset id
:returns: The full dataset for the given id
:rtype: :class:`data.models.DataSet`
"""
ds = DataSet.objects.get(pk=dataset_id)
ds.files = DataSetFile.objects.get_dataset_files(ds.id)
return ds
def get_datasets_v6(self, started=None, ended=None, dataset_ids=None, keywords=None, order=None):
"""Handles retrieving datasets - possibly filtered and ordered
:returns: The list of datasets that match the given filters
:rtype: [:class:`data.models.DataSet`]
"""
return self.filter_datasets(started=started, ended=ended, dataset_ids=dataset_ids, keywords=keywords, order=order)
def filter_datasets(self, started=None, ended=None, dataset_ids=None, keywords=None, order=None):
"""Returns a query for dataset models that filters on the given fields
:param started: Query datasets created after this amount of time.
:type started: :class:`datetime.datetime`
:param ended: Query datasets created before this amount of time.
:type ended: :class:`datetime.datetime`
        :param dataset_ids: Query datasets associated with the given id(s)
:type dataset_ids: :func:`list`
:param keywords: Query datasets with title or description matching one of the specified keywords
:type keywords: :func:`list`
:param order: A list of fields to control the sort order.
:type order: :func:`list`
:returns: The dataset query
:rtype: :class:`django.db.models.QuerySet`
"""
# Fetch a list of the datasets
datasets = self.all()
# Apply time range filtering
if started:
datasets = datasets.filter(created__gte=started)
if ended:
datasets = datasets.filter(created__lte=ended)
# Apply additional filters
if dataset_ids:
datasets = datasets.filter(id__in=dataset_ids)
# Execute a sub-query that returns distinct job type names that match the provided filter arguments
if keywords:
key_query = Q()
for keyword in keywords:
key_query |= Q(title__icontains=keyword)
key_query |= Q(description__icontains=keyword)
datasets = datasets.filter(key_query)
# Apply sorting
if order:
ordering = alphabetize(order, DataSet.ALPHABETIZE_FIELDS)
datasets = datasets.order_by(*ordering)
else:
datasets = datasets.order_by('id')
for ds in datasets:
files = DataSetFile.objects.get_file_ids(dataset_ids=[ds.id])
ds.files = len(files)
return datasets
def validate_dataset_v6(self, definition, title=None, description=None):
"""Validates the given dataset definiton
:param definition: The dataset definition
:type definition: dict
:returns: The dataset validation
        :rtype: :class:`data.models.DataSetValidation`
"""
is_valid = True
errors = []
warnings = []
dataset_definition = None
try:
dataset_definition = DataSetDefinitionV6(definition=definition, do_validate=True)
except InvalidDataSetDefinition as ex:
is_valid = False
errors.append(ex.error)
message = 'Dataset definition is invalid: %s' % ex
logger.info(message)
pass
# validate other fields
return DataSetValidation(is_valid, errors, warnings)
def get_dataset_files(self, dataset_id):
"""Returns the files associated with the given dataset
:returns: The list of DataSetFiles matching the file_id
:rtype: [:class:`data.models.DataSetFile`]
"""
files = DataSetFile.objects.get_dataset_files(dataset_id=dataset_id)
return files
def get_dataset_members(self, dataset_id):
"""Returns the members associated with the given dataset_id
:returns: The list of DataSetMembers
:rtype: [:class:`data.models.DataSetMember`]
"""
dataset = self.get(pk=dataset_id)
members = DataSetMember.objects.all().filter(dataset=dataset)
return members
class DataSet(models.Model):
"""
Represents a DataSet object
:keyword name: The identifying name of the dataset used by clients for queries
:type name: :class:`django.db.models.CharField`
:keyword version: The version of the dataset
:type version: :class:`django.db.models.CharField`
:keyword version_array: The version of the dataset split into SemVer integer components (major,minor,patch,prerelease)
:type version_array: :func:`list`
:keyword title: The human-readable title of this dataset (optional)
:type title: :class:`django.db.models.CharField`
:keyword description: The description of the dataset (optional)
:type description: :class:`django.db.models.CharField`
:keyword created: Defines the created time of the dataset
:type created: :class:`django.db.models.DateTimeField`
:keyword definition: Defines the dataset
:type definition: class:`django.contrib.postgres.fields.JSONField`
"""
ALPHABETIZE_FIELDS = ['title', 'description']
title = models.CharField(blank=True, max_length=50, null=True)
description = models.TextField(blank=True, null=True)
created = models.DateTimeField(auto_now_add=True)
definition = django.contrib.postgres.fields.JSONField(default=dict)
objects = DataSetManager()
def get_definition(self):
"""Returns the dataset definition
:returns: The DataSet definition
:rtype: :class:`data.dataset.dataset.DataSetDefinition`
"""
if isinstance(self.definition, basestring):
self.definition = {}
return DataSetDefinitionV6(definition=self.definition).get_definition()
def get_v6_definition_json(self):
"""Returns the dataset definition in v6 of the JSON schema
:returns: The dataset definition in v6 of the JSON schema
:rtype: dict
"""
return rest_utils.strip_schema_version(convert_definition_to_v6_json(self.get_definition()).get_dict())
def get_dataset_definition(self):
"""Returns the dataset definition
:returns: The dataset definition json
:rtype: dict
"""
return self.definition
def get_dataset_members_json(self):
"""Returns the JSON for the associated dataset members
        :returns: The outgoing primitive representation.
        :rtype: dict
"""
members = DataSet.objects.get_dataset_members(dataset_id=self.id)
serializer = DataSetMemberSerializerV6(members, many=True)
return serializer.data
def get_dataset_files_json(self):
"""Returns the JSON for the associated dataset files
        :returns: The outgoing primitive representation.
        :rtype: dict
"""
files = DataSet.objects.get_dataset_files(self.id)
serializer = DataSetFileSerializerV6(files, many=True)
return serializer.data
class Meta(object):
"""meta information for the db"""
db_table = 'data_set'
class DataSetMemberManager(models.Manager):
"""Provides additional methods for handling dataset members"""
def build_data_list(self, template, data_started=None, data_ended=None, created_started=None, created_ended=None,
source_started=None, source_ended=None, source_sensor_classes=None, source_sensors=None,
source_collections=None,source_tasks=None, mod_started=None, mod_ended=None, job_type_ids=None,
job_type_names=None, job_ids=None, is_published=None, is_superseded=None, file_names=None,
job_outputs=None, recipe_ids=None, recipe_type_ids=None, recipe_nodes=None, batch_ids=None, order=None):
"""Builds a list of data dictionaries from a template and file filters
:param template: The template to fill with files found through filters
:type template: dict
:param data_started: Query files where data started after this time.
:type data_started: :class:`datetime.datetime`
:param data_ended: Query files where data ended before this time.
:type data_ended: :class:`datetime.datetime`
:param created_started: Query files created after this time.
:type created_started: :class:`datetime.datetime`
:param created_ended: Query files created before this time.
:type created_ended: :class:`datetime.datetime`
:param source_started: Query files where source collection started after this time.
:type source_started: :class:`datetime.datetime`
:param source_ended: Query files where source collection ended before this time.
:type source_ended: :class:`datetime.datetime`
:param source_sensor_classes: Query files with the given source sensor class.
:type source_sensor_classes: :func:`list`
:param source_sensor: Query files with the given source sensor.
:type source_sensor: :func:`list`
:param source_collection: Query files with the given source class.
:type source_collection: :func:`list`
:param source_tasks: Query files with the given source tasks.
:type source_tasks: :func:`list`
:param mod_started: Query files where the last modified date is after this time.
:type mod_started: :class:`datetime.datetime`
:param mod_ended: Query files where the last modified date is before this time.
:type mod_ended: :class:`datetime.datetime`
:param job_type_ids: Query files with jobs with the given type identifier.
:type job_type_ids: :func:`list`
:param job_type_names: Query files with jobs with the given type name.
:type job_type_names: :func:`list`
:keyword job_ids: Query files with a given job id
:type job_ids: :func:`list`
:param is_published: Query files flagged as currently exposed for publication.
:type is_published: bool
:param is_superseded: Query files that have/have not been superseded.
:type is_superseded: bool
:param file_names: Query files with the given file names.
:type file_names: :func:`list`
:keyword job_outputs: Query files with the given job outputs
:type job_outputs: :func:`list`
:keyword recipe_ids: Query files with a given recipe id
:type recipe_ids: :func:`list`
:keyword recipe_nodes: Query files with a given recipe nodes
:type recipe_nodes: :func:`list`
:keyword recipe_type_ids: Query files with the given recipe types
:type recipe_type_ids: :func:`list`
:keyword batch_ids: Query files with batches with the given identifiers.
:type batch_ids: :func:`list`
:param order: A list of fields to control the sort order.
:type order: :func:`list`
"""
files = ScaleFile.objects.filter_files(
data_started=data_started, data_ended=data_ended,
source_started=source_started, source_ended=source_ended,
source_sensor_classes=source_sensor_classes, source_sensors=source_sensors,
source_collections=source_collections, source_tasks=source_tasks,
mod_started=mod_started, mod_ended=mod_ended, job_type_ids=job_type_ids,
job_type_names=job_type_names, job_ids=job_ids,
file_names=file_names, job_outputs=job_outputs, recipe_ids=recipe_ids,
recipe_type_ids=recipe_type_ids, recipe_nodes=recipe_nodes, batch_ids=batch_ids,
order=order)
data_list = []
try:
for f in files:
entry = copy.deepcopy(template)
file_params = entry['files']
for p in file_params:
if file_params[p] == 'FILE_VALUE':
file_params[p] = [f.id]
data_list.append(DataV6(data=entry, do_validate=True).get_data())
except (KeyError, TypeError) as ex:
raise InvalidData('INVALID_TEMPLATE', "Specified template is invalid: %s" % ex)
return data_list
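    # Hedged illustration (added; not part of the original Scale code): the
    # template passed to build_data_list is a v6 data dict whose file parameters
    # hold the sentinel string 'FILE_VALUE'; each matching ScaleFile id is
    # substituted in turn. The parameter name 'INPUT_FILE' is hypothetical.
    EXAMPLE_BUILD_DATA_TEMPLATE = {
        'version': '6',
        'files': {'INPUT_FILE': 'FILE_VALUE'},
        'json': {},
    }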
def validate_data_list(self, dataset_def, data_list):
"""Validates a list of data objects against a dataset
:param dataset_def: The dataset definition the member is a part of
:type dataset_def:
:param data_list: Data definitions of the dataset members
:type data_list: [:class:`data.data.data.Data`]
"""
is_valid = True
errors = []
warnings = []
for data in data_list:
try:
dataset_def.validate(data)
except (InvalidData, InvalidDataSetMember) as ex:
is_valid = False
errors.append(ex.error)
message = 'Dataset definition is invalid: %s' % ex
logger.info(message)
pass
# validate other fields
return DataSetValidation(is_valid, errors, warnings)
def create_dataset_members(self, dataset, data_list):
"""Creates a dataset member
:param dataset: The dataset the member is a part of
:type dataset: :class:`data.models.DataSet`
:param data_list: Data definitions of the dataset members
:type data_list: [:class:`data.data.data.Data`]
"""
with transaction.atomic():
dataset_members = []
datasetfiles = []
existing_scale_ids = DataSetFile.objects.get_file_ids(dataset_ids=[dataset.id])
for d in data_list:
dataset_member = DataSetMember()
dataset_member.dataset = dataset
dataset_member.data = convert_data_to_v6_json(d).get_dict()
dataset_member.file_ids = list(data_util.get_file_ids(d))
dataset_members.append(dataset_member)
datasetfiles.extend(DataSetFile.objects.create_dataset_files(dataset, d, existing_scale_ids))
existing_scale_ids.append(dataset_member.file_ids)
DataSetFile.objects.bulk_create(datasetfiles)
return DataSetMember.objects.bulk_create(dataset_members)
def get_dataset_members(self, dataset):
"""Returns dataset members for the given dataset
:returns: members for a given dataset
:rtype: QuerySet<DataSetMember>
"""
return self.all().filter(dataset=dataset).order_by('id')
def get_details_v6(self, dsm_id):
"""Gets additional details for the given dataset member id
:returns: The full dataset member for the given id
:rtype: :class:`data.models.DataSetMember`
"""
dsm = DataSetMember.objects.get(pk=dsm_id)
dsm.files = DataSetFile.objects.filter(dataset=dsm.dataset, scale_file_id__in=list(dsm.file_ids))
return dsm
class DataSetMember(models.Model):
"""
    Defines the data for a single member of a dataset; contains the list/descriptors of the member's files
    :keyword dataset: Refers to the dataset this member belongs to
    :type dataset: :class:`django.db.models.ForeignKey`
    :keyword data: JSON description of the data in this DataSetMember.
    :type data: :class:`django.contrib.postgres.fields.JSONField`
:keyword created: Created Time
:type created: datetime
"""
dataset = models.ForeignKey('data.DataSet', on_delete=models.PROTECT)
data = django.contrib.postgres.fields.JSONField(default=dict)
file_ids = django.contrib.postgres.fields.ArrayField(models.IntegerField(null=True))
created = models.DateTimeField(auto_now_add=True)
objects = DataSetMemberManager()
def get_dataset_definition(self):
"""Returns the dataset definition
:returns: The dataset definition
:rtype: :class:`data.dataset.dataset.DataSetDefinition`
"""
return self.dataset.get_definition()
def get_data(self):
"""Returns the data for this datasetmember
:returns: The data for this datasetmember
:rtype: :class:`data.data.data.Data`
"""
return DataV6(data=self.data, do_validate=False).get_data()
def get_v6_data_json(self):
"""Returns the data for this datasetmember as v6 json with the version stripped
:returns: The v6 JSON output data dict for this datasetmember
:rtype: dict
"""
return rest_utils.strip_schema_version(convert_data_to_v6_json(self.get_data()).get_dict())
class Meta(object):
"""meta information for the db"""
db_table = 'data_set_member'
class DataSetFileManager(models.Manager):
"""Manages the datasetfile model"""
def create_dataset_files(self, dataset, data, existing_scale_ids):
"""Creates dataset files for the given dataset and data"""
datasetfiles = []
for i in data.values.keys():
v = data.values[i]
if type(v) is FileValue:
for id in v.file_ids:
if id in existing_scale_ids:
continue
file = DataSetFile()
file.dataset = dataset
file.scale_file = ScaleFile.objects.get(pk=id)
file.parameter_name = i
datasetfiles.append(file)
return datasetfiles
def get_file_ids(self, dataset_ids, parameter_names=None):
"""Returns a list of the file IDs for the given datasets, optionally filtered by parameter_name.
:param dataset_ids: The ids of the associated datasets
        :type dataset_ids: :func:`list`
        :param parameter_names: The parameter names to search for in the given datasets
        :type parameter_names: :func:`list`
:returns: The list of scale file IDs
:rtype: :func:`list`
"""
query = self.all().filter(dataset_id__in=list(dataset_ids))
if parameter_names:
query = query.filter(parameter_name__in=list(parameter_names))
return [result.scale_file_id for result in query.only('scale_file_id').distinct()]
def get_dataset_ids(self, file_ids, all_files=False):
"""Returns a list of the dataset IDs that contain the given files
:param file_ids: The ids of the files to look for
        :type file_ids: :func:`list`
:param all_files: Whether or not a dataset must contain all files or just some of the files in the list
:type all_files: bool
:returns: The list of dataset IDs
:rtype: :func:`list`
"""
results = []
if not all_files:
query = self.all().filter(scale_file_id__in=list(file_ids)).only('dataset_id').distinct()
results = [result.dataset_id for result in query]
else:
query = self.all().filter(scale_file_id__in=list(file_ids)).values('dataset_id').annotate(total=Count('dataset_id')).order_by('total')
for result in query:
if result['total'] == len(file_ids):
results.append(result['dataset_id'])
return results
def get_files(self, dataset_ids, parameter_names=None):
"""Returns the dataset files associated with the given dataset_ids
:param dataset_ids: The ids of the associated datasets
        :type dataset_ids: :func:`list`
        :param parameter_names: The parameter names to search for in the given datasets
        :type parameter_names: :func:`list`
:returns: The DataSetFiles associated with that dataset_id
:rtype: [:class:`data.models.DataSetFile`]
"""
files = self.all().filter(dataset_id__in=list(dataset_ids))
if parameter_names:
files = files.filter(parameter_name__in=list(parameter_names))
return files
def get_datasets(self, file_ids, all_files=False):
"""Returns the datasets associated with the given file_id
:param file_id: The id of the associated file
:type file_id: integer
:param all_files: Whether or not a dataset must contain all files or just some of the files in the list
:type all_files: bool
:returns: The DataSets associated with that dataset_id
:rtype: [:class:`data.models.DataSet`]
"""
dataset_ids = self.get_dataset_ids(file_ids=file_ids, all_files=all_files)
datasets = DataSet.objects.filter(id__in=dataset_ids)
return datasets
def get_dataset_files(self, dataset_id):
"""Returns the dataset files associated with the given dataset_id
:param dataset_id: The id of the associated dataset
:type dataset_id: integer
:returns: The DataSetFiles associated with that dataset_id
:rtype: [:class:`data.models.DataSetFile`]
"""
files = DataSetFile.objects.filter(dataset_id=dataset_id)
return files
class DataSetFile(models.Model):
"""
The actual file in a dataset member
:keyword dataset: Refers to the dataset the file is a member of
:type dataset: :class:`django.db.models.ForeignKey`
:keyword scale_file: Refers to the ScaleFile
:type scale_file: :class:`django.db.models.ForeignKey`
:keyword parameter_name: Refers to the File parameter name
:type parameter_name: :class:`django.db.models.CharField`
"""
dataset = models.ForeignKey('data.DataSet', on_delete=models.PROTECT)
scale_file = models.ForeignKey('storage.ScaleFile', on_delete=models.PROTECT)
parameter_name = models.CharField(db_index=True, max_length=50)
objects = DataSetFileManager()
class Meta(object):
"""meta information for the db"""
db_table = 'data_set_file'
unique_together = ("dataset", "scale_file") | apache-2.0 |
jordigh/mercurial-crew | mercurial/mdiff.py | 92 | 11609 | # mdiff.py - diff and patch routines for mercurial
#
# Copyright 2005, 2006 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
import bdiff, mpatch, util
import re, struct, base85, zlib
def splitnewlines(text):
'''like str.splitlines, but only split on newlines.'''
lines = [l + '\n' for l in text.split('\n')]
if lines:
if lines[-1] == '\n':
lines.pop()
else:
lines[-1] = lines[-1][:-1]
return lines
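# Hedged illustration (added; not part of the original module): unlike
# str.splitlines, only '\n' is treated as a separator and it is kept on each
# returned line, matching how file contents are diffed.
if __name__ == '__main__':  # illustrative only
    assert splitnewlines('a\nb\n') == ['a\n', 'b\n']
    assert splitnewlines('a\nb') == ['a\n', 'b']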
class diffopts(object):
'''context is the number of context lines
text treats all files as text
showfunc enables diff -p output
git enables the git extended patch format
nodates removes dates from diff headers
ignorews ignores all whitespace changes in the diff
ignorewsamount ignores changes in the amount of whitespace
ignoreblanklines ignores changes whose lines are all blank
upgrade generates git diffs to avoid data loss
'''
defaults = {
'context': 3,
'text': False,
'showfunc': False,
'git': False,
'nodates': False,
'ignorews': False,
'ignorewsamount': False,
'ignoreblanklines': False,
'upgrade': False,
}
__slots__ = defaults.keys()
def __init__(self, **opts):
for k in self.__slots__:
v = opts.get(k)
if v is None:
v = self.defaults[k]
setattr(self, k, v)
try:
self.context = int(self.context)
except ValueError:
raise util.Abort(_('diff context lines count must be '
'an integer, not %r') % self.context)
def copy(self, **kwargs):
opts = dict((k, getattr(self, k)) for k in self.defaults)
opts.update(kwargs)
return diffopts(**opts)
defaultopts = diffopts()
def wsclean(opts, text, blank=True):
if opts.ignorews:
text = bdiff.fixws(text, 1)
elif opts.ignorewsamount:
text = bdiff.fixws(text, 0)
if blank and opts.ignoreblanklines:
text = re.sub('\n+', '\n', text).strip('\n')
return text
def splitblock(base1, lines1, base2, lines2, opts):
# The input lines matches except for interwoven blank lines. We
# transform it into a sequence of matching blocks and blank blocks.
lines1 = [(wsclean(opts, l) and 1 or 0) for l in lines1]
lines2 = [(wsclean(opts, l) and 1 or 0) for l in lines2]
s1, e1 = 0, len(lines1)
s2, e2 = 0, len(lines2)
while s1 < e1 or s2 < e2:
i1, i2, btype = s1, s2, '='
if (i1 >= e1 or lines1[i1] == 0
or i2 >= e2 or lines2[i2] == 0):
# Consume the block of blank lines
btype = '~'
while i1 < e1 and lines1[i1] == 0:
i1 += 1
while i2 < e2 and lines2[i2] == 0:
i2 += 1
else:
# Consume the matching lines
while i1 < e1 and lines1[i1] == 1 and lines2[i2] == 1:
i1 += 1
i2 += 1
yield [base1 + s1, base1 + i1, base2 + s2, base2 + i2], btype
s1 = i1
s2 = i2
def allblocks(text1, text2, opts=None, lines1=None, lines2=None, refine=False):
"""Return (block, type) tuples, where block is an mdiff.blocks
line entry. type is '=' for blocks matching exactly one another
(bdiff blocks), '!' for non-matching blocks and '~' for blocks
matching only after having filtered blank lines. If refine is True,
then '~' blocks are refined and are only made of blank lines.
    lines1 and lines2 are text1 and text2 split with splitnewlines() if
they are already available.
"""
if opts is None:
opts = defaultopts
if opts.ignorews or opts.ignorewsamount:
text1 = wsclean(opts, text1, False)
text2 = wsclean(opts, text2, False)
diff = bdiff.blocks(text1, text2)
for i, s1 in enumerate(diff):
# The first match is special.
# we've either found a match starting at line 0 or a match later
# in the file. If it starts later, old and new below will both be
# empty and we'll continue to the next match.
if i > 0:
s = diff[i - 1]
else:
s = [0, 0, 0, 0]
s = [s[1], s1[0], s[3], s1[2]]
# bdiff sometimes gives huge matches past eof, this check eats them,
# and deals with the special first match case described above
if s[0] != s[1] or s[2] != s[3]:
type = '!'
if opts.ignoreblanklines:
if lines1 is None:
lines1 = splitnewlines(text1)
if lines2 is None:
lines2 = splitnewlines(text2)
old = wsclean(opts, "".join(lines1[s[0]:s[1]]))
new = wsclean(opts, "".join(lines2[s[2]:s[3]]))
if old == new:
type = '~'
yield s, type
yield s1, '='
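# Hedged illustration (added; assumes mercurial's bdiff extension is importable):
# allblocks yields (block, type) pairs where '=' marks matching runs of lines
# and '!' marks differing runs.
if __name__ == '__main__':  # illustrative only
    _types = [t for _, t in allblocks('x\ny\n', 'x\nz\n')]
    assert '=' in _types and '!' in _types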
def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts):
def datetag(date, fn=None):
if not opts.git and not opts.nodates:
return '\t%s\n' % date
if fn and ' ' in fn:
return '\t\n'
return '\n'
if not a and not b:
return ""
epoch = util.datestr((0, 0))
fn1 = util.pconvert(fn1)
fn2 = util.pconvert(fn2)
if not opts.text and (util.binary(a) or util.binary(b)):
if a and b and len(a) == len(b) and a == b:
return ""
l = ['Binary file %s has changed\n' % fn1]
elif not a:
b = splitnewlines(b)
if a is None:
l1 = '--- /dev/null%s' % datetag(epoch)
else:
l1 = "--- %s%s" % ("a/" + fn1, datetag(ad, fn1))
l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd, fn2))
l3 = "@@ -0,0 +1,%d @@\n" % len(b)
l = [l1, l2, l3] + ["+" + e for e in b]
elif not b:
a = splitnewlines(a)
l1 = "--- %s%s" % ("a/" + fn1, datetag(ad, fn1))
if b is None:
l2 = '+++ /dev/null%s' % datetag(epoch)
else:
l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd, fn2))
l3 = "@@ -1,%d +0,0 @@\n" % len(a)
l = [l1, l2, l3] + ["-" + e for e in a]
else:
al = splitnewlines(a)
bl = splitnewlines(b)
l = list(_unidiff(a, b, al, bl, opts=opts))
if not l:
return ""
l.insert(0, "--- a/%s%s" % (fn1, datetag(ad, fn1)))
l.insert(1, "+++ b/%s%s" % (fn2, datetag(bd, fn2)))
for ln in xrange(len(l)):
if l[ln][-1] != '\n':
l[ln] += "\n\ No newline at end of file\n"
return "".join(l)
# creates a headerless unified diff
# t1 and t2 are the text to be diffed
# l1 and l2 are the text broken up into lines
def _unidiff(t1, t2, l1, l2, opts=defaultopts):
def contextend(l, len):
ret = l + opts.context
if ret > len:
ret = len
return ret
def contextstart(l):
ret = l - opts.context
if ret < 0:
return 0
return ret
lastfunc = [0, '']
def yieldhunk(hunk):
(astart, a2, bstart, b2, delta) = hunk
aend = contextend(a2, len(l1))
alen = aend - astart
blen = b2 - bstart + aend - a2
func = ""
if opts.showfunc:
lastpos, func = lastfunc
# walk backwards from the start of the context up to the start of
# the previous hunk context until we find a line starting with an
# alphanumeric char.
for i in xrange(astart - 1, lastpos - 1, -1):
if l1[i][0].isalnum():
func = ' ' + l1[i].rstrip()[:40]
lastfunc[1] = func
break
# by recording this hunk's starting point as the next place to
# start looking for function lines, we avoid reading any line in
# the file more than once.
lastfunc[0] = astart
# zero-length hunk ranges report their start line as one less
if alen:
astart += 1
if blen:
bstart += 1
yield "@@ -%d,%d +%d,%d @@%s\n" % (astart, alen,
bstart, blen, func)
for x in delta:
yield x
for x in xrange(a2, aend):
yield ' ' + l1[x]
# bdiff.blocks gives us the matching sequences in the files. The loop
# below finds the spaces between those matching sequences and translates
# them into diff output.
#
hunk = None
ignoredlines = 0
for s, stype in allblocks(t1, t2, opts, l1, l2):
a1, a2, b1, b2 = s
if stype != '!':
if stype == '~':
# The diff context lines are based on t1 content. When
# blank lines are ignored, the new lines offsets must
# be adjusted as if equivalent blocks ('~') had the
# same sizes on both sides.
ignoredlines += (b2 - b1) - (a2 - a1)
continue
delta = []
old = l1[a1:a2]
new = l2[b1:b2]
b1 -= ignoredlines
b2 -= ignoredlines
astart = contextstart(a1)
bstart = contextstart(b1)
prev = None
if hunk:
# join with the previous hunk if it falls inside the context
if astart < hunk[1] + opts.context + 1:
prev = hunk
astart = hunk[1]
bstart = hunk[3]
else:
for x in yieldhunk(hunk):
yield x
if prev:
# we've joined the previous hunk, record the new ending points.
hunk[1] = a2
hunk[3] = b2
delta = hunk[4]
else:
# create a new hunk
hunk = [astart, a2, bstart, b2, delta]
delta[len(delta):] = [' ' + x for x in l1[astart:a1]]
delta[len(delta):] = ['-' + x for x in old]
delta[len(delta):] = ['+' + x for x in new]
if hunk:
for x in yieldhunk(hunk):
yield x
def b85diff(to, tn):
'''print base85-encoded binary diff'''
def fmtline(line):
l = len(line)
if l <= 26:
l = chr(ord('A') + l - 1)
else:
l = chr(l - 26 + ord('a') - 1)
return '%c%s\n' % (l, base85.b85encode(line, True))
def chunk(text, csize=52):
l = len(text)
i = 0
while i < l:
yield text[i:i + csize]
i += csize
if to is None:
to = ''
if tn is None:
tn = ''
if to == tn:
return ''
# TODO: deltas
ret = []
ret.append('GIT binary patch\n')
ret.append('literal %s\n' % len(tn))
for l in chunk(zlib.compress(tn)):
ret.append(fmtline(l))
ret.append('\n')
return ''.join(ret)
def patchtext(bin):
pos = 0
t = []
while pos < len(bin):
p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
pos += 12
t.append(bin[pos:pos + l])
pos += l
return "".join(t)
def patch(a, bin):
if len(a) == 0:
# skip over trivial delta header
return util.buffer(bin, 12)
return mpatch.patches(a, [bin])
# similar to difflib.SequenceMatcher.get_matching_blocks
def get_matching_blocks(a, b):
return [(d[0], d[2], d[1] - d[0]) for d in bdiff.blocks(a, b)]
def trivialdiffheader(length):
return struct.pack(">lll", 0, 0, length)
patches = mpatch.patches
patchedsize = mpatch.patchedsize
textdiff = bdiff.bdiff
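# Hedged round-trip sketch (added; assumes mercurial's C extensions are built):
# textdiff produces a binary delta that patch() applies to recover the new text.
if __name__ == '__main__':  # illustrative only
    _old, _new = 'a\nb\nc\n', 'a\nB\nc\n'
    assert str(patch(_old, textdiff(_old, _new))) == _new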
| gpl-2.0 |