repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (string, 15 classes) | var_hash (int64) | doc_hash (int64) | line_mean (float64, 3.5-99.8) | line_max (int64, 13-999) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---|---|
jdsika/TUM_HOly | openrave/sympy/physics/quantum/grover.py | 6 | 9227 | """Grover's algorithm and helper functions.
Todo:
* W gate construction (or perhaps -W gate based on Mermin's book)
* Generalize the algorithm for an unknown function that returns 1 on
multiple qubit states, not just one.
* Implement _represent_ZGate in OracleGate
"""
from sympy import sqrt, pi, floor
from sympy.physics.quantum.qapply import qapply
from sympy.physics.quantum.qexpr import QuantumError
from sympy.physics.quantum.hilbert import ComplexSpace
from sympy.physics.quantum.operator import UnitaryOperator
from sympy.physics.quantum.gate import Gate, HadamardGate
from sympy.physics.quantum.qubit import IntQubit
from sympy.core.compatibility import callable
__all__ = [
'OracleGate',
'WGate',
'superposition_basis',
'grover_iteration',
'apply_grover'
]
def superposition_basis(nqubits):
"""Creates an equal superposition of the computational basis.
Parameters
==========
nqubits : int
The number of qubits.
Return
======
state : Qubit
An equal superposition of the computational basis with nqubits.
Examples
========
Create an equal superposition of 2 qubits::
>>> from sympy.physics.quantum.grover import superposition_basis
>>> superposition_basis(2)
|0>/2 + |1>/2 + |2>/2 + |3>/2
"""
amp = 1/sqrt(2**nqubits)
return sum([amp*IntQubit(n, nqubits) for n in range(2**nqubits)])
class OracleGate(Gate):
"""A black box gate.
The gate marks the desired qubits of an unknown function by flipping
the sign of the qubits. The unknown function returns true when it
finds its desired qubits and false otherwise.
Parameters
==========
qubits : int
Number of qubits.
oracle : callable
A callable function that returns a boolean on a computational basis.
Examples
========
Apply an Oracle gate that flips the sign of |2> on different qubits::
>>> from sympy.physics.quantum.qubit import IntQubit
>>> from sympy.physics.quantum.qapply import qapply
>>> from sympy.physics.quantum.grover import OracleGate
>>> f = lambda qubits: qubits == IntQubit(2)
>>> v = OracleGate(2, f)
>>> qapply(v*IntQubit(2))
-|2>
>>> qapply(v*IntQubit(3))
|3>
"""
gate_name = u'V'
gate_name_latex = u'V'
#-------------------------------------------------------------------------
# Initialization/creation
#-------------------------------------------------------------------------
@classmethod
def _eval_args(cls, args):
if len(args) != 2:
raise QuantumError(
'Insufficient/excessive arguments to Oracle. Please ' +
'supply the number of qubits and an unknown function.'
)
sub_args = args[0],
sub_args = UnitaryOperator._eval_args(sub_args)
if not sub_args[0].is_Integer:
raise TypeError('Integer expected, got: %r' % sub_args[0])
if not callable(args[1]):
raise TypeError('Callable expected, got: %r' % args[1])
sub_args = UnitaryOperator._eval_args(tuple(range(args[0])))
return (sub_args, args[1])
@classmethod
def _eval_hilbert_space(cls, args):
"""This returns the smallest possible Hilbert space."""
return ComplexSpace(2)**(max(args[0])+1)
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def search_function(self):
"""The unknown function that helps find the sought after qubits."""
return self.label[1]
@property
def targets(self):
"""A tuple of target qubits."""
return self.label[0]
#-------------------------------------------------------------------------
# Apply
#-------------------------------------------------------------------------
def _apply_operator_Qubit(self, qubits, **options):
"""Apply this operator to a Qubit subclass.
Parameters
==========
qubits : Qubit
The qubit subclass to apply this operator to.
Returns
=======
state : Expr
The resulting quantum state.
"""
if qubits.nqubits != self.nqubits:
raise QuantumError(
                'OracleGate operates on %r qubits, got: %r'
                % (self.nqubits, qubits.nqubits)
)
# If function returns 1 on qubits
# return the negative of the qubits (flip the sign)
if self.search_function(qubits):
return -qubits
else:
return qubits
#-------------------------------------------------------------------------
# Represent
#-------------------------------------------------------------------------
def _represent_ZGate(self, basis, **options):
raise NotImplementedError(
"Represent for the Oracle has not been implemented yet"
)
class WGate(Gate):
"""General n qubit W Gate in Grover's algorithm.
The gate performs the operation 2|phi><phi| - 1 on some qubits.
|phi> = (tensor product of n Hadamards)*(|0> with n qubits)
Parameters
==========
nqubits : int
The number of qubits to operate on
"""
gate_name = u'W'
gate_name_latex = u'W'
@classmethod
def _eval_args(cls, args):
if len(args) != 1:
raise QuantumError(
'Insufficient/excessive arguments to W gate. Please ' +
'supply the number of qubits to operate on.'
)
args = UnitaryOperator._eval_args(args)
if not args[0].is_Integer:
raise TypeError('Integer expected, got: %r' % args[0])
return tuple(reversed(range(args[0])))
#-------------------------------------------------------------------------
# Apply
#-------------------------------------------------------------------------
def _apply_operator_Qubit(self, qubits, **options):
"""
qubits: a set of qubits (Qubit)
Returns: quantum object (quantum expression - QExpr)
"""
if qubits.nqubits != self.nqubits:
raise QuantumError(
                'WGate operates on %r qubits, got: %r'
                % (self.nqubits, qubits.nqubits)
)
# See 'Quantum Computer Science' by David Mermin p.92 -> W|a> result
# Return (2/(sqrt(2^n)))|phi> - |a> where |a> is the current basis
# state and phi is the superposition of basis states (see function
# create_computational_basis above)
basis_states = superposition_basis(self.nqubits)
change_to_basis = (2/sqrt(2**self.nqubits))*basis_states
return change_to_basis - qubits
def grover_iteration(qstate, oracle):
"""Applies one application of the Oracle and W Gate, WV.
Parameters
==========
qstate : Qubit
A superposition of qubits.
oracle : OracleGate
The black box operator that flips the sign of the desired basis qubits.
Returns
=======
Qubit : The qubits after applying the Oracle and W gate.
Examples
========
Perform one iteration of grover's algorithm to see a phase change::
>>> from sympy.physics.quantum.qapply import qapply
>>> from sympy.physics.quantum.qubit import IntQubit
>>> from sympy.physics.quantum.grover import OracleGate
>>> from sympy.physics.quantum.grover import superposition_basis
>>> from sympy.physics.quantum.grover import grover_iteration
>>> numqubits = 2
>>> basis_states = superposition_basis(numqubits)
>>> f = lambda qubits: qubits == IntQubit(2)
>>> v = OracleGate(numqubits, f)
>>> qapply(grover_iteration(basis_states, v))
|2>
"""
wgate = WGate(oracle.nqubits)
return wgate*oracle*qstate
def apply_grover(oracle, nqubits, iterations=None):
"""Applies grover's algorithm.
Parameters
==========
oracle : callable
The unknown callable function that returns true when applied to the
desired qubits and false otherwise.
Returns
=======
state : Expr
The resulting state after Grover's algorithm has been iterated.
Examples
========
Apply grover's algorithm to an even superposition of 2 qubits::
>>> from sympy.physics.quantum.qapply import qapply
>>> from sympy.physics.quantum.qubit import IntQubit
>>> from sympy.physics.quantum.grover import apply_grover
>>> f = lambda qubits: qubits == IntQubit(2)
>>> qapply(apply_grover(f, 2))
|2>
"""
if nqubits <= 0:
raise QuantumError(
'Grover\'s algorithm needs nqubits > 0, received %r qubits'
% nqubits
)
if iterations is None:
iterations = floor(sqrt(2**nqubits)*(pi/4))
v = OracleGate(nqubits, oracle)
iterated = superposition_basis(nqubits)
for iter in range(iterations):
iterated = grover_iteration(iterated, v)
iterated = qapply(iterated)
return iterated
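# --- Illustrative usage sketch (editor's addition, not part of the original
# sympy module). It only reuses the public helpers defined above; the marked
# value and register size are arbitrary choices for the example.
def _demo_grover_search():
    """Search a 4-qubit register for the marked state |6>."""
    oracle = lambda qubits: qubits == IntQubit(6)
    result = qapply(apply_grover(oracle, 4))
    # With the default iteration count floor(pi/4 * sqrt(2**4)) = 3, the
    # amplitude of |6> dominates the returned superposition.
    return result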
| mit | -2,803,697,750,255,861,000 | -4,717,787,235,565,894,000 | 30.599315 | 79 | 0.555977 | false |
chutsu/robotics | prototype/models/two_wheel.py | 1 | 3500 | from math import cos
from math import sin
import numpy as np
import sympy
from sympy import pprint
def two_wheel_2d_model(x, u, dt):
"""Two wheel 2D motion model
Parameters
----------
x : np.array
Two Wheel model state vector (x, y, theta)
u : np.array
Input
dt : float
Time difference
Returns
-------
np.array (x, y, theta)
"""
gdot = np.array([[u[0, 0] * cos(x[2, 0]) * dt],
[u[0, 0] * sin(x[2, 0]) * dt],
[u[1, 0] * dt]])
return x + gdot
def two_wheel_2d_linearized_model(x, u, dt):
"""Two wheel 2D linearized motion model
Parameters
----------
x : np.array
Two Wheel model state vector (x, y, theta)
u : np.array
Input
dt : float
Time difference
Returns
-------
np.array 3x3 matrix of linearized two wheel model
"""
G1 = 1.0
G2 = 0.0
G3 = -u[0, 0] * sin(x[2, 0]) * dt
G4 = 0.0
G5 = 1.0
G6 = u[0, 0] * cos(x[2, 0]) * dt
G7 = 0.0
G8 = 0.0
G9 = 1.0
return np.array([[G1, G2, G3],
[G4, G5, G6],
[G7, G8, G9]])
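# --- Illustrative usage sketch (editor's addition, not part of the original
# module). It propagates the 2D model above for a few time steps and
# evaluates the matching Jacobian the way an EKF prediction step would.
# The velocity, turn rate and noise values are arbitrary assumptions.
def _demo_two_wheel_2d(steps=10, dt=0.1):
    x = np.array([[0.0], [0.0], [0.0]])  # state: x, y, theta
    u = np.array([[1.0], [0.1]])         # input: forward velocity, turn rate
    P = np.eye(3) * 0.01                 # state covariance (assumed)
    Q = np.eye(3) * 0.001                # process noise (assumed)
    for _ in range(steps):
        G = two_wheel_2d_linearized_model(x, u, dt)
        x = two_wheel_2d_model(x, u, dt)
        P = G.dot(P).dot(G.T) + Q        # EKF-style covariance propagation
    return x, P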
def two_wheel_3d_model(x, u, dt):
"""Two wheel 3D motion model
Parameters
----------
x : np.array
        Two Wheel model state vector (x, y, z, theta)
u : np.array
Input
dt : float
Time difference
Returns
-------
np.array (x, y, z, theta)
"""
g1 = x[0] + u[0] * cos(x[3]) * dt
g2 = x[1] + u[0] * sin(x[3]) * dt
g3 = x[2] + u[1] * dt
g4 = x[3] + u[2] * dt
return np.array([g1, g2, g3, g4])
def two_wheel_2d_deriv():
""" Symbolic derivation of Jacobian of the 2D two wheel motion model """
x1, x2, x3, x4, x5 = sympy.symbols("x1,x2,x3,x4,x5")
dt = sympy.symbols("dt")
# x, y, theta, v, omega
f1 = x1 + x4 * sympy.cos(x3) * dt
f2 = x2 + x4 * sympy.sin(x3) * dt
f3 = x3 + x5 * dt
f4 = x4
f5 = x5
F = sympy.Matrix([f1, f2, f3, f4, f5])
pprint(F.jacobian([x1, x2, x3, x4, x5]))
def two_wheel_3d_deriv():
""" Symbolic derivation of Jacobian of the 3D two wheel motion model """
x1, x2, x3, x4, x5, x6, x7 = sympy.symbols("x1,x2,x3,x4,x5,x6,x7")
dt = sympy.symbols("dt")
# x1 - x
# x2 - y
# x3 - z
# x4 - theta
# x5 - v
# x6 - omega
# x7 - vz
# x, y, z, theta, v, omega, vz
f1 = x1 + x5 * sympy.cos(x4) * dt
f2 = x2 + x5 * sympy.sin(x4) * dt
f3 = x3 + x7 * dt
f4 = x4 + x6 * dt
f5 = x5
f6 = x6
f7 = x7
F = sympy.Matrix([f1, f2, f3, f4, f5, f6, f7])
pprint(F.jacobian([x1, x2, x3, x4, x5, x6, x7]))
def two_wheel_3d_deriv2():
""" Symbolic derivation of Jacobian of the 3D two wheel motion model """
functions = sympy.symbols("f1,f2,f3,f4,f5,f6,f7,f8,f9")
variables = sympy.symbols("x1,x2,x3,x4,x5,x6,x7,x8,x9")
f1, f2, f3, f4, f5, f6, f7, f8, f9 = functions
x1, x2, x3, x4, x5, x6, x7, x8, x9 = variables
dt = sympy.symbols("dt")
# x1 - x
# x2 - y
# x3 - z
# x4 - theta
# x5 - v
# x6 - vz
# x7 - omega
# x8 - a
# x9 - az
f1 = x1 + x5 * sympy.cos(x4) * dt
f2 = x2 + x5 * sympy.sin(x4) * dt
f3 = x3 + x6 * dt
f4 = x4 + x7 * dt
f5 = x5 + x8 * dt
f6 = x6 + x9 * dt
f7 = x7
f8 = x8
f9 = x9
F = sympy.Matrix([f1, f2, f3, f4, f5, f6, f7, f8, f9])
pprint(F.jacobian([x1, x2, x3, x4, x5, x6, x7, x8, x9]))
| gpl-3.0 | 9,049,559,873,376,431,000 | 3,658,912,071,949,096,400 | 19.833333 | 76 | 0.483714 | false |
TheTypoMaster/chromium-crosswalk | third_party/mojo/src/mojo/public/tools/bindings/pylib/mojom/generate/pack.py | 22 | 8235 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import module as mojom
# This module provides a mechanism for determining the packed order and offsets
# of a mojom.Struct.
#
# ps = pack.PackedStruct(struct)
# ps.packed_fields will access a list of PackedField objects, each of which
# will have an offset, a size and a bit (for mojom.BOOLs).
# Size of struct header in bytes: num_bytes [4B] + version [4B].
HEADER_SIZE = 8
class PackedField(object):
kind_to_size = {
mojom.BOOL: 1,
mojom.INT8: 1,
mojom.UINT8: 1,
mojom.INT16: 2,
mojom.UINT16: 2,
mojom.INT32: 4,
mojom.UINT32: 4,
mojom.FLOAT: 4,
mojom.HANDLE: 4,
mojom.MSGPIPE: 4,
mojom.SHAREDBUFFER: 4,
mojom.DCPIPE: 4,
mojom.DPPIPE: 4,
mojom.NULLABLE_HANDLE: 4,
mojom.NULLABLE_MSGPIPE: 4,
mojom.NULLABLE_SHAREDBUFFER: 4,
mojom.NULLABLE_DCPIPE: 4,
mojom.NULLABLE_DPPIPE: 4,
mojom.INT64: 8,
mojom.UINT64: 8,
mojom.DOUBLE: 8,
mojom.STRING: 8,
mojom.NULLABLE_STRING: 8
}
@classmethod
def GetSizeForKind(cls, kind):
if isinstance(kind, (mojom.Array, mojom.Map, mojom.Struct,
mojom.Interface)):
return 8
if isinstance(kind, mojom.Union):
return 16
if isinstance(kind, mojom.InterfaceRequest):
kind = mojom.MSGPIPE
if isinstance(kind, mojom.Enum):
# TODO(mpcomplete): what about big enums?
return cls.kind_to_size[mojom.INT32]
if not kind in cls.kind_to_size:
raise Exception("Invalid kind: %s" % kind.spec)
return cls.kind_to_size[kind]
@classmethod
def GetAlignmentForKind(cls, kind):
if isinstance(kind, mojom.Interface):
return 4
if isinstance(kind, mojom.Union):
return 8
return cls.GetSizeForKind(kind)
def __init__(self, field, index, ordinal):
"""
Args:
field: the original field.
index: the position of the original field in the struct.
ordinal: the ordinal of the field for serialization.
"""
self.field = field
self.index = index
self.ordinal = ordinal
self.size = self.GetSizeForKind(field.kind)
self.alignment = self.GetAlignmentForKind(field.kind)
self.offset = None
self.bit = None
self.min_version = None
def GetPad(offset, alignment):
"""Returns the pad necessary to reserve space so that |offset + pad| equals to
some multiple of |alignment|."""
return (alignment - (offset % alignment)) % alignment
def GetFieldOffset(field, last_field):
"""Returns a 2-tuple of the field offset and bit (for BOOLs)."""
if (field.field.kind == mojom.BOOL and
last_field.field.kind == mojom.BOOL and
last_field.bit < 7):
return (last_field.offset, last_field.bit + 1)
offset = last_field.offset + last_field.size
pad = GetPad(offset, field.alignment)
return (offset + pad, 0)
def GetPayloadSizeUpToField(field):
"""Returns the payload size (not including struct header) if |field| is the
last field.
"""
if not field:
return 0
offset = field.offset + field.size
pad = GetPad(offset, 8)
return offset + pad
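# --- Illustrative sketch (editor's addition, not part of the original
# module): how GetPad computes the padding needed to reach the next aligned
# offset. The (offset, alignment) pairs are arbitrary examples.
def _PadExamples():
  examples = [
      (0, 8),  # already aligned -> pad 0
      (1, 4),  # next 4-byte boundary is 4 -> pad 3
      (9, 8),  # next 8-byte boundary is 16 -> pad 7
  ]
  return [(offset, alignment, GetPad(offset, alignment))
          for offset, alignment in examples]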
class PackedStruct(object):
def __init__(self, struct):
self.struct = struct
# |packed_fields| contains all the fields, in increasing offset order.
self.packed_fields = []
# |packed_fields_in_ordinal_order| refers to the same fields as
# |packed_fields|, but in ordinal order.
self.packed_fields_in_ordinal_order = []
# No fields.
if (len(struct.fields) == 0):
return
# Start by sorting by ordinal.
src_fields = self.packed_fields_in_ordinal_order
ordinal = 0
for index, field in enumerate(struct.fields):
if field.ordinal is not None:
ordinal = field.ordinal
src_fields.append(PackedField(field, index, ordinal))
ordinal += 1
src_fields.sort(key=lambda field: field.ordinal)
# Set |min_version| for each field.
next_min_version = 0
for packed_field in src_fields:
if packed_field.field.min_version is None:
assert next_min_version == 0
else:
assert packed_field.field.min_version >= next_min_version
next_min_version = packed_field.field.min_version
packed_field.min_version = next_min_version
if (packed_field.min_version != 0 and
mojom.IsReferenceKind(packed_field.field.kind) and
not packed_field.field.kind.is_nullable):
raise Exception("Non-nullable fields are only allowed in version 0 of "
"a struct. %s.%s is defined with [MinVersion=%d]."
% (self.struct.name, packed_field.field.name,
packed_field.min_version))
src_field = src_fields[0]
src_field.offset = 0
src_field.bit = 0
dst_fields = self.packed_fields
dst_fields.append(src_field)
# Then find first slot that each field will fit.
for src_field in src_fields[1:]:
last_field = dst_fields[0]
for i in xrange(1, len(dst_fields)):
next_field = dst_fields[i]
offset, bit = GetFieldOffset(src_field, last_field)
if offset + src_field.size <= next_field.offset:
# Found hole.
src_field.offset = offset
src_field.bit = bit
dst_fields.insert(i, src_field)
break
last_field = next_field
if src_field.offset is None:
# Add to end
src_field.offset, src_field.bit = GetFieldOffset(src_field, last_field)
dst_fields.append(src_field)
class ByteInfo(object):
def __init__(self):
self.is_padding = False
self.packed_fields = []
def GetByteLayout(packed_struct):
total_payload_size = GetPayloadSizeUpToField(
packed_struct.packed_fields[-1] if packed_struct.packed_fields else None)
bytes = [ByteInfo() for i in xrange(total_payload_size)]
limit_of_previous_field = 0
for packed_field in packed_struct.packed_fields:
for i in xrange(limit_of_previous_field, packed_field.offset):
bytes[i].is_padding = True
bytes[packed_field.offset].packed_fields.append(packed_field)
limit_of_previous_field = packed_field.offset + packed_field.size
for i in xrange(limit_of_previous_field, len(bytes)):
bytes[i].is_padding = True
for byte in bytes:
    # A given byte cannot both be padding and have fields packed into it.
assert not (byte.is_padding and byte.packed_fields)
return bytes
class VersionInfo(object):
def __init__(self, version, num_fields, num_bytes):
self.version = version
self.num_fields = num_fields
self.num_bytes = num_bytes
def GetVersionInfo(packed_struct):
"""Get version information for a struct.
Args:
packed_struct: A PackedStruct instance.
Returns:
A non-empty list of VersionInfo instances, sorted by version in increasing
order.
Note: The version numbers may not be consecutive.
"""
versions = []
last_version = 0
last_num_fields = 0
last_payload_size = 0
for packed_field in packed_struct.packed_fields_in_ordinal_order:
if packed_field.min_version != last_version:
versions.append(
VersionInfo(last_version, last_num_fields,
last_payload_size + HEADER_SIZE))
last_version = packed_field.min_version
last_num_fields += 1
# The fields are iterated in ordinal order here. However, the size of a
# version is determined by the last field of that version in pack order,
# instead of ordinal order. Therefore, we need to calculate the max value.
last_payload_size = max(GetPayloadSizeUpToField(packed_field),
last_payload_size)
assert len(versions) == 0 or last_num_fields != versions[-1].num_fields
versions.append(VersionInfo(last_version, last_num_fields,
last_payload_size + HEADER_SIZE))
return versions
| bsd-3-clause | 7,585,448,608,968,433,000 | 8,652,845,180,700,900,000 | 32.205645 | 80 | 0.63558 | false |
citrix-openstack-build/nova | nova/openstack/common/timeutils.py | 24 | 5623 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Time related utilities and helper functions.
"""
import calendar
import datetime
import iso8601
import six
# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND
def isotime(at=None, subsecond=False):
"""Stringify time in ISO 8601 format."""
if not at:
at = utcnow()
st = at.strftime(_ISO8601_TIME_FORMAT
if not subsecond
else _ISO8601_TIME_FORMAT_SUBSECOND)
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
st += ('Z' if tz == 'UTC' else tz)
return st
def parse_isotime(timestr):
"""Parse time from ISO 8601 format."""
try:
return iso8601.parse_date(timestr)
except iso8601.ParseError as e:
raise ValueError(e.message)
except TypeError as e:
raise ValueError(e.message)
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
"""Returns formatted utcnow."""
if not at:
at = utcnow()
return at.strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
"""Turn a formatted time back into a datetime."""
return datetime.datetime.strptime(timestr, fmt)
def normalize_time(timestamp):
"""Normalize time in arbitrary timezone to UTC naive object."""
offset = timestamp.utcoffset()
if offset is None:
return timestamp
return timestamp.replace(tzinfo=None) - offset
def is_older_than(before, seconds):
"""Return True if before is older than seconds."""
if isinstance(before, six.string_types):
before = parse_strtime(before).replace(tzinfo=None)
return utcnow() - before > datetime.timedelta(seconds=seconds)
def is_newer_than(after, seconds):
"""Return True if after is newer than seconds."""
if isinstance(after, six.string_types):
after = parse_strtime(after).replace(tzinfo=None)
return after - utcnow() > datetime.timedelta(seconds=seconds)
def utcnow_ts():
"""Timestamp version of our utcnow function."""
return calendar.timegm(utcnow().timetuple())
def utcnow():
"""Overridable version of utils.utcnow."""
if utcnow.override_time:
try:
return utcnow.override_time.pop(0)
except AttributeError:
return utcnow.override_time
return datetime.datetime.utcnow()
def iso8601_from_timestamp(timestamp):
"""Returns a iso8601 formated date from timestamp."""
return isotime(datetime.datetime.utcfromtimestamp(timestamp))
utcnow.override_time = None
def set_time_override(override_time=datetime.datetime.utcnow()):
"""Overrides utils.utcnow.
Make it return a constant time or a list thereof, one at a time.
"""
utcnow.override_time = override_time
def advance_time_delta(timedelta):
"""Advance overridden time using a datetime.timedelta."""
assert(not utcnow.override_time is None)
try:
for dt in utcnow.override_time:
dt += timedelta
except TypeError:
utcnow.override_time += timedelta
def advance_time_seconds(seconds):
"""Advance overridden time by seconds."""
advance_time_delta(datetime.timedelta(0, seconds))
def clear_time_override():
"""Remove the overridden time."""
utcnow.override_time = None
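# --- Illustrative usage sketch (editor's addition, not part of the original
# module): the typical test-time workflow for the override helpers above.
# The chosen start date is arbitrary.
def _demo_time_override():
    start = datetime.datetime(2013, 1, 1, 12, 0, 0)
    set_time_override(start)
    assert utcnow() == start
    advance_time_seconds(90)
    assert delta_seconds(start, utcnow()) == 90
    clear_time_override()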
def marshall_now(now=None):
"""Make an rpc-safe datetime with microseconds.
Note: tzinfo is stripped, but not required for relative times.
"""
if not now:
now = utcnow()
return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
minute=now.minute, second=now.second,
microsecond=now.microsecond)
def unmarshall_time(tyme):
"""Unmarshall a datetime dict."""
return datetime.datetime(day=tyme['day'],
month=tyme['month'],
year=tyme['year'],
hour=tyme['hour'],
minute=tyme['minute'],
second=tyme['second'],
microsecond=tyme['microsecond'])
def delta_seconds(before, after):
"""Return the difference between two timing objects.
Compute the difference in seconds between two date, time, or
datetime objects (as a float, to microsecond resolution).
"""
delta = after - before
try:
return delta.total_seconds()
except AttributeError:
return ((delta.days * 24 * 3600) + delta.seconds +
float(delta.microseconds) / (10 ** 6))
def is_soon(dt, window):
"""Determines if time is going to happen in the next window seconds.
:params dt: the time
:params window: minimum seconds to remain to consider the time not soon
:return: True if expiration is within the given duration
"""
soon = (utcnow() + datetime.timedelta(seconds=window))
return normalize_time(dt) <= soon
| apache-2.0 | 3,991,456,670,824,278,000 | -2,362,664,806,398,056,400 | 28.909574 | 78 | 0.654277 | false |
sysbot/CouchPotatoServer | couchpotato/core/notifications/plex/main.py | 86 | 2356 | from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from .client import PlexClientHTTP, PlexClientJSON
from .server import PlexServer
log = CPLog(__name__)
class Plex(Notification):
http_time_between_calls = 0
def __init__(self):
super(Plex, self).__init__()
self.server = PlexServer(self)
self.client_protocols = {
'http': PlexClientHTTP(self),
'json': PlexClientJSON(self)
}
addEvent('renamer.after', self.addToLibrary)
def addToLibrary(self, message = None, group = None):
if self.isDisabled(): return
if not group: group = {}
return self.server.refresh()
def getClientNames(self):
return [
x.strip().lower()
for x in self.conf('clients').split(',')
]
def notifyClients(self, message, client_names):
success = True
for client_name in client_names:
client_success = False
client = self.server.clients.get(client_name)
if client and client['found']:
client_success = fireEvent('notify.plex.notifyClient', client, message, single = True)
if not client_success:
if self.server.staleClients() or not client:
log.info('Failed to send notification to client "%s". '
'Client list is stale, updating the client list and retrying.', client_name)
self.server.updateClients(self.getClientNames())
else:
log.warning('Failed to send notification to client %s, skipping this time', client_name)
success = False
return success
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
return self.notifyClients(message, self.getClientNames())
def test(self, **kwargs):
test_type = self.testNotifyName()
log.info('Sending test to %s', test_type)
notify_success = self.notify(
message = self.test_message,
data = {},
listener = 'test'
)
refresh_success = self.addToLibrary()
return {'success': notify_success or refresh_success}
| gpl-3.0 | 4,948,388,895,804,047,000 | -5,671,661,544,013,976,000 | 29.205128 | 108 | 0.589983 | false |
aromanovich/kozmic-ci | kozmic/accounts/views.py | 3 | 1103 | from flask import current_app, request, render_template, redirect, url_for, flash
from flask.ext.login import current_user
from kozmic import db
from . import bp
from .forms import SettingsForm
@bp.route('/settings/', methods=('GET', 'POST'))
def settings():
form = SettingsForm(request.form, obj=current_user)
if form.validate_on_submit():
form.populate_obj(current_user)
db.session.add(current_user)
db.session.commit()
flash('Your settings have been saved.', category='success')
return redirect(url_for('.settings'))
return render_template('accounts/settings.html', form=form)
@bp.route('/memberships/sync/', methods=('POST',))
def sync_memberships():
ok_to_commit = current_user.sync_memberships_with_github()
if ok_to_commit:
db.session.commit()
else:
db.session.rollback()
flash('Something went wrong (probably there was a problem '
'communicating with the GitHub API). Please try again later.',
'warning')
return redirect(request.referrer or url_for('projects.index'))
| bsd-3-clause | 3,923,939,077,316,989,400 | -4,659,042,596,086,422,000 | 32.424242 | 81 | 0.671804 | false |
redhatrises/freeipa | ipalib/constants.py | 2 | 12530 | # Authors:
# Martin Nagy <[email protected]>
# Jason Gerard DeRose <[email protected]>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
All constants centralised in one file.
"""
import os
import socket
from ipapython.dn import DN
from ipapython.version import VERSION, API_VERSION
try:
FQDN = socket.getfqdn()
except Exception:
try:
FQDN = socket.gethostname()
except Exception:
FQDN = None
# regular expression NameSpace member names must match:
NAME_REGEX = r'^[a-z][_a-z0-9]*[a-z0-9]$|^[a-z]$'
# Format for ValueError raised when name does not match above regex:
NAME_ERROR = "name must match '%s'; got '%s'"
# Standard format for TypeError message:
TYPE_ERROR = '%s: need a %r; got %r (a %r)'
# Stardard format for TypeError message when a callable is expected:
CALLABLE_ERROR = '%s: need a callable; got %r (which is a %r)'
# Standard format for Exception message when overriding an attribute:
OVERRIDE_ERROR = 'cannot override %s.%s value %r with %r'
# Standard format for AttributeError message when a read-only attribute is
# already locked:
SET_ERROR = 'locked: cannot set %s.%s to %r'
DEL_ERROR = 'locked: cannot delete %s.%s'
# Used for a tab (or indentation level) when formatting for CLI:
CLI_TAB = '  '  # Two spaces
# The section to read in the config files, i.e. [global]
CONFIG_SECTION = 'global'
# The default configuration for api.env
# This is a tuple instead of a dict so that it is immutable.
# To create a dict with this config, just "d = dict(DEFAULT_CONFIG)".
DEFAULT_CONFIG = (
('api_version', API_VERSION),
('version', VERSION),
# Domain, realm, basedn:
('domain', 'example.com'),
('realm', 'EXAMPLE.COM'),
('basedn', DN(('dc', 'example'), ('dc', 'com'))),
# LDAP containers:
('container_accounts', DN(('cn', 'accounts'))),
('container_user', DN(('cn', 'users'), ('cn', 'accounts'))),
('container_deleteuser', DN(('cn', 'deleted users'), ('cn', 'accounts'), ('cn', 'provisioning'))),
('container_stageuser', DN(('cn', 'staged users'), ('cn', 'accounts'), ('cn', 'provisioning'))),
('container_group', DN(('cn', 'groups'), ('cn', 'accounts'))),
('container_service', DN(('cn', 'services'), ('cn', 'accounts'))),
('container_host', DN(('cn', 'computers'), ('cn', 'accounts'))),
('container_hostgroup', DN(('cn', 'hostgroups'), ('cn', 'accounts'))),
('container_rolegroup', DN(('cn', 'roles'), ('cn', 'accounts'))),
('container_permission', DN(('cn', 'permissions'), ('cn', 'pbac'))),
('container_privilege', DN(('cn', 'privileges'), ('cn', 'pbac'))),
('container_automount', DN(('cn', 'automount'))),
('container_policies', DN(('cn', 'policies'))),
('container_configs', DN(('cn', 'configs'), ('cn', 'policies'))),
('container_roles', DN(('cn', 'roles'), ('cn', 'policies'))),
('container_applications', DN(('cn', 'applications'), ('cn', 'configs'), ('cn', 'policies'))),
('container_policygroups', DN(('cn', 'policygroups'), ('cn', 'configs'), ('cn', 'policies'))),
('container_policylinks', DN(('cn', 'policylinks'), ('cn', 'configs'), ('cn', 'policies'))),
('container_netgroup', DN(('cn', 'ng'), ('cn', 'alt'))),
('container_hbac', DN(('cn', 'hbac'))),
('container_hbacservice', DN(('cn', 'hbacservices'), ('cn', 'hbac'))),
('container_hbacservicegroup', DN(('cn', 'hbacservicegroups'), ('cn', 'hbac'))),
('container_dns', DN(('cn', 'dns'))),
('container_vault', DN(('cn', 'vaults'), ('cn', 'kra'))),
('container_virtual', DN(('cn', 'virtual operations'), ('cn', 'etc'))),
('container_sudorule', DN(('cn', 'sudorules'), ('cn', 'sudo'))),
('container_sudocmd', DN(('cn', 'sudocmds'), ('cn', 'sudo'))),
('container_sudocmdgroup', DN(('cn', 'sudocmdgroups'), ('cn', 'sudo'))),
('container_automember', DN(('cn', 'automember'), ('cn', 'etc'))),
('container_selinux', DN(('cn', 'usermap'), ('cn', 'selinux'))),
('container_s4u2proxy', DN(('cn', 's4u2proxy'), ('cn', 'etc'))),
('container_cifsdomains', DN(('cn', 'ad'), ('cn', 'etc'))),
('container_trusts', DN(('cn', 'trusts'))),
('container_adtrusts', DN(('cn', 'ad'), ('cn', 'trusts'))),
('container_ranges', DN(('cn', 'ranges'), ('cn', 'etc'))),
('container_dna', DN(('cn', 'dna'), ('cn', 'ipa'), ('cn', 'etc'))),
('container_dna_posix_ids', DN(('cn', 'posix-ids'), ('cn', 'dna'), ('cn', 'ipa'), ('cn', 'etc'))),
('container_realm_domains', DN(('cn', 'Realm Domains'), ('cn', 'ipa'), ('cn', 'etc'))),
('container_otp', DN(('cn', 'otp'))),
('container_radiusproxy', DN(('cn', 'radiusproxy'))),
('container_views', DN(('cn', 'views'), ('cn', 'accounts'))),
('container_masters', DN(('cn', 'masters'), ('cn', 'ipa'), ('cn', 'etc'))),
('container_certprofile', DN(('cn', 'certprofiles'), ('cn', 'ca'))),
('container_topology', DN(('cn', 'topology'), ('cn', 'ipa'), ('cn', 'etc'))),
('container_caacl', DN(('cn', 'caacls'), ('cn', 'ca'))),
('container_locations', DN(('cn', 'locations'), ('cn', 'etc'))),
('container_ca', DN(('cn', 'cas'), ('cn', 'ca'))),
('container_dnsservers', DN(('cn', 'servers'), ('cn', 'dns'))),
('container_custodia', DN(('cn', 'custodia'), ('cn', 'ipa'), ('cn', 'etc'))),
('container_sysaccounts', DN(('cn', 'sysaccounts'), ('cn', 'etc'))),
('container_certmap', DN(('cn', 'certmap'))),
('container_certmaprules', DN(('cn', 'certmaprules'), ('cn', 'certmap'))),
# Ports, hosts, and URIs:
('xmlrpc_uri', 'http://localhost:8888/ipa/xml'),
# jsonrpc_uri is set in Env._finalize_core()
('ldap_uri', 'ldap://localhost:389'),
('rpc_protocol', 'jsonrpc'),
# Define an inclusive range of SSL/TLS version support
('tls_version_min', 'tls1.0'),
('tls_version_max', 'tls1.2'),
# Time to wait for a service to start, in seconds
('startup_timeout', 300),
# Web Application mount points
('mount_ipa', '/ipa/'),
# WebUI stuff:
('webui_prod', True),
# Session stuff:
# Maximum time before a session expires forcing credentials to be reacquired.
('session_auth_duration', '20 minutes'),
# How a session expiration is computed, see SessionManager.set_session_expiration_time()
('session_duration_type', 'inactivity_timeout'),
# Debugging:
('verbose', 0),
('debug', False),
('startup_traceback', False),
('mode', 'production'),
('wait_for_dns', 0),
# CA plugin:
('ca_host', FQDN), # Set in Env._finalize_core()
('ca_port', 80),
('ca_agent_port', 443),
('ca_ee_port', 443),
# For the following ports, None means a default specific to the installed
# Dogtag version.
('ca_install_port', None),
('ca_agent_install_port', None),
('ca_ee_install_port', None),
# Topology plugin
('recommended_max_agmts', 4), # Recommended maximum number of replication
# agreements
# Special CLI:
('prompt_all', False),
('interactive', True),
('fallback', True),
('delegate', False),
# Enable certain optional plugins:
('enable_ra', False),
('ra_plugin', 'selfsign'),
('dogtag_version', 9),
# Used when verifying that the API hasn't changed. Not for production.
('validate_api', False),
# Skip client vs. server API version checking. Can lead to errors/strange
# behavior when newer clients talk to older servers. Use with caution.
('skip_version_check', False),
# Ignore TTL. Perform schema call and download schema if not in cache.
('force_schema_check', False),
# ********************************************************
# The remaining keys are never set from the values here!
# ********************************************************
#
# Env._bootstrap() or Env._finalize_core() will have filled in all the keys
# below by the time DEFAULT_CONFIG is merged in, so the values below are
# never actually used. They are listed both to provide a big picture and
# also so DEFAULT_CONFIG contains at least all the keys that should be
# present after Env._finalize_core() is called.
#
# Each environment variable below is sent to ``object``, which just happens
# to be an invalid value for an environment variable, so if for some reason
# any of these keys were set from the values here, an exception will be
# raised.
# Non-overridable vars set in Env._bootstrap():
('host', FQDN),
('ipalib', object), # The directory containing ipalib/__init__.py
    ('site_packages', object), # The directory containing ipalib
('script', object), # sys.argv[0]
('bin', object), # The directory containing the script
('home', object), # $HOME
# Vars set in Env._bootstrap():
('in_tree', object), # Whether or not running in-tree (bool)
('dot_ipa', object), # ~/.ipa directory
('context', object), # Name of context, default is 'default'
('confdir', object), # Directory containing config files
('env_confdir', None), # conf dir specified by IPA_CONFDIR env variable
('conf', object), # File containing context specific config
('conf_default', object), # File containing context independent config
('plugins_on_demand', object), # Whether to finalize plugins on-demand (bool)
('nss_dir', object), # Path to nssdb, default {confdir}/nssdb
('tls_ca_cert', object), # Path to CA cert file
# Set in Env._finalize_core():
('in_server', object), # Whether or not running in-server (bool)
('logdir', object), # Directory containing log files
('log', object), # Path to context specific log file
('jsonrpc_uri', object), # derived from xmlrpc_uri in Env._finalize_core()
('server', object), # derived from jsonrpc_uri in Env._finalize_core()
)
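# --- Illustrative sketch (editor's addition, not part of the original
# module): DEFAULT_CONFIG is a tuple of (key, value) pairs, so consumers
# turn it into a dict as the comments above describe. These are only the
# shipped defaults; Env overrides most of them at runtime.
def _default_config_example():
    defaults = dict(DEFAULT_CONFIG)
    basedn = defaults['basedn']            # DN(('dc', 'example'), ('dc', 'com'))
    users_container = defaults['container_user']
    return basedn, users_container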
LDAP_GENERALIZED_TIME_FORMAT = "%Y%m%d%H%M%SZ"
IPA_ANCHOR_PREFIX = ':IPA:'
SID_ANCHOR_PREFIX = ':SID:'
# domains levels
DOMAIN_LEVEL_0 = 0 # compat
DOMAIN_LEVEL_1 = 1 # replica promotion, topology plugin
MIN_DOMAIN_LEVEL = DOMAIN_LEVEL_0
MAX_DOMAIN_LEVEL = DOMAIN_LEVEL_1
# Constants used in generation of replication agreements and as topology
# defaults
# List of attributes that need to be excluded from replication initialization.
REPL_AGMT_TOTAL_EXCLUDES = ('entryusn',
'krblastsuccessfulauth',
'krblastfailedauth',
'krbloginfailedcount')
# List of attributes that need to be excluded from normal replication.
REPL_AGMT_EXCLUDES = ('memberof', 'idnssoaserial') + REPL_AGMT_TOTAL_EXCLUDES
# List of attributes that are not updated on empty replication
REPL_AGMT_STRIP_ATTRS = ('modifiersName',
'modifyTimestamp',
'internalModifiersName',
'internalModifyTimestamp')
DOMAIN_SUFFIX_NAME = 'domain'
CA_SUFFIX_NAME = 'ca'
PKI_GSSAPI_SERVICE_NAME = 'dogtag'
IPA_CA_CN = u'ipa'
IPA_CA_RECORD = "ipa-ca"
IPA_CA_NICKNAME = 'caSigningCert cert-pki-ca'
RENEWAL_CA_NAME = 'dogtag-ipa-ca-renew-agent'
# regexp definitions
PATTERN_GROUPUSER_NAME = '^[a-zA-Z0-9_.][a-zA-Z0-9_.-]*[a-zA-Z0-9_.$-]?$'
# Kerberos Anonymous principal name
ANON_USER = 'WELLKNOWN/ANONYMOUS'
# IPA API Framework user
IPAAPI_USER = 'ipaapi'
IPAAPI_GROUP = 'ipaapi'
# TLS related constants
TLS_VERSIONS = [
"ssl2",
"ssl3",
"tls1.0",
"tls1.1",
"tls1.2"
]
TLS_VERSION_MINIMAL = "tls1.0"
# high ciphers without RC4, MD5, TripleDES, pre-shared key
# and secure remote password
TLS_HIGH_CIPHERS = "HIGH:!aNULL:!eNULL:!MD5:!RC4:!3DES:!PSK:!SRP"
# Use cache path
USER_CACHE_PATH = (
os.environ.get('XDG_CACHE_HOME') or
os.path.join(
os.environ.get(
'HOME',
os.path.expanduser('~')
),
'.cache'
)
)
SOFTHSM_DNSSEC_TOKEN_LABEL = u'ipaDNSSEC'
| gpl-3.0 | 8,077,007,687,917,618,000 | 2,899,160,869,778,003,000 | 38.526814 | 102 | 0.608859 | false |
XiangyiKong/flask-snippets | appstructure/zc.buildout/__init__.py | 2 | 1215 | # -*- coding: utf-8 -*-
"""
appstructure.zc.buildout
~~~~~~~~~~~~~~~~~~~~~~~~
Deploy using zc.buildout and PythonPaste
http://flask.pocoo.org/snippets/27/
"""
"""
Deploy the application
First, you could save the buildout directory using your favorite DVCS, or create a tarball for future deployments.
Then bootstrap the buildout:
~/buildout_env $ python bootstrap.py --distribute
Adjust your settings in buildout.cfg, and build the application:
~/buildout_env $ bin/buildout
Run the tests:
~/buildout_env $ bin/test
Test rendered page. ... ok
------------------------------------------------------------
Ran 1 test in 0.055s
OK
~/buildout_env $
Now launch the server:
~/buildout_env $ bin/flask-ctl debug fg
bin/paster serve parts/etc/debug.ini --reload
Starting subprocess with file monitor
Starting server in PID 24862.
serving on http://127.0.0.1:5000
Visit http://127.0.0.1:5000 with your browser.
Visit http://127.0.0.1:5000/?broken to bring the Werkzeug Debugger. Quit the application with Ctrl+C.
Note: when you change the configuration in buildout.cfg, you need to rebuild the application using bin/buildout.
Further reading:
http://www.buildout.org
http://pythonpaste.org
"""
| bsd-3-clause | 5,670,098,645,554,295,000 | 3,052,073,815,341,115,000 | 24.851064 | 114 | 0.688889 | false |
gurneyalex/stock-logistics-workflow | product_serial/__openerp__.py | 17 | 2709 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2008 Raphaël Valyi
# Copyright (C) 2013 Akretion (http://www.akretion.com/)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Product Serial",
"summary": "Enhance Serial Number management",
"version": "1.0",
"author": "Akretion, NaN·tic,Odoo Community Association (OCA)",
"website": "http://www.akretion.com",
"depends": ["stock"],
"category": "Generic Modules/Inventory Control",
"license": "AGPL-3",
"description": """Enhance the management of Production Lots (Serial Numbers) in OpenERP.
Here are the additional features proposed by this module:
1. Add a new selection field 'Lot split type' on the product form
under the 'Inventory' tab to specify how the Production Lots should be
split on the Pickings (you should also enable 'Track Incoming/Outgoing
Lots', and the new 'Track internal lots' field).
2. If the option 'Active auto split' is active for the Company,
OpenERP will automagically split up picking list movements into one
movement per product instance or logistical unit packing quantity (in
that case, only the first logistical unit is currently taken into
account; support for the remaining ones is still to be done).
3. Turn Incoming Pickings into an editable grid where you can
directly type the codes of a new production lot and/or tracking number
to create and associate to the move (it also checks it doesn't exist
yet).
4. If the option 'Group invoice lines' is active for the Company,
OpenERP will group the invoice lines to make it look like the
Sale/Purchase Order when generating an Invoice from a Picking.
""",
"demo": ["product_demo.xml"],
"data": [
"product_view.xml",
"company_view.xml",
"stock_view.xml",
"wizard/prodlot_wizard_view.xml",
],
"active": False,
'installable': False
}
| agpl-3.0 | 1,606,430,923,879,967,500 | 7,901,082,954,326,018,000 | 41.296875 | 92 | 0.661987 | false |
charbeljc/account-financial-tools | __unported__/account_cancel_invoice_check_payment_order/account_invoice.py | 44 | 2589 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author Vincent Renaville. Copyright 2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
from openerp.osv import osv, orm
class account_invoice(orm.Model):
_inherit = "account.invoice"
def action_cancel(self, cr, uid, ids, *args):
invoices = self.read(cr, uid, ids, ['move_id', 'payment_ids'])
for invoice in invoices:
if invoice['move_id']:
# This invoice have a move line, we search move_line
# concerned by this move
cr.execute("""SELECT po.reference as payment_name,
po.date_done as payment_date,
pl.name
FROM payment_line as pl
INNER JOIN payment_order AS po
ON pl.id = order_id
WHERE move_line_id IN (SELECT id
FROM account_move_line
WHERE move_id = %s)
LIMIT 1""",
(invoice['move_id'][0],))
payment_orders = cr.dictfetchone()
if payment_orders:
raise osv.except_osv(
_('Error !'),
_("Invoice already imported in the payment "
"order (%s) at %s on line %s" %
(payment_orders['payment_name'],
payment_orders['payment_date'],
payment_orders['name']))
)
return super(account_invoice, self).action_cancel(cr, uid, ids, *args)
| agpl-3.0 | -4,200,161,364,360,663,000 | 6,411,491,379,791,761,000 | 46.072727 | 78 | 0.488219 | false |
lingthio/Flask-User | flask_user/user_mixin.py | 1 | 4450 | """This module implements the UserMixin class for Flask-User.
This Mixin adds required methods to User data-model.
"""
from flask import current_app
from flask_login import UserMixin as FlaskLoginUserMixin
class UserMixin(FlaskLoginUserMixin):
""" This class adds required methods to the User data-model.
Example:
class User(db.Model, UserMixin):
...
"""
def get_id(self):
"""Converts a User ID and parts of a User password hash to a token."""
# This function is used by Flask-Login to store a User ID securely as a browser cookie.
# The last part of the password is included to invalidate tokens when password change.
# user_id and password_ends_with are encrypted, timestamped and signed.
# This function works in tandem with UserMixin.get_user_by_token()
user_manager = current_app.user_manager
user_id = self.id
password_ends_with = '' if user_manager.USER_ENABLE_AUTH0 else self.password[-8:]
user_token = user_manager.generate_token(
user_id, # User ID
password_ends_with, # Last 8 characters of user password
)
# print("UserMixin.get_id: ID:", self.id, "token:", user_token)
return user_token
@classmethod
def get_user_by_token(cls, token, expiration_in_seconds=None):
# This function works in tandem with UserMixin.get_id()
# Token signatures and timestamps are verified.
# user_id and password_ends_with are decrypted.
# Verifies a token and decrypts a User ID and parts of a User password hash
user_manager = current_app.user_manager
data_items = user_manager.verify_token(token, expiration_in_seconds)
# Verify password_ends_with
token_is_valid = False
if data_items:
# Load user by User ID
user_id = data_items[0]
password_ends_with = data_items[1]
user = user_manager.db_manager.get_user_by_id(user_id)
user_password = '' if user_manager.USER_ENABLE_AUTH0 else user.password[-8:]
# Make sure that last 8 characters of user password matches
token_is_valid = user and user_password==password_ends_with
return user if token_is_valid else None
def has_roles(self, *requirements):
""" Return True if the user has all of the specified roles. Return False otherwise.
has_roles() accepts a list of requirements:
has_role(requirement1, requirement2, requirement3).
Each requirement is either a role_name, or a tuple_of_role_names.
role_name example: 'manager'
tuple_of_role_names: ('funny', 'witty', 'hilarious')
A role_name-requirement is accepted when the user has this role.
A tuple_of_role_names-requirement is accepted when the user has ONE of these roles.
has_roles() returns true if ALL of the requirements have been accepted.
For example:
has_roles('a', ('b', 'c'), d)
Translates to:
User has role 'a' AND (role 'b' OR role 'c') AND role 'd'"""
# Translates a list of role objects to a list of role_names
user_manager = current_app.user_manager
role_names = user_manager.db_manager.get_user_roles(self)
# has_role() accepts a list of requirements
for requirement in requirements:
if isinstance(requirement, (list, tuple)):
# this is a tuple_of_role_names requirement
tuple_of_role_names = requirement
authorized = False
for role_name in tuple_of_role_names:
if role_name in role_names:
# tuple_of_role_names requirement was met: break out of loop
authorized = True
break
if not authorized:
return False # tuple_of_role_names requirement failed: return False
else:
# this is a role_name requirement
role_name = requirement
# the user must have this role
if not role_name in role_names:
return False # role_name requirement failed: return False
# All requirements have been met: return True
return True
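# --- Illustrative sketch (editor's addition, not part of Flask-User): the
# requirement semantics documented in has_roles() above, re-implemented
# against a plain set of role names so it can be run without a database.
def _has_roles_example(role_names, *requirements):
    for requirement in requirements:
        if isinstance(requirement, (list, tuple)):
            # tuple requirement: at least ONE of these roles must be present
            if not any(name in role_names for name in requirement):
                return False
        else:
            # plain requirement: this exact role must be present
            if requirement not in role_names:
                return False
    return True

# _has_roles_example({'a', 'c', 'd'}, 'a', ('b', 'c'), 'd')  -> True
# _has_roles_example({'a', 'd'}, 'a', ('b', 'c'), 'd')       -> False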
| mit | -3,035,617,870,752,151,000 | 6,390,556,372,250,658,000 | 42.203883 | 106 | 0.602921 | false |
mathcamp/steward_web | steward_web/__init__.py | 1 | 1965 | """ Steward extension providing framework for web interface """
import re
import pyramid.renderers
from pyramid.request import Request
from pyramid.settings import asbool
def to_json(value):
""" A json filter for jinja2 """
return pyramid.renderers.render('json', value)
def do_index(request):
""" Render the index page """
return {}
def _add_steward_web_app(config, title, name):
""" Add a route to the list of steward web apps """
config.registry.steward_web_apps.append((title, name))
def _web_apps(request):
""" Get the list of steward web apps """
return tuple(request.registry.steward_web_apps)
def _route_names(request, pattern=r'.*'):
""" Get a list of route names that match the pattern """
pattern = re.compile('^' + pattern + '$')
introspector = request.registry.introspector
routes = introspector.get_category('routes')
names = []
for route in routes:
name = route['introspectable']['name']
if pattern.match(name):
names.append(name)
return names
def _route_map(request, pattern=r'.*'):
""" Get a dict of route names to route urls """
return {name: request.route_url(name) for name in
request.route_names(pattern)}
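# --- Illustrative sketch (editor's addition, not part of the original
# module): once includeme() below has registered the request methods, a
# view can enumerate routes whose names match a regular expression. The
# route names used here ('login', 'logout') are the ones added in
# includeme().
def _example_view(request):
    login_logout = request.route_map(r'log(in|out)')  # {'login': ..., 'logout': ...}
    all_names = request.route_names()
    return {'urls': login_logout, 'names': all_names}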
def includeme(config):
""" Configure the app """
settings = config.get_settings()
config.add_route('root', '/')
config.add_view('steward_web.do_index', route_name='root',
renderer='index.jinja2')
config.add_route('login', '/login')
config.add_route('logout', '/logout')
config.registry.steward_web_apps = []
config.add_directive('add_steward_web_app', _add_steward_web_app)
config.add_request_method(_web_apps, name='steward_web_apps', reify=True)
config.add_request_method(_route_names, name='route_names')
config.add_request_method(_route_map, name='route_map')
if asbool(settings.get('steward.web.basic_login', True)):
config.scan()
| mit | 2,200,076,226,215,330,800 | -6,857,478,729,087,201,000 | 29.230769 | 77 | 0.653944 | false |
eayunstack/fuel-web | nailgun/nailgun/extensions/cluster_upgrade/upgrade.py | 3 | 7844 | # -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
from distutils import version
import six
from nailgun import consts
from nailgun.objects.serializers import network_configuration
from nailgun import utils
from .objects import adapters
def merge_attributes(a, b):
"""Merge values of editable attributes.
    The result has the structure of the b attributes. Values of keys that
    also exist in a are copied from a, except for the "repo_setup" section
    and "metadata" entries, which keep the values from b.
"""
attrs = copy.deepcopy(b)
for section, pairs in six.iteritems(attrs):
if section == "repo_setup" or section not in a:
continue
a_values = a[section]
for key, values in six.iteritems(pairs):
if key != "metadata" and key in a_values:
values["value"] = a_values[key]["value"]
return attrs
def merge_nets(a, b):
new_settings = copy.deepcopy(b)
source_networks = dict((n["name"], n) for n in a["networks"])
for net in new_settings["networks"]:
if net["name"] not in source_networks:
continue
source_net = source_networks[net["name"]]
for key, value in six.iteritems(net):
if (key not in ("cluster_id", "id", "meta", "group_id") and
key in source_net):
net[key] = source_net[key]
networking_params = new_settings["networking_parameters"]
source_params = a["networking_parameters"]
for key, value in six.iteritems(networking_params):
if key not in source_params:
continue
networking_params[key] = source_params[key]
return new_settings
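# --- Illustrative sketch (editor's addition, not part of the original
# module): how merge_attributes() combines the attributes of the original
# (a) and the seed (b) clusters. The section and key names are made up;
# real editable attributes follow the same
# {"section": {"key": {"value": ...}}} shape.
def _merge_attributes_example():
    orig = {
        "common": {"debug": {"value": True}},
        "repo_setup": {"repos": {"value": ["old-repo"]}},
    }
    seed = {
        "common": {"debug": {"value": False},
                   "metadata": {"label": "Common"}},
        "repo_setup": {"repos": {"value": ["new-repo"]}},
        "new_section": {"feature": {"value": "x"}},
    }
    merged = merge_attributes(orig, seed)
    # merged["common"]["debug"]["value"] is True (taken from the original),
    # merged["repo_setup"]["repos"]["value"] stays ["new-repo"], and
    # merged["new_section"] is kept as-is because it is absent from orig.
    return merged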
class UpgradeHelper(object):
network_serializers = {
consts.CLUSTER_NET_PROVIDERS.neutron:
network_configuration.NeutronNetworkConfigurationSerializer,
consts.CLUSTER_NET_PROVIDERS.nova_network:
network_configuration.NovaNetworkConfigurationSerializer,
}
@classmethod
def clone_cluster(cls, orig_cluster, data):
from .objects import relations
new_cluster = cls.create_cluster_clone(orig_cluster, data)
cls.copy_attributes(orig_cluster, new_cluster)
cls.copy_network_config(orig_cluster, new_cluster)
relations.UpgradeRelationObject.create_relation(orig_cluster.id,
new_cluster.id)
return new_cluster
@classmethod
def create_cluster_clone(cls, orig_cluster, data):
create_data = orig_cluster.get_create_data()
create_data["name"] = data["name"]
create_data["release_id"] = data["release_id"]
new_cluster = adapters.NailgunClusterAdapter.create(create_data)
return new_cluster
@classmethod
def copy_attributes(cls, orig_cluster, new_cluster):
# TODO(akscram): Attributes should be copied including
# borderline cases when some parameters are
# renamed or moved into plugins. Also, we should
# to keep special steps in copying of parameters
# that know how to translate parameters from one
# version to another. A set of this kind of steps
# should define an upgrade path of a particular
# cluster.
new_cluster.generated_attrs = utils.dict_merge(
new_cluster.generated_attrs,
orig_cluster.generated_attrs)
new_cluster.editable_attrs = merge_attributes(
orig_cluster.editable_attrs,
new_cluster.editable_attrs)
@classmethod
def transform_vips_for_net_groups_70(cls, vips):
"""Rename or remove types of VIPs for 7.0 network groups.
This method renames types of VIPs from older releases (<7.0) to
be compatible with network groups of the 7.0 release according
to the rules:
management: haproxy -> management
public: haproxy -> public
public: vrouter -> vrouter_pub
Note, that in the result VIPs are present only those IPs that
correspond to the given rules.
"""
rename_vip_rules = {
"management": {
"haproxy": "management",
"vrouter": "vrouter",
},
"public": {
"haproxy": "public",
"vrouter": "vrouter_pub",
},
}
renamed_vips = collections.defaultdict(dict)
for ng_name, vips in six.iteritems(vips):
ng_vip_rules = rename_vip_rules[ng_name]
for vip_type, vip_addr in six.iteritems(vips):
if vip_type not in ng_vip_rules:
continue
new_vip_type = ng_vip_rules[vip_type]
renamed_vips[ng_name][new_vip_type] = vip_addr
return renamed_vips
@classmethod
def copy_network_config(cls, orig_cluster, new_cluster):
nets_serializer = cls.network_serializers[orig_cluster.net_provider]
nets = merge_nets(
nets_serializer.serialize_for_cluster(orig_cluster.cluster),
nets_serializer.serialize_for_cluster(new_cluster.cluster))
orig_net_manager = orig_cluster.get_network_manager()
new_net_manager = new_cluster.get_network_manager()
new_net_manager.update(nets)
vips = orig_net_manager.get_assigned_vips()
for ng_name in vips:
if ng_name not in (consts.NETWORKS.public,
consts.NETWORKS.management):
vips.pop(ng_name)
# NOTE(akscram): In the 7.0 release was introduced networking
# templates that use the vip_type column as
# unique names of VIPs.
if version.LooseVersion(orig_cluster.release.environment_version) < \
version.LooseVersion("7.0"):
vips = cls.transform_vips_for_net_groups_70(vips)
new_net_manager.assign_given_vips_for_net_groups(vips)
new_net_manager.assign_vips_for_net_groups()
@classmethod
def assign_node_to_cluster(cls, node, seed_cluster):
orig_cluster = adapters.NailgunClusterAdapter.get_by_uid(
node.cluster_id)
orig_manager = orig_cluster.get_network_manager()
seed_manager = seed_cluster.get_network_manager()
netgroups_id_mapping = cls.get_netgroups_id_mapping(
orig_cluster, seed_cluster)
node.update_cluster_assignment(seed_cluster)
seed_manager.set_node_netgroups_ids(node, netgroups_id_mapping)
orig_manager.set_nic_assignment_netgroups_ids(
node, netgroups_id_mapping)
orig_manager.set_bond_assignment_netgroups_ids(
node, netgroups_id_mapping)
node.add_pending_change(consts.CLUSTER_CHANGES.interfaces)
@classmethod
def get_netgroups_id_mapping(self, orig_cluster, seed_cluster):
orig_ng = orig_cluster.get_network_groups()
seed_ng = seed_cluster.get_network_groups()
seed_ng_dict = dict((ng.name, ng.id) for ng in seed_ng)
mapping = dict((ng.id, seed_ng_dict[ng.name]) for ng in orig_ng)
mapping[orig_cluster.get_admin_network_group().id] = \
seed_cluster.get_admin_network_group().id
return mapping
| apache-2.0 | 7,466,150,975,783,790,000 | -895,062,727,308,607,500 | 38.616162 | 78 | 0.620984 | false |
zace-yuan/viewfinder | backend/db/async_aws_sts.py | 13 | 4387 | #!/bin/env python
#
# Copyright 2012 bit.ly
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Created by Dan Frank on 2012-01-25.
Copyright (c) 2012 bit.ly. All rights reserved.
"""
import functools
from tornado.httpclient import HTTPRequest
from tornado.httpclient import AsyncHTTPClient
import xml.sax
import boto
from boto.sts.connection import STSConnection
from boto.sts.credentials import Credentials
from boto.exception import BotoServerError
class InvalidClientTokenIdError(BotoServerError):
"""Error subclass to indicate that the client's token(s) is/are invalid.
"""
pass
class AsyncAwsSts(STSConnection):
"""Class that manages session tokens. Users of AsyncDynamoDB should not
need to worry about what goes on here.
Usage: Keep an instance of this class (though it should be cheap to
    re-instantiate) and periodically call get_session_token to get a new
Credentials object when, say, your session token expires.
"""
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
converter=None):
STSConnection.__init__(self, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass, debug,
https_connection_factory, region, path, converter)
def get_session_token(self, callback):
"""Gets a new Credentials object with a session token, using this
instance's aws keys. Callback should operate on the new Credentials obj,
or else a boto.exception.BotoServerError.
"""
return self.get_object('GetSessionToken', {}, Credentials, verb='POST', callback=callback)
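    # Hedged usage sketch (not part of the original module); the callback name
    # and attributes below are illustrative:
    #
    #   def on_credentials(creds, error=None):
    #       if error:
    #           raise error
    #       print creds.access_key, creds.secret_key, creds.session_token
    #
    #   sts = AsyncAwsSts(aws_access_key_id, aws_secret_access_key)
    #   sts.get_session_token(on_credentials)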
def get_object(self, action, params, cls, path="/", parent=None, verb="GET", callback=None):
"""Get an instance of `cls` using `action`."""
if not parent:
parent = self
self.make_request(action, params, path, verb,
functools.partial(self._finish_get_object, callback=callback, parent=parent, cls=cls))
def _finish_get_object(self, response_body, callback, cls=None, parent=None, error=None):
"""Process the body returned by STS. If an error is present,
convert from a tornado error to a boto error.
"""
if error:
if error.code == 403:
error_class = InvalidClientTokenIdError
else:
error_class = BotoServerError
return callback(None, error=error_class(error.code, error.message, response_body))
obj = cls(parent)
h = boto.handler.XmlHandler(obj, parent)
xml.sax.parseString(response_body, h)
return callback(obj)
    def make_request(self, action, params=None, path='/', verb='GET', callback=None):
"""Make an async request. This handles the logic of translating
from boto params to a tornado request obj, issuing the request,
and passing back the body.
The callback should operate on the body of the response, and take
an optional error argument that will be a tornado error.
"""
request = HTTPRequest('https://%s' % self.host, method=verb)
        request.params = params if params is not None else {}
request.auth_path = '/' # need this for auth
request.host = self.host # need this for auth
if action:
request.params['Action'] = action
if self.APIVersion:
request.params['Version'] = self.APIVersion
self._auth_handler.add_auth(request) # add signature
http_client = AsyncHTTPClient()
http_client.fetch(request, functools.partial(self._finish_make_request, callback=callback))
def _finish_make_request(self, response, callback):
if response.error:
return callback(response.body, error=response.error)
return callback(response.body)
| apache-2.0 | 4,768,954,982,996,872,000 | 6,398,320,590,044,654,000 | 40 | 108 | 0.693184 | false |
DmitryADP/diff_qc750 | external/webkit/Tools/CygwinDownloader/cygwin-downloader.py | 20 | 5471 | #!/usr/bin/env python
import os, random, sys, time, urllib
#
# Options
#
dry_run = len(sys.argv) > 1 and "--dry-run" in set(sys.argv[1:])
quiet = len(sys.argv) > 1 and "--quiet" in set(sys.argv[1:])
#
# Functions and constants
#
def download_progress_hook(block_count, block_size, total_blocks):
if quiet or random.random() > 0.5:
return
sys.stdout.write(".")
sys.stdout.flush()
def download_url_to_file(url, file, message):
if not quiet:
print message + " ",
if not dry_run:
dir = os.path.dirname(file)
if len(dir) and not os.path.exists(dir):
os.makedirs(dir)
urllib.urlretrieve(url, file, download_progress_hook)
if not quiet:
print
# This is mostly just the list of North America http mirrors from http://cygwin.com/mirrors.html,
# but a few have been removed that seemed unresponsive from Cupertino.
mirror_servers = ["http://cygwin.elite-systems.org/",
"http://mirror.mcs.anl.gov/cygwin/",
"http://cygwin.osuosl.org/",
"http://mirrors.kernel.org/sourceware/cygwin/",
"http://mirrors.xmission.com/cygwin/",
"http://sourceware.mirrors.tds.net/pub/sourceware.org/cygwin/"]
package_mirror_url = mirror_servers[random.choice(range(len(mirror_servers)))]
def download_package(package, message):
download_url_to_file(package_mirror_url + package["path"], package["path"], message)
required_packages = frozenset(["apache",
"bc",
"bison",
"curl",
"diffutils",
"e2fsprogs",
"emacs",
"flex",
"gcc",
"gperf",
"keychain",
"make",
"nano",
"openssh",
"patch",
"perl",
"perl-libwin32",
"python",
"rebase",
"rsync",
"ruby",
"subversion",
"unzip",
"vim",
"zip"])
#
# Main
#
print "Using Cygwin mirror server " + package_mirror_url + " to download setup.ini..."
urllib.urlretrieve(package_mirror_url + "setup.ini", "setup.ini.orig")
downloaded_packages_file_path = "setup.ini.orig"
downloaded_packages_file = file(downloaded_packages_file_path, "r")
if not dry_run:
modified_packages_file = file("setup.ini", "w")
packages = {}
current_package = ''
for line in downloaded_packages_file.readlines():
if line[0] == "@":
current_package = line[2:-1]
packages[current_package] = {"name": current_package, "needs_download": False, "requires": [], "path": ""}
elif line[:10] == "category: ":
if current_package in required_packages:
line = "category: Base\n"
if "Base" in set(line[10:-1].split()):
packages[current_package]["needs_download"] = True
elif line[:10] == "requires: ":
packages[current_package]["requires"] = line[10:].split()
packages[current_package]["requires"].sort()
elif line[:9] == "install: " and not len(packages[current_package]["path"]):
end_of_path = line.find(" ", 9)
if end_of_path != -1:
packages[current_package]["path"] = line[9:end_of_path]
if not dry_run:
modified_packages_file.write(line)
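# Hedged illustration (not part of the original script): the loop above parses
# setup.ini stanzas that look roughly like
#
#   @ bash
#   category: Base Shells
#   requires: cygwin libncurses10
#   install: release/bash/bash-3.2.49-23.tar.bz2 1234567 abcdef...
#
# keeping each package's category, dependencies and archive path.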
downloaded_packages_file.close()
os.remove(downloaded_packages_file_path)
if not dry_run:
modified_packages_file.close()
names_to_download = set()
package_names = packages.keys()
package_names.sort()
def add_package_and_dependencies(name):
if name in names_to_download:
return
if not name in packages:
return
packages[name]["needs_download"] = True
names_to_download.add(name)
for dep in packages[name]["requires"]:
add_package_and_dependencies(dep)
for name in package_names:
if packages[name]["needs_download"]:
add_package_and_dependencies(name)
downloaded_so_far = 0
for name in package_names:
if packages[name]["needs_download"]:
downloaded_so_far += 1
download_package(packages[name], "Downloading package %3d of %3d (%s)" % (downloaded_so_far, len(names_to_download), name))
download_url_to_file("http://cygwin.com/setup.exe", "setup.exe", "Downloading setup.exe")
seconds_to_sleep = 10
print """
Finished downloading Cygwin. In %d seconds,
I will run setup.exe. Select the "Install
from Local Directory" option and browse to
"%s"
when asked for the "Local Package Directory".
""" % (seconds_to_sleep, os.getcwd())
while seconds_to_sleep > 0:
print "%d..." % seconds_to_sleep,
sys.stdout.flush()
time.sleep(1)
seconds_to_sleep -= 1
print
if not dry_run:
os.execl("setup.exe")
| gpl-2.0 | 4,153,633,186,799,158,300 | 4,805,241,411,400,149,000 | 33.847134 | 139 | 0.519101 | false |
PolicyStat/django | tests/get_or_create/tests.py | 19 | 13380 | from __future__ import unicode_literals
from datetime import date
import traceback
import warnings
from django.db import IntegrityError, DatabaseError
from django.utils.encoding import DjangoUnicodeDecodeError
from django.test import TestCase, TransactionTestCase
from .models import (DefaultPerson, Person, ManualPrimaryKeyTest, Profile,
Tag, Thing, Publisher, Author, Book)
class GetOrCreateTests(TestCase):
def setUp(self):
self.lennon = Person.objects.create(
first_name='John', last_name='Lennon', birthday=date(1940, 10, 9)
)
def test_get_or_create_method_with_get(self):
created = Person.objects.get_or_create(
first_name="John", last_name="Lennon", defaults={
"birthday": date(1940, 10, 9)
}
)[1]
self.assertFalse(created)
self.assertEqual(Person.objects.count(), 1)
def test_get_or_create_method_with_create(self):
created = Person.objects.get_or_create(
first_name='George', last_name='Harrison', defaults={
'birthday': date(1943, 2, 25)
}
)[1]
self.assertTrue(created)
self.assertEqual(Person.objects.count(), 2)
def test_get_or_create_redundant_instance(self):
"""
If we execute the exact same statement twice, the second time,
it won't create a Person.
"""
Person.objects.get_or_create(
first_name='George', last_name='Harrison', defaults={
'birthday': date(1943, 2, 25)
}
)
created = Person.objects.get_or_create(
first_name='George', last_name='Harrison', defaults={
'birthday': date(1943, 2, 25)
}
)[1]
self.assertFalse(created)
self.assertEqual(Person.objects.count(), 2)
def test_get_or_create_invalid_params(self):
"""
If you don't specify a value or default value for all required
fields, you will get an error.
"""
self.assertRaises(
IntegrityError,
Person.objects.get_or_create, first_name="Tom", last_name="Smith"
)
def test_get_or_create_on_related_manager(self):
p = Publisher.objects.create(name="Acme Publishing")
# Create a book through the publisher.
book, created = p.books.get_or_create(name="The Book of Ed & Fred")
self.assertTrue(created)
# The publisher should have one book.
self.assertEqual(p.books.count(), 1)
# Try get_or_create again, this time nothing should be created.
book, created = p.books.get_or_create(name="The Book of Ed & Fred")
self.assertFalse(created)
# And the publisher should still have one book.
self.assertEqual(p.books.count(), 1)
# Add an author to the book.
ed, created = book.authors.get_or_create(name="Ed")
self.assertTrue(created)
# The book should have one author.
self.assertEqual(book.authors.count(), 1)
# Try get_or_create again, this time nothing should be created.
ed, created = book.authors.get_or_create(name="Ed")
self.assertFalse(created)
# And the book should still have one author.
self.assertEqual(book.authors.count(), 1)
# Add a second author to the book.
fred, created = book.authors.get_or_create(name="Fred")
self.assertTrue(created)
# The book should have two authors now.
self.assertEqual(book.authors.count(), 2)
# Create an Author not tied to any books.
Author.objects.create(name="Ted")
# There should be three Authors in total. The book object should have two.
self.assertEqual(Author.objects.count(), 3)
self.assertEqual(book.authors.count(), 2)
# Try creating a book through an author.
_, created = ed.books.get_or_create(name="Ed's Recipes", publisher=p)
self.assertTrue(created)
# Now Ed has two Books, Fred just one.
self.assertEqual(ed.books.count(), 2)
self.assertEqual(fred.books.count(), 1)
# Use the publisher's primary key value instead of a model instance.
_, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id)
self.assertTrue(created)
# Try get_or_create again, this time nothing should be created.
_, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id)
self.assertFalse(created)
# The publisher should have three books.
self.assertEqual(p.books.count(), 3)
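# Hedged illustration (not part of the original test module): the API exercised
# above is typically called as
#
#   person, created = Person.objects.get_or_create(
#       first_name='John', last_name='Lennon',
#       defaults={'birthday': date(1940, 10, 9)})
#
# where `created` is False when an existing row was returned instead of a new one.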
class GetOrCreateTestsWithManualPKs(TestCase):
def setUp(self):
self.first_pk = ManualPrimaryKeyTest.objects.create(id=1, data="Original")
def test_create_with_duplicate_primary_key(self):
"""
If you specify an existing primary key, but different other fields,
then you will get an error and data will not be updated.
"""
self.assertRaises(
IntegrityError,
ManualPrimaryKeyTest.objects.get_or_create, id=1, data="Different"
)
self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original")
def test_get_or_create_raises_IntegrityError_plus_traceback(self):
"""
get_or_create should raise IntegrityErrors with the full traceback.
This is tested by checking that a known method call is in the traceback.
We cannot use assertRaises here because we need to inspect
the actual traceback. Refs #16340.
"""
try:
ManualPrimaryKeyTest.objects.get_or_create(id=1, data="Different")
except IntegrityError:
formatted_traceback = traceback.format_exc()
self.assertIn(str('obj.save'), formatted_traceback)
def test_savepoint_rollback(self):
"""
Regression test for #20463: the database connection should still be
usable after a DataError or ProgrammingError in .get_or_create().
"""
try:
# Hide warnings when broken data is saved with a warning (MySQL).
with warnings.catch_warnings():
warnings.simplefilter('ignore')
Person.objects.get_or_create(
birthday=date(1970, 1, 1),
defaults={'first_name': b"\xff", 'last_name': b"\xff"})
except (DatabaseError, DjangoUnicodeDecodeError):
Person.objects.create(
first_name="Bob", last_name="Ross", birthday=date(1950, 1, 1))
else:
self.skipTest("This backend accepts broken utf-8.")
def test_get_or_create_empty(self):
"""
Regression test for #16137: get_or_create does not require kwargs.
"""
try:
DefaultPerson.objects.get_or_create()
except AssertionError:
self.fail("If all the attributes on a model have defaults, we "
"shouldn't need to pass any arguments.")
class GetOrCreateTransactionTests(TransactionTestCase):
available_apps = ['get_or_create']
def test_get_or_create_integrityerror(self):
"""
Regression test for #15117. Requires a TransactionTestCase on
databases that delay integrity checks until the end of transactions,
otherwise the exception is never raised.
"""
try:
Profile.objects.get_or_create(person=Person(id=1))
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
class GetOrCreateThroughManyToMany(TestCase):
def test_get_get_or_create(self):
tag = Tag.objects.create(text='foo')
a_thing = Thing.objects.create(name='a')
a_thing.tags.add(tag)
obj, created = a_thing.tags.get_or_create(text='foo')
self.assertFalse(created)
self.assertEqual(obj.pk, tag.pk)
def test_create_get_or_create(self):
a_thing = Thing.objects.create(name='a')
obj, created = a_thing.tags.get_or_create(text='foo')
self.assertTrue(created)
self.assertEqual(obj.text, 'foo')
self.assertIn(obj, a_thing.tags.all())
def test_something(self):
Tag.objects.create(text='foo')
a_thing = Thing.objects.create(name='a')
self.assertRaises(IntegrityError, a_thing.tags.get_or_create, text='foo')
class UpdateOrCreateTests(TestCase):
def test_update(self):
Person.objects.create(
first_name='John', last_name='Lennon', birthday=date(1940, 10, 9)
)
p, created = Person.objects.update_or_create(
first_name='John', last_name='Lennon', defaults={
'birthday': date(1940, 10, 10)
}
)
self.assertFalse(created)
self.assertEqual(p.first_name, 'John')
self.assertEqual(p.last_name, 'Lennon')
self.assertEqual(p.birthday, date(1940, 10, 10))
def test_create(self):
p, created = Person.objects.update_or_create(
first_name='John', last_name='Lennon', defaults={
'birthday': date(1940, 10, 10)
}
)
self.assertTrue(created)
self.assertEqual(p.first_name, 'John')
self.assertEqual(p.last_name, 'Lennon')
self.assertEqual(p.birthday, date(1940, 10, 10))
def test_create_twice(self):
params = {
'first_name': 'John',
'last_name': 'Lennon',
'birthday': date(1940, 10, 10),
}
Person.objects.update_or_create(**params)
# If we execute the exact same statement, it won't create a Person.
p, created = Person.objects.update_or_create(**params)
self.assertFalse(created)
def test_integrity(self):
"""
If you don't specify a value or default value for all required
fields, you will get an error.
"""
self.assertRaises(IntegrityError,
Person.objects.update_or_create, first_name="Tom", last_name="Smith")
def test_manual_primary_key_test(self):
"""
If you specify an existing primary key, but different other fields,
then you will get an error and data will not be updated.
"""
ManualPrimaryKeyTest.objects.create(id=1, data="Original")
self.assertRaises(
IntegrityError,
ManualPrimaryKeyTest.objects.update_or_create, id=1, data="Different"
)
self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original")
def test_error_contains_full_traceback(self):
"""
update_or_create should raise IntegrityErrors with the full traceback.
This is tested by checking that a known method call is in the traceback.
        We cannot use assertRaises here because we need to inspect
the actual traceback. Refs #16340.
"""
try:
ManualPrimaryKeyTest.objects.update_or_create(id=1, data="Different")
except IntegrityError:
formatted_traceback = traceback.format_exc()
self.assertIn('obj.save', formatted_traceback)
def test_create_with_related_manager(self):
"""
Should be able to use update_or_create from the related manager to
create a book. Refs #23611.
"""
p = Publisher.objects.create(name="Acme Publishing")
book, created = p.books.update_or_create(name="The Book of Ed & Fred")
self.assertTrue(created)
self.assertEqual(p.books.count(), 1)
def test_update_with_related_manager(self):
"""
Should be able to use update_or_create from the related manager to
update a book. Refs #23611.
"""
p = Publisher.objects.create(name="Acme Publishing")
book = Book.objects.create(name="The Book of Ed & Fred", publisher=p)
self.assertEqual(p.books.count(), 1)
name = "The Book of Django"
book, created = p.books.update_or_create(defaults={'name': name}, id=book.id)
self.assertFalse(created)
self.assertEqual(book.name, name)
self.assertEqual(p.books.count(), 1)
def test_create_with_many(self):
"""
Should be able to use update_or_create from the m2m related manager to
create a book. Refs #23611.
"""
p = Publisher.objects.create(name="Acme Publishing")
author = Author.objects.create(name="Ted")
book, created = author.books.update_or_create(name="The Book of Ed & Fred", publisher=p)
self.assertTrue(created)
self.assertEqual(author.books.count(), 1)
def test_update_with_many(self):
"""
Should be able to use update_or_create from the m2m related manager to
update a book. Refs #23611.
"""
p = Publisher.objects.create(name="Acme Publishing")
author = Author.objects.create(name="Ted")
book = Book.objects.create(name="The Book of Ed & Fred", publisher=p)
book.authors.add(author)
self.assertEqual(author.books.count(), 1)
name = "The Book of Django"
book, created = author.books.update_or_create(defaults={'name': name}, id=book.id)
self.assertFalse(created)
self.assertEqual(book.name, name)
self.assertEqual(author.books.count(), 1)
| bsd-3-clause | 433,362,557,759,062,660 | 6,151,539,271,397,874,000 | 37.228571 | 96 | 0.617713 | false |
abrt/faf | src/pyfaf/storage/migrations/versions/168c63b81f85_report_history_default_value.py | 1 | 1945 | # Copyright (C) 2014 ABRT Team
# Copyright (C) 2014 Red Hat, Inc.
#
# This file is part of faf.
#
# faf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# faf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with faf. If not, see <http://www.gnu.org/licenses/>.
"""
Report history default value
Revision ID: 168c63b81f85
Revises: 183a15e52a4f
Create Date: 2016-12-13 15:49:32.883743
"""
from alembic.op import alter_column, execute
# revision identifiers, used by Alembic.
revision = '168c63b81f85'
down_revision = '1c4d6317721a'
def upgrade() -> None:
alter_column('reporthistorydaily', 'unique', server_default="0")
alter_column('reporthistoryweekly', 'unique', server_default="0")
alter_column('reporthistorymonthly', 'unique', server_default="0")
execute('UPDATE reporthistorydaily SET "unique" = 0 WHERE "unique" IS NULL')
execute('UPDATE reporthistoryweekly SET "unique" = 0 WHERE "unique" IS NULL')
execute('UPDATE reporthistorymonthly SET "unique" = 0 WHERE "unique" IS NULL')
def downgrade() -> None:
alter_column('reporthistorydaily', 'unique', server_default=None)
alter_column('reporthistoryweekly', 'unique', server_default=None)
alter_column('reporthistorymonthly', 'unique', server_default=None)
execute('UPDATE reporthistorydaily SET "unique" = NULL WHERE "unique" = 0')
execute('UPDATE reporthistoryweekly SET "unique" = NULL WHERE "unique" = 0')
execute('UPDATE reporthistorymonthly SET "unique" = NULL WHERE "unique" = 0')
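# Hedged usage note (not part of the original migration): with a configured
# alembic environment this revision would typically be applied or reverted via
#
#   alembic upgrade 168c63b81f85
#   alembic downgrade 1c4d6317721a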
| gpl-3.0 | -5,224,294,259,105,135,000 | 3,743,178,673,140,643,300 | 37.137255 | 82 | 0.731105 | false |
lancezlin/ml_template_py | lib/python2.7/site-packages/pandas/tests/frame/test_missing.py | 7 | 24048 | # -*- coding: utf-8 -*-
from __future__ import print_function
from distutils.version import LooseVersion
from numpy import nan, random
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Timestamp,
date_range)
import pandas as pd
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData, _check_mixed_float
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
class TestDataFrameMissingData(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_dropEmptyRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
original = Series(mat, index=self.frame.index, name='foo')
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna(how='all')
# check that original was preserved
assert_series_equal(frame['foo'], original)
inplace_frame1.dropna(how='all', inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame1['foo'], expected)
smaller_frame = frame.dropna(how='all', subset=['foo'])
inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame2['foo'], expected)
def test_dropIncompleteRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
frame['bar'] = 5
original = Series(mat, index=self.frame.index, name='foo')
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna()
assert_series_equal(frame['foo'], original)
inp_frame1.dropna(inplace=True)
exp = Series(mat[5:], index=self.frame.index[5:], name='foo')
tm.assert_series_equal(smaller_frame['foo'], exp)
tm.assert_series_equal(inp_frame1['foo'], exp)
samesize_frame = frame.dropna(subset=['bar'])
assert_series_equal(frame['foo'], original)
self.assertTrue((frame['bar'] == 5).all())
inp_frame2.dropna(subset=['bar'], inplace=True)
self.assert_index_equal(samesize_frame.index, self.frame.index)
self.assert_index_equal(inp_frame2.index, self.frame.index)
def test_dropna(self):
df = DataFrame(np.random.randn(6, 4))
df[2][:2] = nan
dropped = df.dropna(axis=1)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
# threshold
dropped = df.dropna(axis=1, thresh=5)
expected = df.ix[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, thresh=5, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0, thresh=4)
expected = df.ix[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, thresh=4, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=1, thresh=4)
assert_frame_equal(dropped, df)
dropped = df.dropna(axis=1, thresh=3)
assert_frame_equal(dropped, df)
# subset
dropped = df.dropna(axis=0, subset=[0, 1, 3])
inp = df.copy()
inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)
assert_frame_equal(dropped, df)
assert_frame_equal(inp, df)
# all
dropped = df.dropna(axis=1, how='all')
assert_frame_equal(dropped, df)
df[2] = nan
dropped = df.dropna(axis=1, how='all')
expected = df.ix[:, [0, 1, 3]]
assert_frame_equal(dropped, expected)
# bad input
self.assertRaises(ValueError, df.dropna, axis=3)
def test_drop_and_dropna_caching(self):
# tst that cacher updates
original = Series([1, 2, np.nan], name='A')
expected = Series([1, 2], dtype=original.dtype, name='A')
df = pd.DataFrame({'A': original.values.copy()})
df2 = df.copy()
df['A'].dropna()
assert_series_equal(df['A'], original)
df['A'].dropna(inplace=True)
assert_series_equal(df['A'], expected)
df2['A'].drop([1])
assert_series_equal(df2['A'], original)
df2['A'].drop([1], inplace=True)
assert_series_equal(df2['A'], original.drop([1]))
def test_dropna_corner(self):
# bad input
self.assertRaises(ValueError, self.frame.dropna, how='foo')
self.assertRaises(TypeError, self.frame.dropna, how=None)
# non-existent column - 8303
self.assertRaises(KeyError, self.frame.dropna, subset=['A', 'X'])
def test_dropna_multiple_axes(self):
df = DataFrame([[1, np.nan, 2, 3],
[4, np.nan, 5, 6],
[np.nan, np.nan, np.nan, np.nan],
[7, np.nan, 8, 9]])
cp = df.copy()
result = df.dropna(how='all', axis=[0, 1])
result2 = df.dropna(how='all', axis=(0, 1))
expected = df.dropna(how='all').dropna(how='all', axis=1)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(df, cp)
inp = df.copy()
inp.dropna(how='all', axis=(0, 1), inplace=True)
assert_frame_equal(inp, expected)
def test_fillna(self):
self.tsframe.ix[:5, 'A'] = nan
self.tsframe.ix[-5:, 'A'] = nan
zero_filled = self.tsframe.fillna(0)
self.assertTrue((zero_filled.ix[:5, 'A'] == 0).all())
padded = self.tsframe.fillna(method='pad')
self.assertTrue(np.isnan(padded.ix[:5, 'A']).all())
self.assertTrue((padded.ix[-5:, 'A'] == padded.ix[-5, 'A']).all())
# mixed type
self.mixed_frame.ix[5:20, 'foo'] = nan
self.mixed_frame.ix[-10:, 'A'] = nan
result = self.mixed_frame.fillna(value=0)
result = self.mixed_frame.fillna(method='pad')
self.assertRaises(ValueError, self.tsframe.fillna)
self.assertRaises(ValueError, self.tsframe.fillna, 5, method='ffill')
# mixed numeric (but no float16)
mf = self.mixed_float.reindex(columns=['A', 'B', 'D'])
mf.ix[-10:, 'A'] = nan
result = mf.fillna(value=0)
_check_mixed_float(result, dtype=dict(C=None))
result = mf.fillna(method='pad')
_check_mixed_float(result, dtype=dict(C=None))
# empty frame (GH #2778)
df = DataFrame(columns=['x'])
for m in ['pad', 'backfill']:
df.x.fillna(method=m, inplace=1)
df.x.fillna(method=m)
# with different dtype (GH3386)
df = DataFrame([['a', 'a', np.nan, 'a'], [
'b', 'b', np.nan, 'b'], ['c', 'c', np.nan, 'c']])
result = df.fillna({2: 'foo'})
expected = DataFrame([['a', 'a', 'foo', 'a'],
['b', 'b', 'foo', 'b'],
['c', 'c', 'foo', 'c']])
assert_frame_equal(result, expected)
df.fillna({2: 'foo'}, inplace=True)
assert_frame_equal(df, expected)
# limit and value
df = DataFrame(np.random.randn(10, 3))
df.iloc[2:7, 0] = np.nan
df.iloc[3:5, 2] = np.nan
expected = df.copy()
expected.iloc[2, 0] = 999
expected.iloc[3, 2] = 999
result = df.fillna(999, limit=1)
assert_frame_equal(result, expected)
# with datelike
# GH 6344
df = DataFrame({
'Date': [pd.NaT, Timestamp("2014-1-1")],
'Date2': [Timestamp("2013-1-1"), pd.NaT]
})
expected = df.copy()
expected['Date'] = expected['Date'].fillna(df.ix[0, 'Date2'])
result = df.fillna(value={'Date': df['Date2']})
assert_frame_equal(result, expected)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = DataFrame(index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
result = df.get_dtype_counts().sort_values()
expected = Series({'object': 5})
assert_series_equal(result, expected)
result = df.fillna(1)
expected = DataFrame(1, index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
result = result.get_dtype_counts().sort_values()
expected = Series({'int64': 5})
assert_series_equal(result, expected)
# empty block
df = DataFrame(index=lrange(3), columns=['A', 'B'], dtype='float64')
result = df.fillna('nan')
expected = DataFrame('nan', index=lrange(3), columns=['A', 'B'])
assert_frame_equal(result, expected)
# equiv of replace
df = DataFrame(dict(A=[1, np.nan], B=[1., 2.]))
for v in ['', 1, np.nan, 1.0]:
expected = df.replace(np.nan, v)
result = df.fillna(v)
assert_frame_equal(result, expected)
def test_fillna_datetime_columns(self):
# GH 7095
df = pd.DataFrame({'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]},
index=date_range('20130110', periods=3))
result = df.fillna('?')
expected = pd.DataFrame({'A': [-1, -2, '?'],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', '?'],
'D': ['foo2', 'bar2', '?']},
index=date_range('20130110', periods=3))
self.assert_frame_equal(result, expected)
df = pd.DataFrame({'A': [-1, -2, np.nan],
'B': [pd.Timestamp('2013-01-01'),
pd.Timestamp('2013-01-02'), pd.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]},
index=date_range('20130110', periods=3))
result = df.fillna('?')
expected = pd.DataFrame({'A': [-1, -2, '?'],
'B': [pd.Timestamp('2013-01-01'),
pd.Timestamp('2013-01-02'), '?'],
'C': ['foo', 'bar', '?'],
'D': ['foo2', 'bar2', '?']},
index=pd.date_range('20130110', periods=3))
self.assert_frame_equal(result, expected)
def test_ffill(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
assert_frame_equal(self.tsframe.ffill(),
self.tsframe.fillna(method='ffill'))
def test_bfill(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
assert_frame_equal(self.tsframe.bfill(),
self.tsframe.fillna(method='bfill'))
def test_fillna_skip_certain_blocks(self):
# don't try to fill boolean, int blocks
df = DataFrame(np.random.randn(10, 4).astype(int))
# it works!
df.fillna(np.nan)
def test_fillna_inplace(self):
df = DataFrame(np.random.randn(10, 4))
df[1][:4] = np.nan
df[3][-4:] = np.nan
expected = df.fillna(value=0)
self.assertIsNot(expected, df)
df.fillna(value=0, inplace=True)
assert_frame_equal(df, expected)
df[1][:4] = np.nan
df[3][-4:] = np.nan
expected = df.fillna(method='ffill')
self.assertIsNot(expected, df)
df.fillna(method='ffill', inplace=True)
assert_frame_equal(df, expected)
def test_fillna_dict_series(self):
df = DataFrame({'a': [nan, 1, 2, nan, nan],
'b': [1, 2, 3, nan, nan],
'c': [nan, 1, 2, 3, 4]})
result = df.fillna({'a': 0, 'b': 5})
expected = df.copy()
expected['a'] = expected['a'].fillna(0)
expected['b'] = expected['b'].fillna(5)
assert_frame_equal(result, expected)
# it works
result = df.fillna({'a': 0, 'b': 5, 'd': 7})
# Series treated same as dict
result = df.fillna(df.max())
expected = df.fillna(df.max().to_dict())
assert_frame_equal(result, expected)
# disable this for now
with assertRaisesRegexp(NotImplementedError, 'column by column'):
df.fillna(df.max(1), axis=1)
def test_fillna_dataframe(self):
# GH 8377
df = DataFrame({'a': [nan, 1, 2, nan, nan],
'b': [1, 2, 3, nan, nan],
'c': [nan, 1, 2, 3, 4]},
index=list('VWXYZ'))
# df2 may have different index and columns
df2 = DataFrame({'a': [nan, 10, 20, 30, 40],
'b': [50, 60, 70, 80, 90],
'foo': ['bar'] * 5},
index=list('VWXuZ'))
result = df.fillna(df2)
# only those columns and indices which are shared get filled
expected = DataFrame({'a': [nan, 1, 2, nan, 40],
'b': [1, 2, 3, nan, 90],
'c': [nan, 1, 2, 3, 4]},
index=list('VWXYZ'))
assert_frame_equal(result, expected)
def test_fillna_columns(self):
df = DataFrame(np.random.randn(10, 10))
df.values[:, ::2] = np.nan
result = df.fillna(method='ffill', axis=1)
expected = df.T.fillna(method='pad').T
assert_frame_equal(result, expected)
df.insert(6, 'foo', 5)
result = df.fillna(method='ffill', axis=1)
expected = df.astype(float).fillna(method='ffill', axis=1)
assert_frame_equal(result, expected)
def test_fillna_invalid_method(self):
with assertRaisesRegexp(ValueError, 'ffil'):
self.frame.fillna(method='ffil')
def test_fillna_invalid_value(self):
# list
self.assertRaises(TypeError, self.frame.fillna, [1, 2])
# tuple
self.assertRaises(TypeError, self.frame.fillna, (1, 2))
# frame with series
self.assertRaises(ValueError, self.frame.iloc[:, 0].fillna,
self.frame)
def test_fillna_col_reordering(self):
cols = ["COL." + str(i) for i in range(5, 0, -1)]
data = np.random.rand(20, 5)
df = DataFrame(index=lrange(20), columns=cols, data=data)
filled = df.fillna(method='ffill')
self.assertEqual(df.columns.tolist(), filled.columns.tolist())
def test_fill_corner(self):
self.mixed_frame.ix[5:20, 'foo'] = nan
self.mixed_frame.ix[-10:, 'A'] = nan
filled = self.mixed_frame.fillna(value=0)
self.assertTrue((filled.ix[5:20, 'foo'] == 0).all())
del self.mixed_frame['foo']
empty_float = self.frame.reindex(columns=[])
# TODO(wesm): unused?
result = empty_float.fillna(value=0) # noqa
def test_fill_value_when_combine_const(self):
# GH12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype='float')
df = DataFrame({'foo': dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
assert_frame_equal(res, exp)
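# Hedged illustration (not part of the original test module): the core patterns
# exercised above boil down to
#
#   df.dropna(axis=0, how='any', thresh=None, subset=None)   # drop rows with NaN
#   df.fillna(0)                                             # constant fill
#   df.fillna(method='ffill')                                # propagate forward
#   df.fillna({'a': 0, 'b': 5})                              # per-column values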
class TestDataFrameInterpolate(tm.TestCase, TestData):
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
expected = DataFrame({'A': [1., 2., 3., 4.],
'B': [1., 4., 9., 9.],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df.interpolate()
assert_frame_equal(result, expected)
result = df.set_index('C').interpolate()
expected = df.set_index('C')
expected.loc[3, 'A'] = 3
expected.loc[5, 'B'] = 9
assert_frame_equal(result, expected)
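    # Hedged note (not part of the original tests): DataFrame.interpolate
    # defaults to method='linear', so interior NaNs are filled by linear
    # interpolation along the index, e.g.
    #
    #   pd.Series([1., np.nan, 3.]).interpolate()  ->  [1., 2., 3.]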
def test_interp_bad_method(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
with tm.assertRaises(ValueError):
df.interpolate(method='not_a_method')
def test_interp_combo(self):
df = DataFrame({'A': [1., 2., np.nan, 4.],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df['A'].interpolate()
expected = Series([1., 2., 3., 4.], name='A')
assert_series_equal(result, expected)
result = df['A'].interpolate(downcast='infer')
expected = Series([1, 2, 3, 4], name='A')
assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]})
df = df.set_index('A')
with tm.assertRaises(NotImplementedError):
df.interpolate(method='values')
def test_interp_various(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
df = df.set_index('C')
expected = df.copy()
result = df.interpolate(method='polynomial', order=1)
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923076
assert_frame_equal(result, expected)
result = df.interpolate(method='cubic')
expected.A.loc[3] = 2.81621174
expected.A.loc[13] = 5.64146581
assert_frame_equal(result, expected)
result = df.interpolate(method='nearest')
expected.A.loc[3] = 2
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
result = df.interpolate(method='slinear')
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923077
assert_frame_equal(result, expected)
result = df.interpolate(method='zero')
expected.A.loc[3] = 2.
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
def test_interp_alt_scipy(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
result = df.interpolate(method='barycentric')
expected = df.copy()
expected.ix[2, 'A'] = 3
expected.ix[5, 'A'] = 6
assert_frame_equal(result, expected)
result = df.interpolate(method='barycentric', downcast='infer')
assert_frame_equal(result, expected.astype(np.int64))
result = df.interpolate(method='krogh')
expectedk = df.copy()
expectedk['A'] = expected['A']
assert_frame_equal(result, expectedk)
_skip_if_no_pchip()
import scipy
result = df.interpolate(method='pchip')
expected.ix[2, 'A'] = 3
if LooseVersion(scipy.__version__) >= '0.17.0':
expected.ix[5, 'A'] = 6.0
else:
expected.ix[5, 'A'] = 6.125
assert_frame_equal(result, expected)
def test_interp_rowwise(self):
df = DataFrame({0: [1, 2, np.nan, 4],
1: [2, 3, 4, np.nan],
2: [np.nan, 4, 5, 6],
3: [4, np.nan, 6, 7],
4: [1, 2, 3, 4]})
result = df.interpolate(axis=1)
expected = df.copy()
expected.loc[3, 1] = 5
expected.loc[0, 2] = 3
expected.loc[1, 3] = 3
expected[4] = expected[4].astype(np.float64)
assert_frame_equal(result, expected)
# scipy route
tm._skip_if_no_scipy()
result = df.interpolate(axis=1, method='values')
assert_frame_equal(result, expected)
result = df.interpolate(axis=0)
expected = df.interpolate()
assert_frame_equal(result, expected)
def test_rowwise_alt(self):
df = DataFrame({0: [0, .5, 1., np.nan, 4, 8, np.nan, np.nan, 64],
1: [1, 2, 3, 4, 3, 2, 1, 0, -1]})
df.interpolate(axis=0)
def test_interp_leading_nans(self):
df = DataFrame({"A": [np.nan, np.nan, .5, .25, 0],
"B": [np.nan, -3, -3.5, np.nan, -4]})
result = df.interpolate()
expected = df.copy()
expected['B'].loc[3] = -3.75
assert_frame_equal(result, expected)
tm._skip_if_no_scipy()
result = df.interpolate(method='polynomial', order=1)
assert_frame_equal(result, expected)
def test_interp_raise_on_only_mixed(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': ['a', 'b', 'c', 'd'],
'C': [np.nan, 2, 5, 7],
'D': [np.nan, np.nan, 9, 9],
'E': [1, 2, 3, 4]})
with tm.assertRaises(TypeError):
df.interpolate(axis=1)
def test_interp_inplace(self):
df = DataFrame({'a': [1., 2., np.nan, 4.]})
expected = DataFrame({'a': [1., 2., 3., 4.]})
result = df.copy()
result['a'].interpolate(inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result['a'].interpolate(inplace=True, downcast='infer')
assert_frame_equal(result, expected.astype('int64'))
def test_interp_inplace_row(self):
# GH 10395
result = DataFrame({'a': [1., 2., 3., 4.],
'b': [np.nan, 2., 3., 4.],
'c': [3, 2, 2, 2]})
expected = result.interpolate(method='linear', axis=1, inplace=False)
result.interpolate(method='linear', axis=1, inplace=True)
assert_frame_equal(result, expected)
def test_interp_ignore_all_good(self):
# GH
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 2, 3, 4],
'C': [1., 2., np.nan, 4.],
'D': [1., 2., 3., 4.]})
expected = DataFrame({'A': np.array(
[1, 2, 3, 4], dtype='float64'),
'B': np.array(
[1, 2, 3, 4], dtype='int64'),
'C': np.array(
[1., 2., 3, 4.], dtype='float64'),
'D': np.array(
[1., 2., 3., 4.], dtype='float64')})
result = df.interpolate(downcast=None)
assert_frame_equal(result, expected)
# all good
result = df[['B', 'D']].interpolate(downcast=None)
assert_frame_equal(result, df[['B', 'D']])
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# '--with-coverage', '--cover-package=pandas.core']
exit=False)
| mit | -1,228,732,907,530,046,500 | -7,378,993,818,503,706,000 | 35.108108 | 79 | 0.515178 | false |
NAMD/justicecloud | justice/external/wtforms/ext/sqlalchemy/orm.py | 50 | 10766 | """
Tools for generating forms based on SQLAlchemy models.
"""
from __future__ import unicode_literals
import inspect
from wtforms import fields as f
from wtforms import validators
from wtforms.form import Form
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.ext.sqlalchemy.fields import QuerySelectMultipleField
from wtforms.ext.sqlalchemy.validators import Unique
__all__ = (
'model_fields', 'model_form',
)
def converts(*args):
def _inner(func):
func._converter_for = frozenset(args)
return func
return _inner
class ModelConverterBase(object):
def __init__(self, converters, use_mro=True):
self.use_mro = use_mro
if not converters:
converters = {}
for name in dir(self):
obj = getattr(self, name)
if hasattr(obj, '_converter_for'):
for classname in obj._converter_for:
converters[classname] = obj
self.converters = converters
def convert(self, model, mapper, prop, field_args, db_session=None):
if not hasattr(prop, 'columns') and not hasattr(prop, 'direction'):
return
elif not hasattr(prop, 'direction') and len(prop.columns) != 1:
raise TypeError('Do not know how to convert multiple-column '
+ 'properties currently')
kwargs = {
'validators': [],
'filters': [],
'default': None,
}
converter = None
column = None
if not hasattr(prop, 'direction'):
column = prop.columns[0]
# Support sqlalchemy.schema.ColumnDefault, so users can benefit
# from setting defaults for fields, e.g.:
# field = Column(DateTimeField, default=datetime.utcnow)
default = getattr(column, 'default', None)
if default is not None:
# Only actually change default if it has an attribute named
# 'arg' that's callable.
callable_default = getattr(default, 'arg', None)
if callable_default and callable(callable_default):
default = callable_default(None)
kwargs['default'] = default
if column.nullable:
kwargs['validators'].append(validators.Optional())
else:
kwargs['validators'].append(validators.Required())
if db_session and column.unique:
kwargs['validators'].append(Unique(lambda: db_session, model,
column))
if self.use_mro:
types = inspect.getmro(type(column.type))
else:
types = [type(column.type)]
for col_type in types:
type_string = '%s.%s' % (col_type.__module__,
col_type.__name__)
if type_string.startswith('sqlalchemy'):
type_string = type_string[11:]
if type_string in self.converters:
converter = self.converters[type_string]
break
else:
for col_type in types:
if col_type.__name__ in self.converters:
converter = self.converters[col_type.__name__]
break
else:
return
if db_session and hasattr(prop, 'direction'):
foreign_model = prop.mapper.class_
nullable = True
for pair in prop.local_remote_pairs:
if not pair[0].nullable:
nullable = False
kwargs.update({
'allow_blank': nullable,
'query_factory': lambda: db_session.query(foreign_model).all()
})
converter = self.converters[prop.direction.name]
if field_args:
kwargs.update(field_args)
return converter(model=model, mapper=mapper, prop=prop, column=column,
field_args=kwargs)
class ModelConverter(ModelConverterBase):
def __init__(self, extra_converters=None):
super(ModelConverter, self).__init__(extra_converters)
@classmethod
def _string_common(cls, column, field_args, **extra):
if column.type.length:
field_args['validators'].append(validators.Length(max=column.type.length))
@converts('String', 'Unicode')
def conv_String(self, field_args, **extra):
self._string_common(field_args=field_args, **extra)
return f.TextField(**field_args)
@converts('Text', 'UnicodeText', 'types.LargeBinary', 'types.Binary')
def conv_Text(self, field_args, **extra):
self._string_common(field_args=field_args, **extra)
return f.TextAreaField(**field_args)
@converts('Boolean')
def conv_Boolean(self, field_args, **extra):
return f.BooleanField(**field_args)
@converts('Date')
def conv_Date(self, field_args, **extra):
return f.DateField(**field_args)
@converts('DateTime')
def conv_DateTime(self, field_args, **extra):
return f.DateTimeField(**field_args)
@converts('Integer', 'SmallInteger')
def handle_integer_types(self, column, field_args, **extra):
unsigned = getattr(column.type, 'unsigned', False)
if unsigned:
field_args['validators'].append(validators.NumberRange(min=0))
return f.IntegerField(**field_args)
@converts('Numeric', 'Float')
def handle_decimal_types(self, column, field_args, **extra):
places = getattr(column.type, 'scale', 2)
if places is not None:
field_args['places'] = places
return f.DecimalField(**field_args)
@converts('databases.mysql.MSYear')
def conv_MSYear(self, field_args, **extra):
field_args['validators'].append(validators.NumberRange(min=1901, max=2155))
return f.TextField(**field_args)
@converts('databases.postgres.PGInet', 'dialects.postgresql.base.INET')
def conv_PGInet(self, field_args, **extra):
field_args.setdefault('label', 'IP Address')
field_args['validators'].append(validators.IPAddress())
return f.TextField(**field_args)
@converts('dialects.postgresql.base.MACADDR')
def conv_PGMacaddr(self, field_args, **extra):
field_args.setdefault('label', 'MAC Address')
field_args['validators'].append(validators.MacAddress())
return f.TextField(**field_args)
@converts('dialects.postgresql.base.UUID')
def conv_PGUuid(self, field_args, **extra):
field_args.setdefault('label', 'UUID')
field_args['validators'].append(validators.UUID())
return f.TextField(**field_args)
@converts('MANYTOONE')
def conv_ManyToOne(self, field_args, **extra):
return QuerySelectField(**field_args)
@converts('MANYTOMANY', 'ONETOMANY')
def conv_ManyToMany(self, field_args, **extra):
return QuerySelectMultipleField(**field_args)
def model_fields(model, db_session=None, only=None, exclude=None,
field_args=None, converter=None):
"""
Generate a dictionary of fields for a given SQLAlchemy model.
See `model_form` docstring for description of parameters.
"""
if not hasattr(model, '_sa_class_manager'):
raise TypeError('model must be a sqlalchemy mapped model')
mapper = model._sa_class_manager.mapper
converter = converter or ModelConverter()
field_args = field_args or {}
properties = ((p.key, p) for p in mapper.iterate_properties)
if only:
properties = (x for x in properties if x[0] in only)
elif exclude:
properties = (x for x in properties if x[0] not in exclude)
field_dict = {}
for name, prop in properties:
field = converter.convert(model, mapper, prop,
field_args.get(name), db_session)
if field is not None:
field_dict[name] = field
return field_dict
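# Hedged usage sketch (not part of the original module): the model class, the
# session and the column names below are illustrative.
#
#   fields = model_fields(User, db_session=session, only=['name', 'email'])
#   # -> {'name': <UnboundField(TextField, ...)>, 'email': <UnboundField(TextField, ...)>}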
def model_form(model, db_session=None, base_class=Form, only=None,
exclude=None, field_args=None, converter=None, exclude_pk=True,
exclude_fk=True, type_name=None):
"""
Create a wtforms Form for a given SQLAlchemy model class::
from wtalchemy.orm import model_form
from myapp.models import User
UserForm = model_form(User)
:param model:
A SQLAlchemy mapped model class.
:param db_session:
An optional SQLAlchemy Session.
:param base_class:
Base form class to extend from. Must be a ``wtforms.Form`` subclass.
:param only:
An optional iterable with the property names that should be included in
the form. Only these properties will have fields.
:param exclude:
An optional iterable with the property names that should be excluded
from the form. All other properties will have fields.
:param field_args:
An optional dictionary of field names mapping to keyword arguments used
to construct each field object.
:param converter:
A converter to generate the fields based on the model properties. If
not set, ``ModelConverter`` is used.
:param exclude_pk:
An optional boolean to force primary key exclusion.
:param exclude_fk:
An optional boolean to force foreign keys exclusion.
:param type_name:
An optional string to set returned type name.
"""
class ModelForm(base_class):
"""Sets object as form attribute."""
def __init__(self, *args, **kwargs):
if 'obj' in kwargs:
self._obj = kwargs['obj']
super(ModelForm, self).__init__(*args, **kwargs)
if not exclude:
exclude = []
model_mapper = model.__mapper__
for prop in model_mapper.iterate_properties:
if not hasattr(prop, 'direction') and prop.columns[0].primary_key:
if exclude_pk:
exclude.append(prop.key)
if hasattr(prop, 'direction') and exclude_fk and \
prop.direction.name != 'MANYTOMANY':
for pair in prop.local_remote_pairs:
exclude.append(pair[0].key)
type_name = type_name or str(model.__name__ + 'Form')
field_dict = model_fields(model, db_session, only, exclude, field_args,
converter)
return type(type_name, (ModelForm, ), field_dict)
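# Hedged usage sketch (not part of the original module), continuing the
# docstring example above; `request` and `user` are illustrative:
#
#   UserForm = model_form(User, db_session=session)
#   form = UserForm(request.POST, obj=user)
#   if form.validate():
#       form.populate_obj(user)
#       db_session.commit()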
| lgpl-3.0 | 3,926,425,052,509,066,000 | 5,606,307,264,639,277,000 | 34.414474 | 86 | 0.604774 | false |
HalcyonChimera/osf.io | addons/gitlab/tests/test_serializer.py | 15 | 1607 | # -*- coding: utf-8 -*-
"""Serializer tests for the GitLab addon."""
import mock
import pytest
from tests.base import OsfTestCase
from addons.base.tests.serializers import StorageAddonSerializerTestSuiteMixin
from addons.gitlab.api import GitLabClient
from addons.gitlab.tests.factories import GitLabAccountFactory
from addons.gitlab.serializer import GitLabSerializer
pytestmark = pytest.mark.django_db
class TestGitLabSerializer(StorageAddonSerializerTestSuiteMixin, OsfTestCase):
addon_short_name = 'gitlab'
Serializer = GitLabSerializer
ExternalAccountFactory = GitLabAccountFactory
client = GitLabClient()
def set_provider_id(self, pid):
self.node_settings.repo = pid
## Overrides ##
def setUp(self):
super(TestGitLabSerializer, self).setUp()
self.mock_api_user = mock.patch('addons.gitlab.api.GitLabClient.user')
self.mock_api_user.return_value = mock.Mock()
self.mock_api_user.start()
def tearDown(self):
self.mock_api_user.stop()
super(TestGitLabSerializer, self).tearDown()
def test_serialize_acccount(self):
ea = self.ExternalAccountFactory()
expected = {
'id': ea._id,
'provider_id': ea.provider_id,
'provider_name': ea.provider_name,
'provider_short_name': ea.provider,
'display_name': ea.display_name,
'profile_url': ea.profile_url,
'nodes': [],
'host': ea.oauth_secret,
'host_url': ea.oauth_secret,
}
assert self.ser.serialize_account(ea) == expected
| apache-2.0 | 7,691,917,915,568,051,000 | 1,866,790,919,496,909,300 | 31.14 | 78 | 0.663348 | false |
mauriceling/dose | examples/14_revive_simulation_13_fitness_loss.py | 2 | 4596 | '''
Example 14: Continuation of examining the effects of natural selection on a
population's genetic pool by implementing a fitness scheme that counts
a specific sequence within the chromosome along with a goal to be reached
from an evenly deployed population. In this simulation, loss of fitness is
observed by applying a random selection scheme to the population.
In this simulation,
- revival of 1 population of 100 organisms
- unchanged simulation parameters
- 5000 generations to be simulated
 - random organism killing in postpopulation_control
'''
# needed to run this example without prior
# installation of DOSE into Python site-packages
try:
import run_examples_without_installation
except ImportError: pass
# Example codes starts from here
import dose, random
from collections import Counter
from copy import deepcopy
parameters = {"database_source" : "T1_11x0.db",
"simulation_time": "default",
"rev_start" : [200],
"extend_gen" : 5000,
"simulation_name": "T1_11x0_revival",
"database_file": "T1_11x0_revival.db",
"database_logging_frequency": 1,
}
class simulation_functions(dose.dose_functions):
def organism_movement(self, Populations, pop_name, World): pass
def organism_location(self, Populations, pop_name, World): pass
def ecoregulate(self, World): pass
def update_ecology(self, World, x, y, z): pass
def update_local(self, World, x, y, z): pass
def report(self, World): pass
def fitness(self, Populations, pop_name):
for organism in Populations[pop_name].agents:
final_fitness = []
chromosome = organism.genome[0].sequence
zero_count = []
for base_index in range(parameters["chromosome_size"] - 1):
if int(chromosome[base_index]) == 0 and int(chromosome[base_index - 1]) != 0:
next_index = 1
while int(chromosome[next_index + base_index]) == 0:
next_index += 1
if (next_index + base_index) == parameters["chromosome_size"]: break
zero_count.append(next_index)
for sequence in range(len(zero_count)):
if len(final_fitness) == 10: break
seq_score = sorted(zero_count, reverse = True)[sequence]
if seq_score > int(parameters["goal"]/10): seq_score = int(parameters["goal"]/10)
final_fitness.append(seq_score)
organism.status['fitness'] = sum(final_fitness)
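    # Hedged illustration (not part of the original example): assuming the
    # revived simulation's parameters carry goal == 70 and a 30-base
    # chromosome such as
    #
    #   [1, 0, 0, 0, 1, 0, 0, 1] + [1] * 22
    #
    # the runs of zeroes give zero_count == [3, 2], each run is capped at
    # goal/10 == 7, and the organism's fitness becomes 3 + 2 == 5.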
def mutation_scheme(self, organism):
organism.genome[0].rmutate(parameters["mutation_type"],
parameters["additional_mutation"])
def prepopulation_control(self, Populations, pop_name): pass
def mating(self, Populations, pop_name):
group = deepcopy(Populations[pop_name].agents)
for organism in group:
organism.generate_name()
Populations[pop_name].agents.append(organism)
def postpopulation_control(self, Populations, pop_name):
group = deepcopy(Populations[pop_name].agents)
for i in range(len(group)//2):
Populations[pop_name].agents.remove(random.choice(Populations[pop_name].agents))
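    # Hedged note (not part of the original example): removing a random half of
    # the doubled population applies no selective pressure, so mean fitness is
    # expected to drift downward over the 5000 extended generations -- the
    # fitness loss this example is meant to demonstrate.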
def generation_events(self, Populations, pop_name): pass
def population_report(self, Populations, pop_name):
report_list = []
for organism in Populations[pop_name].agents:
chromosome = organism.status['identity']
fitness = str(organism.status['fitness'])
report_list.append(chromosome + ' ' + fitness)
return '\n'.join(report_list)
def database_report(self, con, cur, start_time,
Populations, World, generation_count):
try:
dose.database_report_populations(con, cur, start_time,
Populations, generation_count)
except: pass
try:
dose.database_report_world(con, cur, start_time,
World, generation_count)
except: pass
def deployment_scheme(self, Populations, pop_name, World): pass
for trial in range(13, 26):
parameters["simulation_name"] = "T" + str(trial) + "_ts_7x0_loss1"
parameters["database_source"] = "T" + str(trial) + "_ts_7x0_gain1.db"
parameters["database_file"] = "T" + str(trial) + "_ts_7x0_loss1.db"
dose.revive_simulation(parameters, simulation_functions)
parameters["simulation_time"] = "default" | gpl-3.0 | -8,266,144,847,703,724,000 | -6,576,525,350,618,982,000 | 39.681416 | 97 | 0.623368 | false |
sradevski/homeAutomate | scripts/laptop_on_network.py | 1 | 1994 | #!/usr/bin/python
import remote_core as core
import os
import sys
import nmap
import datetime
import time
import re
import go_to_sleep
try:
nm = nmap.PortScanner() # instance of nmap.PortScanner
except nmap.PortScannerError:
print('Nmap not found', sys.exc_info()[0])
sys.exit(0)
except:
print("Unexpected error:", sys.exc_info()[0])
sys.exit(0)
macAddressToSearch = '64:76:BA:A3:43:B0'
laptopHasBeenTurnedOn = False
disconnectedCounter = 0
def checkIfLaptopOn():
global macAddressToSearch, laptopHasBeenTurnedOn, disconnectedCounter
curHosts = []
# nm.scan(hosts = '192.168.11.1-8', arguments = '-n -sP -PS 7,22,88,443,80,660,2195 -PA 80,22,443 -PU -T3')
nm.scan(hosts = '192.168.11.1-8', arguments = '-n -sn -PR')
for host in nm.all_hosts():
try:
mac = nm[host]['addresses']['mac']
vendor = nm[host]['vendor'][mac]
except:
vendor = mac = 'unknown'
curHosts.append(mac)
localtime = time.asctime(time.localtime(time.time()))
print('============ {0} ============'.format(localtime))
for host in curHosts:
print(host)
config = core.load_config();
if config['location']['am_home']:
if macAddressToSearch not in curHosts:
if laptopHasBeenTurnedOn:
if disconnectedCounter > 3:
wentToSleepScript()
laptopHasBeenTurnedOn = False
disconnectedCounter += 1
else:
laptopHasBeenTurnedOn = True
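# Hedged illustration (not part of the original script): for a scanned host,
# python-nmap exposes dict-like results such as
#
#   nm['192.168.11.3']['addresses']  ->  {'ipv4': '192.168.11.3', 'mac': '64:76:BA:A3:43:B0'}
#   nm['192.168.11.3']['vendor']     ->  {'64:76:BA:A3:43:B0': 'Apple'}
#
# which is why the MAC/vendor lookup above is wrapped in a try/except.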
def wentToSleepScript():
time.sleep(10)
go_to_sleep.go_to_sleep()
# print("SLEEPING")
if __name__ == '__main__':
start_at_hour = 22
stop_at_hour = 2
sleep_seconds = 60 * 60 * (start_at_hour - stop_at_hour) - 20
while True:
localtime = time.localtime(time.time())
if localtime.tm_hour > stop_at_hour and localtime.tm_hour < start_at_hour:
time.sleep(sleep_seconds - (60 * 60 * (start_at_hour - localtime.tm_hour)))
time.sleep(10)
checkIfLaptopOn()
| mit | -6,297,448,426,147,583,000 | 5,538,828,470,706,810,000 | 25.586667 | 110 | 0.61986 | false |
Orav/kbengine | kbe/src/lib/python/Lib/tkinter/font.py | 2 | 6845 | # Tkinter font wrapper
#
# written by Fredrik Lundh, February 1998
#
__version__ = "0.9"
import itertools
import tkinter
# weight/slant
NORMAL = "normal"
ROMAN = "roman"
BOLD = "bold"
ITALIC = "italic"
def nametofont(name):
"""Given the name of a tk named font, returns a Font representation.
"""
return Font(name=name, exists=True)
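# Hedged usage sketch (not part of the original module): a Tk root must exist
# before fonts can be created, e.g.
#
#   import tkinter
#   from tkinter import font
#   root = tkinter.Tk()
#   helv = font.Font(family="Helvetica", size=12, weight=font.BOLD)
#   width = helv.measure("hello")
#   default = font.nametofont("TkDefaultFont").actual()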
class Font:
"""Represents a named font.
Constructor options are:
font -- font specifier (name, system font, or (family, size, style)-tuple)
name -- name to use for this font configuration (defaults to a unique name)
exists -- does a named font by this name already exist?
Creates a new named font if False, points to the existing font if True.
Raises _tkinter.TclError if the assertion is false.
the following are ignored if font is specified:
family -- font 'family', e.g. Courier, Times, Helvetica
size -- font size in points
weight -- font thickness: NORMAL, BOLD
slant -- font slant: ROMAN, ITALIC
underline -- font underlining: false (0), true (1)
overstrike -- font strikeout: false (0), true (1)
"""
counter = itertools.count(1)
def _set(self, kw):
options = []
for k, v in kw.items():
options.append("-"+k)
options.append(str(v))
return tuple(options)
def _get(self, args):
options = []
for k in args:
options.append("-"+k)
return tuple(options)
def _mkdict(self, args):
options = {}
for i in range(0, len(args), 2):
options[args[i][1:]] = args[i+1]
return options
def __init__(self, root=None, font=None, name=None, exists=False,
**options):
if not root:
root = tkinter._default_root
tk = getattr(root, 'tk', root)
if font:
# get actual settings corresponding to the given font
font = tk.splitlist(tk.call("font", "actual", font))
else:
font = self._set(options)
if not name:
name = "font" + str(next(self.counter))
self.name = name
if exists:
self.delete_font = False
# confirm font exists
if self.name not in tk.splitlist(tk.call("font", "names")):
raise tkinter._tkinter.TclError(
"named font %s does not already exist" % (self.name,))
# if font config info supplied, apply it
if font:
tk.call("font", "configure", self.name, *font)
else:
# create new font (raises TclError if the font exists)
tk.call("font", "create", self.name, *font)
self.delete_font = True
self._tk = tk
self._split = tk.splitlist
self._call = tk.call
def __str__(self):
return self.name
def __eq__(self, other):
return isinstance(other, Font) and self.name == other.name
def __getitem__(self, key):
return self.cget(key)
def __setitem__(self, key, value):
self.configure(**{key: value})
def __del__(self):
try:
if self.delete_font:
self._call("font", "delete", self.name)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
pass
def copy(self):
"Return a distinct copy of the current font"
return Font(self._tk, **self.actual())
def actual(self, option=None, displayof=None):
"Return actual font attributes"
args = ()
if displayof:
args = ('-displayof', displayof)
if option:
args = args + ('-' + option, )
return self._call("font", "actual", self.name, *args)
else:
return self._mkdict(
self._split(self._call("font", "actual", self.name, *args)))
def cget(self, option):
"Get font attribute"
return self._call("font", "config", self.name, "-"+option)
def config(self, **options):
"Modify font attributes"
if options:
self._call("font", "config", self.name,
*self._set(options))
else:
return self._mkdict(
self._split(self._call("font", "config", self.name)))
configure = config
def measure(self, text, displayof=None):
"Return text width"
args = (text,)
if displayof:
args = ('-displayof', displayof, text)
return int(self._call("font", "measure", self.name, *args))
def metrics(self, *options, **kw):
"""Return font metrics.
For best performance, create a dummy widget
using this font before calling this method."""
args = ()
displayof = kw.pop('displayof', None)
if displayof:
args = ('-displayof', displayof)
if options:
args = args + self._get(options)
return int(
self._call("font", "metrics", self.name, *args))
else:
res = self._split(self._call("font", "metrics", self.name, *args))
options = {}
for i in range(0, len(res), 2):
options[res[i][1:]] = int(res[i+1])
return options
def families(root=None, displayof=None):
"Get font families (as a tuple)"
if not root:
root = tkinter._default_root
args = ()
if displayof:
args = ('-displayof', displayof)
return root.tk.splitlist(root.tk.call("font", "families", *args))
def names(root=None):
"Get names of defined fonts (as a tuple)"
if not root:
root = tkinter._default_root
return root.tk.splitlist(root.tk.call("font", "names"))
# --------------------------------------------------------------------
# test stuff
if __name__ == "__main__":
root = tkinter.Tk()
# create a font
f = Font(family="times", size=30, weight=NORMAL)
print(f.actual())
print(f.actual("family"))
print(f.actual("weight"))
print(f.config())
print(f.cget("family"))
print(f.cget("weight"))
print(names())
print(f.measure("hello"), f.metrics("linespace"))
print(f.metrics(displayof=root))
f = Font(font=("Courier", 20, "bold"))
print(f.measure("hello"), f.metrics("linespace", displayof=root))
w = tkinter.Label(root, text="Hello, world", font=f)
w.pack()
w = tkinter.Button(root, text="Quit!", command=root.destroy)
w.pack()
fb = Font(font=w["font"]).copy()
fb.config(weight=BOLD)
w.config(font=fb)
tkinter.mainloop()
| lgpl-3.0 | 4,877,135,429,968,076,000 | -6,558,146,878,361,535,000 | 27.252137 | 79 | 0.531337 | false |
ixc/django-fluent-contents | fluent_contents/plugins/code/migrations/0001_initial.py | 2 | 1042 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('fluent_contents', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CodeItem',
fields=[
('contentitem_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='fluent_contents.ContentItem')),
('language', models.CharField(default=b'html', max_length=50, verbose_name='Language')),
('code', models.TextField(verbose_name='Code')),
('linenumbers', models.BooleanField(default=False, verbose_name='Show line numbers')),
],
options={
'db_table': 'contentitem_code_codeitem',
'verbose_name': 'Code snippet',
'verbose_name_plural': 'Code snippets',
},
bases=('fluent_contents.contentitem',),
),
]
| apache-2.0 | -4,756,995,836,953,512,000 | 1,887,453,743,154,675,700 | 34.931034 | 164 | 0.571017 | false |
RobertoMalatesta/shedskin | tests/155.py | 6 | 6975 |
# (c) Peter Cock
# --- http://www2.warwick.ac.uk/fac/sci/moac/currentstudents/peter_cock/python/sudoku/
TRIPLETS = [[0,1,2],[3,4,5],[6,7,8]]
ROW_ITER = [[(row,col) for col in range(0,9)] for row in range(0,9)]
COL_ITER = [[(row,col) for row in range(0,9)] for col in range(0,9)]
TxT_ITER = [[(row,col) for row in rows for col in cols] for rows in TRIPLETS for cols in TRIPLETS]
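# ROW_ITER, COL_ITER and TxT_ITER precompute the (row, col) coordinates of the
# nine rows, nine columns and nine 3x3 boxes.  For example, TxT_ITER[0] is the
# top-left box:
#   [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)]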
class soduko:
def __init__(self, start_grid=None) :
self.squares =[ [range(1,10) for col in range(0,9)] for row in range(0,9)]
if start_grid is not None:
assert len(start_grid)==9, "Bad input!"
for row in range(0,9) :
self.set_row(row, start_grid[row])
self._changed=False
def copy(self) :
soduko_copy = soduko(None)
for row in range(0,9) :
for col in range(0,9) :
soduko_copy.squares[row][col] = self.squares[row][col][:]
soduko_copy._changed=False
return soduko_copy
def set_row(self,row, x_list) :
assert len(x_list)==9, 'not 9'
for col in range(0,9) :
try :
x = int(x_list[col])
except :
x = 0
self.set_cell(row,col,x)
def set_cell(self,row,col,x):
if self.squares[row][col] == [x] :
pass
elif x not in range(1,9+1) :
pass
else:
assert x in self.squares[row][col], "bugger2"
self.squares[row][col] = [x]
self.update_neighbours(row,col,x)
self._changed=True
def cell_exclude(self, row,col,x) :
assert x in range(1,9+1), 'inra'
if x in self.squares[row][col] :
self.squares[row][col].remove(x)
assert len(self.squares[row][col]) > 0, "bugger"
if len(self.squares[row][col]) == 1 :
self._changed=True
self.update_neighbours(row,col,self.squares[row][col][0])
else :
pass
return
def update_neighbours(self,set_row,set_col,x) :
for row in range(0,9) :
if row <> set_row :
self.cell_exclude(row,set_col,x)
for col in range(0,9) :
if col <> set_col :
self.cell_exclude(set_row,col,x)
for triplet in TRIPLETS :
if set_row in triplet : rows = triplet[:]
if set_col in triplet : cols = triplet[:]
rows.remove(set_row)
cols.remove(set_col)
for row in rows :
for col in cols :
assert row <> set_row or col <> set_col , 'meuh'
self.cell_exclude(row,col,x)
def get_cell_digit_str(self,row,col) :
if len(self.squares[row][col])==1 :
return str(self.squares[row][col][0])
else :
return "0"
def __str__(self):
answer = " 123 456 789\n"
for row in range(0,9) :
            answer = (answer + str(row+1) + " ["
                + "".join([self.get_cell_digit_str(row,col).replace("0","?") for col in range(0,3)])
                + "] ["
                + "".join([self.get_cell_digit_str(row,col).replace("0","?") for col in range(3,6)])
                + "] ["
                + "".join([self.get_cell_digit_str(row,col).replace("0","?") for col in range(6,9)])
                + "]\n")
if row+1 in [3,6] :
answer = answer + " --- --- ---\n"
return answer
def check(self) :
self._changed=True
while self._changed:
self._changed=False
self.check_for_single_occurances()
self.check_for_last_in_row_col_3x3()
return
def check_for_single_occurances(self):
for check_type in [ROW_ITER, COL_ITER, TxT_ITER]:
for check_list in check_type :
for x in range(1,9+1) : #1 to 9 inclusive
x_in_list = []
for (row,col) in check_list :
if x in self.squares[row][col] :
x_in_list.append((row,col))
if len(x_in_list)==1 :
(row,col) = x_in_list[0]
if len(self.squares[row][col]) > 1 :
self.set_cell(row,col,x)
def check_for_last_in_row_col_3x3(self):
for (type_name, check_type) in [("Row",ROW_ITER),("Col",COL_ITER),("3x3",TxT_ITER)]:
for check_list in check_type :
unknown_entries = []
unassigned_values = range(1,9+1) #1-9 inclusive
known_values = []
for (row,col) in check_list :
if len(self.squares[row][col]) == 1 :
assert self.squares[row][col][0] not in known_values, "bugger3"
known_values.append(self.squares[row][col][0])
assert self.squares[row][col][0] in unassigned_values, "bugger4"
unassigned_values.remove(self.squares[row][col][0])
else :
unknown_entries.append((row,col))
assert len(unknown_entries) + len(known_values) == 9, 'bugger5'
assert len(unknown_entries) == len(unassigned_values), 'bugger6'
if len(unknown_entries) == 1 :
x = unassigned_values[0]
(row,col) = unknown_entries[0]
self.set_cell(row,col,x)
return
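    # one_level_supposition() adds a single level of trial-and-error on top of
    # the plain constraint propagation in check(): every remaining candidate of
    # every unsolved cell is tried in a throw-away copy of the board, and any
    # candidate whose propagation ends in an AssertionError is excluded from
    # the real board.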
def one_level_supposition(self):
progress=True
while progress :
progress=False
for row in range(0,9) :
for col in range(0,9):
if len(self.squares[row][col]) > 1 :
bad_x = []
for x in self.squares[row][col] :
soduko_copy = self.copy()
try:
soduko_copy.set_cell(row,col,x)
soduko_copy.check()
except AssertionError, e :
bad_x.append(x)
del soduko_copy
if len(bad_x) == 0 :
pass
elif len(bad_x) < len(self.squares[row][col]) :
for x in bad_x :
self.cell_exclude(row,col,x)
self.check()
progress=True
else :
assert False, "bugger7"
for x in range(50):
t = soduko(["800000600",
"040500100",
"070090000",
"030020007",
"600008004",
"500000090",
"000030020",
"001006050",
"004000003"])
t.check()
t.one_level_supposition()
t.check()
print t
| gpl-3.0 | -8,872,386,611,028,060,000 | 649,502,715,156,345,000 | 37.324176 | 424 | 0.456631 | false |
JordanReiter/django-notification | notification/views.py | 1 | 6596 | from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponseRedirect, Http404
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
try:
from django.contrib.syndication.views import Feed
except ImportError:
from django.contrib.syndication.views import feed as Feed
from notification.models import *
from notification.decorators import basic_auth_required, simple_basic_auth_callback
from notification.feeds import NoticeUserFeed
@basic_auth_required(realm="Notices Feed", callback_func=simple_basic_auth_callback)
def feed_for_user(request):
"""
An atom feed for all unarchived :model:`notification.Notice`s for a user.
"""
url = "feed/%s" % request.user.username
return Feed(request, url, {
"feed": NoticeUserFeed,
})
@login_required
def notices(request):
"""
The main notices index view.
Template: :template:`notification/notices.html`
Context:
notices
A list of :model:`notification.Notice` objects that are not archived
and to be displayed on the site.
"""
notices = Notice.objects.notices_for(request.user, on_site=True)
return render_to_response("notification/notices.html", {
"notices": notices,
}, context_instance=RequestContext(request))
@login_required
def notice_settings(request):
"""
The notice settings view.
Template: :template:`notification/notice_settings.html`
Context:
notice_types
A list of all :model:`notification.NoticeType` objects.
notice_settings
A dictionary containing ``column_headers`` for each ``NOTICE_MEDIA``
and ``rows`` containing a list of dictionaries: ``notice_type``, a
:model:`notification.NoticeType` object and ``cells``, a list of
tuples whose first value is suitable for use in forms and the second
value is ``True`` or ``False`` depending on a ``request.POST``
variable called ``form_label``, whose valid value is ``on``.
"""
notice_types = NoticeType.objects.all()
settings_table = []
for notice_type in notice_types:
settings_row = []
for medium_id, medium_display in NOTICE_MEDIA:
form_label = "%s_%s" % (notice_type.label, medium_id)
setting = get_notification_setting(request.user, notice_type, medium_id)
if request.method == "POST":
if request.POST.get(form_label) == "on":
if not setting.send:
setting.send = True
setting.save()
else:
if setting.send:
setting.send = False
setting.save()
settings_row.append((form_label, setting.send))
settings_table.append({"notice_type": notice_type, "cells": settings_row})
if request.method == "POST":
next_page = request.POST.get("next_page", ".")
return HttpResponseRedirect(next_page)
notice_settings = {
"column_headers": [medium_display for medium_id, medium_display in NOTICE_MEDIA],
"rows": settings_table,
}
return render_to_response("notification/notice_settings.html", {
"notice_types": notice_types,
"notice_settings": notice_settings,
}, context_instance=RequestContext(request))
@login_required
def single(request, id, mark_seen=True):
"""
Detail view for a single :model:`notification.Notice`.
Template: :template:`notification/single.html`
Context:
notice
The :model:`notification.Notice` being viewed
Optional arguments:
mark_seen
If ``True``, mark the notice as seen if it isn't
already. Do nothing if ``False``. Default: ``True``.
"""
notice = get_object_or_404(Notice, id=id)
if request.user == notice.recipient:
if mark_seen and notice.unseen:
notice.unseen = False
notice.save()
return render_to_response("notification/single.html", {
"notice": notice,
}, context_instance=RequestContext(request))
raise Http404
@login_required
def archive(request, noticeid=None, next_page=None):
"""
Archive a :model:`notices.Notice` if the requesting user is the
recipient or if the user is a superuser. Returns a
``HttpResponseRedirect`` when complete.
Optional arguments:
noticeid
The ID of the :model:`notices.Notice` to be archived.
next_page
The page to redirect to when done.
"""
if noticeid:
try:
notice = Notice.objects.get(id=noticeid)
if request.user == notice.recipient or request.user.is_superuser:
notice.archive()
            else:
                # only the recipient or a superuser may archive this notice;
                # anyone else is simply redirected
return HttpResponseRedirect(next_page)
except Notice.DoesNotExist:
return HttpResponseRedirect(next_page)
return HttpResponseRedirect(next_page)
@login_required
def delete(request, noticeid=None, next_page=None):
"""
Delete a :model:`notices.Notice` if the requesting user is the recipient
or if the user is a superuser. Returns a ``HttpResponseRedirect`` when
complete.
Optional arguments:
noticeid
            The ID of the :model:`notices.Notice` to be deleted.
next_page
The page to redirect to when done.
"""
if noticeid:
try:
notice = Notice.objects.get(id=noticeid)
if request.user == notice.recipient or request.user.is_superuser:
notice.delete()
            else:
                # only the recipient or a superuser may delete this notice;
                # anyone else is simply redirected
return HttpResponseRedirect(next_page)
except Notice.DoesNotExist:
return HttpResponseRedirect(next_page)
return HttpResponseRedirect(next_page)
@login_required
def mark_all_seen(request):
"""
Mark all unseen notices for the requesting user as seen. Returns a
``HttpResponseRedirect`` when complete.
"""
for notice in Notice.objects.notices_for(request.user, unseen=True):
notice.unseen = False
notice.save()
return HttpResponseRedirect(reverse("notification_notices"))
| mit | -8,215,693,159,184,665,000 | -1,753,843,141,594,830,600 | 32.482234 | 89 | 0.622347 | false |
matthiasdiener/spack | var/spack/repos/builtin/packages/r-tseries/package.py | 5 | 1765 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RTseries(RPackage):
"""Time series analysis and computational finance."""
homepage = "https://cran.r-project.org/package=tseries"
url = "https://cran.r-project.org/src/contrib/tseries_0.10-42.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/tseries"
version('0.10-42', '3feaa5c463bc967d749323163d9bc836')
depends_on('r-quadprog', type=('build', 'run'))
depends_on('r-zoo', type=('build', 'run'))
depends_on('r-quantmod', type=('build', 'run'))
| lgpl-2.1 | 5,629,796,650,625,726,000 | -4,395,305,668,992,770,000 | 44.25641 | 78 | 0.667989 | false |
ErickMurillo/ciat_plataforma | ficha_granos_basicos/migrations/0006_auto__del_datosparcela__del_field_monitoreo_fecha_monitoreo__add_field.py | 3 | 36192 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'DatosParcela'
db.delete_table(u'ficha_granos_basicos_datosparcela')
# Deleting field 'Monitoreo.fecha_monitoreo'
db.delete_column(u'ficha_granos_basicos_monitoreo', 'fecha_monitoreo')
# Adding field 'Monitoreo.cultivo'
db.add_column(u'ficha_granos_basicos_monitoreo', 'cultivo',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Adding model 'DatosParcela'
db.create_table(u'ficha_granos_basicos_datosparcela', (
('latitud', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('percepcion_fertilidad', self.gf('django.db.models.fields.IntegerField')()),
('distancia', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('edad_parcela', self.gf('django.db.models.fields.FloatField')()),
('profundidad_capa', self.gf('django.db.models.fields.FloatField')()),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('fuente_agua', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=7, null=True, blank=True)),
('longitud', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('acceso_agua', self.gf('django.db.models.fields.IntegerField')()),
('monitoreo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['ficha_granos_basicos.Monitoreo'])),
('direccion_viento', self.gf('django.db.models.fields.IntegerField')()),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=100)),
('tamano_parcela', self.gf('django.db.models.fields.FloatField')()),
))
db.send_create_signal(u'ficha_granos_basicos', ['DatosParcela'])
# Adding field 'Monitoreo.fecha_monitoreo'
db.add_column(u'ficha_granos_basicos_monitoreo', 'fecha_monitoreo',
self.gf('django.db.models.fields.DateField')(null=True, blank=True),
keep_default=False)
# Deleting field 'Monitoreo.cultivo'
db.delete_column(u'ficha_granos_basicos_monitoreo', 'cultivo')
models = {
u'ficha_granos_basicos.curadosemilla': {
'Meta': {'object_name': 'CuradoSemilla'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tratamiento': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ficha_granos_basicos.TratamientoSemilla']", 'symmetrical': 'False'}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.datosmonitoreo': {
'Meta': {'object_name': 'DatosMonitoreo'},
'area_siembra': ('django.db.models.fields.FloatField', [], {}),
'cultivo': ('django.db.models.fields.IntegerField', [], {}),
'fecha_cosecha': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'fecha_siembra': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monitoreo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Monitoreo']"})
},
u'ficha_granos_basicos.distribucionpendiente': {
'Meta': {'object_name': 'DistribucionPendiente'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inclinado': ('django.db.models.fields.FloatField', [], {}),
'monitoreo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Monitoreo']"}),
'plano': ('django.db.models.fields.FloatField', [], {}),
'seleccion': ('django.db.models.fields.IntegerField', [], {})
},
u'ficha_granos_basicos.enfermedadesfrijol': {
'Meta': {'object_name': 'EnfermedadesFrijol'},
'enfermedad': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Especies']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'planta_1': ('django.db.models.fields.IntegerField', [], {}),
'planta_2': ('django.db.models.fields.IntegerField', [], {}),
'planta_3': ('django.db.models.fields.IntegerField', [], {}),
'planta_4': ('django.db.models.fields.IntegerField', [], {}),
'planta_5': ('django.db.models.fields.IntegerField', [], {}),
'promedio': ('django.db.models.fields.FloatField', [], {}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.enfermedadesmaiz': {
'Meta': {'object_name': 'EnfermedadesMaiz'},
'enfermedad': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Especies']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'planta_1': ('django.db.models.fields.IntegerField', [], {}),
'planta_2': ('django.db.models.fields.IntegerField', [], {}),
'planta_3': ('django.db.models.fields.IntegerField', [], {}),
'planta_4': ('django.db.models.fields.IntegerField', [], {}),
'planta_5': ('django.db.models.fields.IntegerField', [], {}),
'promedio': ('django.db.models.fields.FloatField', [], {}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.especies': {
'Meta': {'object_name': 'Especies'},
'control_biologico': ('ckeditor.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
'control_cultural': ('ckeditor.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
'control_quimico': ('ckeditor.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
'dano1': ('ckeditor.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
'descripcion': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre_cientifico': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'nombre_popular': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'rango_max': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rango_min': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'reconocimiento': ('ckeditor.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
'rubro': ('django.db.models.fields.IntegerField', [], {}),
'tipo': ('django.db.models.fields.IntegerField', [], {}),
'umbral': ('django.db.models.fields.IntegerField', [], {})
},
u'ficha_granos_basicos.estimadocosechafrijol': {
'Meta': {'object_name': 'EstimadoCosechaFrijol'},
'estacion': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'planta_1': ('django.db.models.fields.IntegerField', [], {}),
'planta_2': ('django.db.models.fields.IntegerField', [], {}),
'planta_3': ('django.db.models.fields.IntegerField', [], {}),
'planta_4': ('django.db.models.fields.IntegerField', [], {}),
'planta_5': ('django.db.models.fields.IntegerField', [], {}),
'promedio': ('django.db.models.fields.FloatField', [], {}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.estimadocosechamaiz': {
'Meta': {'object_name': 'EstimadoCosechaMaiz'},
'estacion_1': ('django.db.models.fields.IntegerField', [], {}),
'estacion_2': ('django.db.models.fields.IntegerField', [], {}),
'estacion_3': ('django.db.models.fields.IntegerField', [], {}),
'estacion_4': ('django.db.models.fields.IntegerField', [], {}),
'estacion_5': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mazorca': ('django.db.models.fields.IntegerField', [], {}),
'promedio': ('django.db.models.fields.FloatField', [], {}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.estimadocosechamaiz2': {
'Meta': {'object_name': 'EstimadoCosechaMaiz2'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mazorca': ('django.db.models.fields.IntegerField', [], {}),
'peso': ('django.db.models.fields.FloatField', [], {}),
'peso_promedio': ('django.db.models.fields.IntegerField', [], {}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.fotosespecies': {
'Meta': {'object_name': 'FotosEspecies'},
'especie': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Especies']"}),
'foto': (u'sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'ficha_granos_basicos.gastos': {
'Meta': {'object_name': 'Gastos'},
'fecha_siembra': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'productor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Monitoreo']"}),
'rubro': ('django.db.models.fields.IntegerField', [], {})
},
u'ficha_granos_basicos.granosplanta': {
'Meta': {'object_name': 'GranosPlanta'},
'cantidad': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.historialrendimiento': {
'Meta': {'object_name': 'HistorialRendimiento'},
'anio': ('django.db.models.fields.IntegerField', [], {}),
'ciclo_productivo': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monitoreo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Monitoreo']"}),
'rendimiento': ('django.db.models.fields.FloatField', [], {}),
'rubro': ('django.db.models.fields.IntegerField', [], {})
},
u'ficha_granos_basicos.insumos': {
'Meta': {'object_name': 'Insumos'},
'fecha_siembra': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'productor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Monitoreo']"}),
'rubro': ('django.db.models.fields.IntegerField', [], {})
},
u'ficha_granos_basicos.liga_nested': {
'Meta': {'object_name': 'Liga_Nested'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'producto': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Productos']"}),
'tabla_insumos': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.TablaInsumos']"}),
'unidades': ('django.db.models.fields.FloatField', [], {})
},
u'ficha_granos_basicos.macrofauna': {
'Meta': {'object_name': 'Macrofauna'},
'especie': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Especies']"}),
'est1': ('django.db.models.fields.IntegerField', [], {}),
'est2': ('django.db.models.fields.IntegerField', [], {}),
'est3': ('django.db.models.fields.IntegerField', [], {}),
'est4': ('django.db.models.fields.IntegerField', [], {}),
'est5': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'promedio': ('django.db.models.fields.FloatField', [], {}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.monitoreo': {
'Meta': {'object_name': 'Monitoreo'},
'acceso_agua': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'anio': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ciclo_productivo': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cultivo': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'direccion_viento': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'distancia': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'edad_parcela': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'fuente_agua': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '7', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitud': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitud': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'nombre_parcela': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'percepcion_fertilidad': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'productor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mapeo.Persona']"}),
'profundidad_capa': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'tamano_parcela': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'ficha_granos_basicos.monitoreomalezas': {
'Meta': {'object_name': 'MonitoreoMalezas'},
'ciperaceas': ('django.db.models.fields.FloatField', [], {}),
'cobertura': ('django.db.models.fields.IntegerField', [], {}),
'cobertura_total': ('django.db.models.fields.FloatField', [], {}),
'gramineas': ('django.db.models.fields.FloatField', [], {}),
'hoja_ancha': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.parametrossuelo': {
'Meta': {'object_name': 'ParametrosSuelo'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nivel_critico': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'nivel_suficiencia': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'parametro': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'unidad': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'ficha_granos_basicos.plagasfrijol': {
'Meta': {'object_name': 'PlagasFrijol'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plaga': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Especies']"}),
'porcentaje_dano_1': ('django.db.models.fields.FloatField', [], {}),
'porcentaje_dano_2': ('django.db.models.fields.FloatField', [], {}),
'porcentaje_dano_3': ('django.db.models.fields.FloatField', [], {}),
'porcentaje_dano_4': ('django.db.models.fields.FloatField', [], {}),
'porcentaje_dano_5': ('django.db.models.fields.FloatField', [], {}),
'presencia_1': ('django.db.models.fields.FloatField', [], {}),
'presencia_2': ('django.db.models.fields.FloatField', [], {}),
'presencia_3': ('django.db.models.fields.FloatField', [], {}),
'presencia_4': ('django.db.models.fields.FloatField', [], {}),
'presencia_5': ('django.db.models.fields.FloatField', [], {}),
'promedio_dano': ('django.db.models.fields.FloatField', [], {}),
'promedio_presencia': ('django.db.models.fields.FloatField', [], {}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.plagasmaiz': {
'Meta': {'object_name': 'PlagasMaiz'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plaga': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Especies']"}),
'porcentaje_dano_1': ('django.db.models.fields.FloatField', [], {}),
'porcentaje_dano_2': ('django.db.models.fields.FloatField', [], {}),
'porcentaje_dano_3': ('django.db.models.fields.FloatField', [], {}),
'porcentaje_dano_4': ('django.db.models.fields.FloatField', [], {}),
'porcentaje_dano_5': ('django.db.models.fields.FloatField', [], {}),
'presencia_1': ('django.db.models.fields.FloatField', [], {}),
'presencia_2': ('django.db.models.fields.FloatField', [], {}),
'presencia_3': ('django.db.models.fields.FloatField', [], {}),
'presencia_4': ('django.db.models.fields.FloatField', [], {}),
'presencia_5': ('django.db.models.fields.FloatField', [], {}),
'promedio_dano': ('django.db.models.fields.FloatField', [], {}),
'promedio_presencia': ('django.db.models.fields.FloatField', [], {}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.poblacionfrijol': {
'Meta': {'object_name': 'PoblacionFrijol'},
'distancia_frijol': ('django.db.models.fields.FloatField', [], {}),
'est1': ('django.db.models.fields.IntegerField', [], {}),
'est2': ('django.db.models.fields.IntegerField', [], {}),
'est3': ('django.db.models.fields.IntegerField', [], {}),
'est4': ('django.db.models.fields.IntegerField', [], {}),
'est5': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metros_lineales': ('django.db.models.fields.FloatField', [], {}),
'numero_surcos': ('django.db.models.fields.FloatField', [], {}),
'poblacion': ('django.db.models.fields.FloatField', [], {}),
'promedio': ('django.db.models.fields.FloatField', [], {}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.poblacionmaiz': {
'Meta': {'object_name': 'PoblacionMaiz'},
'distancia_maiz': ('django.db.models.fields.FloatField', [], {}),
'est1': ('django.db.models.fields.IntegerField', [], {}),
'est2': ('django.db.models.fields.IntegerField', [], {}),
'est3': ('django.db.models.fields.IntegerField', [], {}),
'est4': ('django.db.models.fields.IntegerField', [], {}),
'est5': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metros_lineales': ('django.db.models.fields.FloatField', [], {}),
'numero_surcos': ('django.db.models.fields.FloatField', [], {}),
'poblacion': ('django.db.models.fields.FloatField', [], {}),
'promedio': ('django.db.models.fields.FloatField', [], {}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.procedenciasemilla': {
'Meta': {'object_name': 'ProcedenciaSemilla'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'procedencia': ('django.db.models.fields.IntegerField', [], {}),
'rubro': ('django.db.models.fields.IntegerField', [], {}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.productos': {
'Meta': {'object_name': 'Productos'},
'categoria': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre_comercial': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'presentacion': ('django.db.models.fields.IntegerField', [], {}),
'principio_activo': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ficha_granos_basicos.pruebagerminacion': {
'Meta': {'object_name': 'PruebaGerminacion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'porcentaje': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'respuesta': ('django.db.models.fields.IntegerField', [], {}),
'rubro': ('django.db.models.fields.IntegerField', [], {}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.recursossiembra': {
'Meta': {'object_name': 'RecursosSiembra'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monitoreo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Monitoreo']"}),
'respuesta': ('django.db.models.fields.IntegerField', [], {}),
'rubro': ('django.db.models.fields.IntegerField', [], {})
},
u'ficha_granos_basicos.semillas': {
'Meta': {'object_name': 'Semillas'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre_semilla': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'rubro': ('django.db.models.fields.IntegerField', [], {}),
'tipo_semilla': ('django.db.models.fields.IntegerField', [], {}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.sobrecosecha': {
'Meta': {'object_name': 'SobreCosecha'},
'almacenamiento': ('django.db.models.fields.FloatField', [], {}),
'cosecha': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'precio_mercado': ('django.db.models.fields.FloatField', [], {}),
'rubro': ('django.db.models.fields.IntegerField', [], {}),
'venta': ('django.db.models.fields.FloatField', [], {}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.suelo': {
'Meta': {'object_name': 'Suelo'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parametro': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.ParametrosSuelo']"}),
'resultado': ('django.db.models.fields.FloatField', [], {}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.tabladecisiones': {
'Meta': {'object_name': 'TablaDecisiones'},
'area': ('django.db.models.fields.IntegerField', [], {}),
'decision': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'porque': ('django.db.models.fields.TextField', [], {}),
'seleccion': ('django.db.models.fields.IntegerField', [], {}),
'toma_deciciones': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.TomaDecisiones']"}),
'visita': ('django.db.models.fields.IntegerField', [], {})
},
u'ficha_granos_basicos.tablagastos': {
'Meta': {'object_name': 'TablaGastos'},
'actividad': ('django.db.models.fields.IntegerField', [], {}),
'descripcion': ('django.db.models.fields.TextField', [], {}),
'dias_persona': ('django.db.models.fields.IntegerField', [], {}),
'fecha': ('django.db.models.fields.DateField', [], {}),
'gastos': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Gastos']"}),
'hombres': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mujeres': ('django.db.models.fields.IntegerField', [], {}),
'valor': ('django.db.models.fields.IntegerField', [], {})
},
u'ficha_granos_basicos.tablainsumos': {
'Meta': {'object_name': 'TablaInsumos'},
'bombas': ('django.db.models.fields.FloatField', [], {}),
'fecha': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'insumos': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Insumos']"}),
'producto': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Productos']"}),
'unidades': ('django.db.models.fields.FloatField', [], {})
},
u'ficha_granos_basicos.tablamalezas': {
'Meta': {'object_name': 'TablaMalezas'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maleza': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.TiposMalezas']"}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.tiposmalezas': {
'Meta': {'object_name': 'TiposMalezas'},
'categoria': ('django.db.models.fields.IntegerField', [], {}),
'ciclo': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre_cientifico': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'nombre_popular': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ficha_granos_basicos.tomadecisiones': {
'Meta': {'object_name': 'TomaDecisiones'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'productor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Monitoreo']"})
},
u'ficha_granos_basicos.tratamientosemilla': {
'Meta': {'object_name': 'TratamientoSemilla'},
'dosis': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'preparacion': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'ficha_granos_basicos.vigorfrijol': {
'Meta': {'object_name': 'VigorFrijol'},
'est1': ('django.db.models.fields.IntegerField', [], {}),
'est2': ('django.db.models.fields.IntegerField', [], {}),
'est3': ('django.db.models.fields.IntegerField', [], {}),
'est4': ('django.db.models.fields.IntegerField', [], {}),
'est5': ('django.db.models.fields.IntegerField', [], {}),
'estimado_plantas': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plantas': ('django.db.models.fields.IntegerField', [], {}),
'porcentaje': ('django.db.models.fields.FloatField', [], {}),
'promedio': ('django.db.models.fields.FloatField', [], {}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.vigormaiz': {
'Meta': {'object_name': 'VigorMaiz'},
'est1': ('django.db.models.fields.IntegerField', [], {}),
'est2': ('django.db.models.fields.IntegerField', [], {}),
'est3': ('django.db.models.fields.IntegerField', [], {}),
'est4': ('django.db.models.fields.IntegerField', [], {}),
'est5': ('django.db.models.fields.IntegerField', [], {}),
'estimado_plantas': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plantas': ('django.db.models.fields.IntegerField', [], {}),
'porcentaje': ('django.db.models.fields.FloatField', [], {}),
'promedio': ('django.db.models.fields.FloatField', [], {}),
'visita': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Visitas']"})
},
u'ficha_granos_basicos.visitas': {
'Meta': {'object_name': 'Visitas'},
'anio': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'areas': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '17'}),
'fecha': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'productor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ficha_granos_basicos.Monitoreo']"}),
'visita': ('django.db.models.fields.IntegerField', [], {})
},
u'lugar.comunidad': {
'Meta': {'ordering': "['nombre']", 'object_name': 'Comunidad'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'longitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'municipio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'lugar.departamento': {
'Meta': {'ordering': "['nombre']", 'object_name': 'Departamento'},
'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'latitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'longitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
},
u'lugar.municipio': {
'Meta': {'ordering': "['departamento__nombre', 'nombre']", 'object_name': 'Municipio'},
'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'latitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'longitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
},
u'lugar.pais': {
'Meta': {'object_name': 'Pais'},
'codigo': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'mapeo.persona': {
'Meta': {'object_name': 'Persona'},
'cedula': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'comunidad': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Comunidad']"}),
'departamento': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
'edad': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipio': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
'sexo': ('django.db.models.fields.IntegerField', [], {}),
'tipo_persona': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['ficha_granos_basicos'] | mit | 861,400,612,660,394,000 | 5,342,694,369,801,670,000 | 70.954274 | 166 | 0.553879 | false |
yjydmlh/zerorpc-python | zerorpc/socket.py | 134 | 1737 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .context import Context
from .events import Events
class SocketBase(object):
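    # Thin base for zerorpc sockets: it owns an Events channel of the requested
    # ZeroMQ socket type, created on a shared Context, and forwards
    # close/connect/bind straight to that channel.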
def __init__(self, zmq_socket_type, context=None):
self._context = context or Context.get_instance()
self._events = Events(zmq_socket_type, context)
def close(self):
self._events.close()
def connect(self, endpoint, resolve=True):
return self._events.connect(endpoint, resolve)
def bind(self, endpoint, resolve=True):
return self._events.bind(endpoint, resolve)
| mit | -6,445,730,666,407,966,000 | -2,904,973,091,355,142,700 | 39.395349 | 81 | 0.739781 | false |
alexwaters/python-readability-api | readability/models.py | 1 | 5472 | # -*- coding: utf-8 -*-
"""
readability.models
~~~~~~~~~~~~~~~~~~
This module provides the core Readability API models.
"""
from .helpers import to_python, to_api
class BaseResource(object):
"""A Base BaseResource object."""
def __init__(self):
super(BaseResource, self).__init__()
self._rdd = None
def __dir__(self):
d = self.__dict__.copy()
try:
del d['_rdd']
except KeyError:
pass
return d.keys()
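# Each model below is built from an API response dict via new_from_dict(); the
# optional `rdd` argument is the client instance, kept on `_rdd`, which is what
# lets objects make follow-up calls such as Bookmark.delete()/update() or
# User.bookmarks().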
class Bookmark(BaseResource):
"""Bookmark API Model."""
def __init__(self):
self.id = None
self.user_id = None
self.read_percent = None
self.date_updated = None
self.favorite = None
self.archive = None
self.date_archived = None
self.date_opened = None
self.date_added = None
self.article = None
def __repr__(self):
return '<bookmark id="%s" favorite="%s" archive="%s" read_percent="%s">' % (self.id, self.favorite, self.archive, self.read_percent)
@staticmethod
def new_from_dict(d, rdd=None):
b = to_python(
obj=Bookmark(), in_dict=d,
string_keys = (
'id', 'user_id', 'read_percent', 'favorite', 'archive',
'author',
),
date_keys = ('date_updated', 'date_archived', 'date_opened', 'date_added'),
object_map = {'article': Article},
_rdd = rdd
)
return b
def delete(self):
"""Deletes Bookmark."""
return self._rdd._delete_resource(('bookmarks', self.id))
def update(self):
"""Updates Bookmark."""
args = to_api(
dict(
favorite=self.favorite,
archive=self.archive,
read_percent=self.read_percent,
),
int_keys=('favorite', 'archive')
)
r = self._rdd._post_resource(('bookmarks', self.id), **args)
return r
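    # Illustrative round trip (assumes `rdd` is an authenticated client from
    # this package and `b` is one of its bookmarks):
    #   b.favorite = True
    #   b.read_percent = 42.0
    #   b.update()   # to_api() turns the booleans into 0/1 for the POST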
class Article(BaseResource):
def __init__(self):
self.id = None
self.domain = None
self.title = None
self.url = None
self.short_url = None
self.author = None
self.word_count = None
self.content = None
self.excerpt = None
self.date_published = None
self.next_page_href = None
self.processed = None
self.content_size = None
def __repr__(self):
return '<article id="%s">' % (self.id,)
@staticmethod
def new_from_dict(d, rdd=None):
return to_python(
obj=Article(), in_dict=d,
string_keys = (
'id', 'domain', 'title', 'url', 'short_url', 'author',
'word_count', 'content', 'excerpt', 'next_page_href',
'processed', 'content_size',
),
date_keys = ('date_published',),
_rdd = rdd
)
class Domain(BaseResource):
def __init__(self):
super(Domain, self).__init__()
self.fqdn = None
self.articles_ref = None
def __repr__(self):
return '<domain fqdn="%s">' % (self.fqdn,)
@staticmethod
def new_from_dict(d, rdd=None):
return to_python(
obj=Domain(), in_dict=d,
string_keys = ('fqdn', 'articles_ref'),
_rdd = rdd
)
def articles(self, **filters):
"""Returns Article list, filtered by Domain."""
return self._rdd.get_articles(domain=self.fqdn, **filters)
def contributions(self, **filters):
"""Returns Article list, filtered by Domain."""
return self._rdd.get_contributions(domain=self.fqdn, **filters)
class Contribution(BaseResource):
def __init__(self):
super(Contribution, self).__init__()
self.date = None
self.contribution = None
self.user = None
self.domain = None
self.num_bookmarks = None
def __repr__(self):
return '<contribution domain="%s">' % (self.domain,)
@staticmethod
def new_from_dict(d, rdd=None):
return to_python(
obj=Contribution(), in_dict=d,
string_keys = ('contribution', 'user', 'domain', 'num_bookmarks'),
            date_keys = ('date',),
_rdd = rdd
)
class User(BaseResource):
"""User API Model."""
def __init__(self):
self.username = None
self.first_name = None
self.last_name = None
self.date_joined = None
def __repr__(self):
return '<user name="%s">' % (self.username,)
@staticmethod
def new_from_dict(d, rdd=None):
return to_python(
obj=User(), in_dict=d,
            string_keys = ('username', 'first_name', 'last_name'),
date_keys = ('date_joined',),
_rdd=rdd
)
def bookmarks(self, **filters):
"""Returns Bookmark list, filtered by User."""
if self.username == self._rdd.username:
return self._rdd.get_bookmarks(user=self.username, **filters)
else:
return self._rdd.get_bookmarks_by_user(self.username, **filters)
def contributions(self, **filters):
"""Returns Contributions list, filtered by User."""
if self.username == self._rdd.username:
return self._rdd.get_contributions(user=self.username, **filters)
else:
return self._rdd.get_contributions_by_user(self.username, **filters)
| mit | 8,572,337,736,564,225,000 | -3,402,804,595,112,402,000 | 22.088608 | 140 | 0.524671 | false |
charlesccychen/incubator-beam | sdks/python/apache_beam/examples/complete/autocomplete_test.py | 5 | 2520 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test for the autocomplete example."""
from __future__ import absolute_import
import unittest
from nose.plugins.attrib import attr
import apache_beam as beam
from apache_beam.examples.complete import autocomplete
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class AutocompleteTest(unittest.TestCase):
WORDS = ['this', 'this', 'that', 'to', 'to', 'to']
KINGLEAR_HASH_SUM = 3104188901048578415956
KINGLEAR_INPUT = 'gs://dataflow-samples/shakespeare/kinglear.txt'
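  # KINGLEAR_INPUT is the public Beam sample copy of King Lear; the integration
  # test below hashes every (prefix, candidates) pair produced by TopPerPrefix,
  # sums the hashes, and compares the total against KINGLEAR_HASH_SUM.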
def test_top_prefixes(self):
with TestPipeline() as p:
words = p | beam.Create(self.WORDS)
result = words | autocomplete.TopPerPrefix(5)
# values must be hashable for now
result = result | beam.Map(lambda k_vs: (k_vs[0], tuple(k_vs[1])))
assert_that(result, equal_to(
[
('t', ((3, 'to'), (2, 'this'), (1, 'that'))),
('to', ((3, 'to'), )),
('th', ((2, 'this'), (1, 'that'))),
('thi', ((2, 'this'), )),
('this', ((2, 'this'), )),
('tha', ((1, 'that'), )),
('that', ((1, 'that'), )),
]))
@attr('IT')
def test_autocomplete_it(self):
with TestPipeline(is_integration_test=True) as p:
words = p | beam.io.ReadFromText(self.KINGLEAR_INPUT)
result = words | autocomplete.TopPerPrefix(10)
# values must be hashable for now
result = result | beam.Map(lambda k_vs: (k_vs[0], tuple(k_vs[1])))
checksum = result | beam.Map(hash) | beam.CombineGlobally(sum)
assert_that(checksum, equal_to([self.KINGLEAR_HASH_SUM]))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 6,061,464,007,232,673,000 | 3,665,487,041,497,285,600 | 35.521739 | 74 | 0.653175 | false |
Wikidata/StrepHit | tests/test_classification.py | 1 | 4013 | # -*- encoding: utf-8 -*-
import unittest
from treetaggerwrapper import Tag
from strephit.classification import feature_extractors
class TestFactExtractorFeatureExtractor(unittest.TestCase):
def setUp(self):
self.gazetteer = {
'sentence': ['feature1', 'feature2']
}
self.sentences_data = [
{
'sentence': u'This is the first sentence',
'fes': {
'Subject': u'this',
'Missing': u'this is not',
'Object': u'first sentence',
},
},
{
'sentence': u'This is the second sentence',
'fes': {},
}
]
def test_sorted_set(self):
s = feature_extractors.SortedSet()
for i in xrange(5):
index = s.put(i)
self.assertEqual(index, i)
for i in xrange(5):
index = s.index(i)
self.assertEqual(index, i)
def test_sentence_to_tokens(self):
extractor = feature_extractors.FactExtractorFeatureExtractor('en')
tokens = extractor.sentence_to_tokens(**self.sentences_data[0])
self.assertEqual(tokens, [[u'this', u'DT', u'this', u'Subject'],
Tag(word=u'is', pos=u'VBZ', lemma=u'be'),
Tag(word=u'the', pos=u'DT', lemma=u'the'),
[u'first sentence', 'ENT', u'first sentence', u'Object']])
def test_feature_for(self):
extractor = feature_extractors.FactExtractorFeatureExtractor('en')
self.assertEqual(extractor.feature_for('word1', 'pos', 3, True), 1)
self.assertEqual(extractor.feature_for('word2', 'lemma', -2, True), 2)
self.assertEqual(extractor.feature_for('WoRd1', 'POs', 3, True), 1)
def test_extract_features_no_window(self):
extractor = feature_extractors.FactExtractorFeatureExtractor('en', 0)
_, f1 = extractor.extract_features(add_unknown=True, gazetteer=self.gazetteer,
**self.sentences_data[0])
_, f2 = extractor.extract_features(add_unknown=True, gazetteer=self.gazetteer,
**self.sentences_data[1])
self.assertEqual(f1[0][0], f2[0][0])
self.assertEqual(f1[1][0], f2[1][0])
self.assertEqual(f1[2][0], f2[2][0])
def test_extract_features_window(self):
window = 2
extractor = feature_extractors.FactExtractorFeatureExtractor('en', window)
_, feat = extractor.extract_features(add_unknown=True, gazetteer=self.gazetteer,
**self.sentences_data[1])
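        # Expected width: presumably 3 lexical features (word, POS, lemma) per
        # token over a (2 * window + 1)-token context, plus 2 gazetteer
        # features for the sentence, i.e. 3 * (2 * window + 1) + 2 = 17
        # columns when window is 2.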
self.assertEqual(len(feat[2][0]), 3 * (2 * window + 1) + 2)
def test_feature_labels(self):
extractor = feature_extractors.FactExtractorFeatureExtractor('en')
_, tokens = extractor.extract_features(add_unknown=True, gazetteer=self.gazetteer,
**self.sentences_data[0])
self.assertEqual(tokens[0][1], 0)
self.assertEqual(tokens[1][1], 1)
self.assertEqual(tokens[2][1], 1)
self.assertEqual(tokens[3][1], 2)
def test_get_training_set(self):
extractor = feature_extractors.FactExtractorFeatureExtractor('en')
extractor.process_sentence(add_unknown=True, gazetteer=self.gazetteer,
**self.sentences_data[0])
extractor.process_sentence(add_unknown=True, gazetteer=self.gazetteer,
**self.sentences_data[1])
x, y = extractor.get_features()
self.assertEqual(x.shape, (9, 70))
self.assertEqual(list(y), [0, 1, 1, 2, 1, 1, 1, 1, 1])
def test_unknown_token(self):
extractor = feature_extractors.FactExtractorFeatureExtractor('en')
self.assertEqual(extractor.feature_for('a', 'b', 12, add_unknown=False),
extractor.unk_index)
| gpl-3.0 | -3,222,673,886,667,743,700 | -671,493,567,643,517,700 | 40.802083 | 92 | 0.555943 | false |
IPVL/swift-kilo | test/unit/common/middleware/test_bulk.py | 14 | 37987 | # Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
import unittest
import os
import tarfile
import urllib
import zlib
import mock
from shutil import rmtree
from tempfile import mkdtemp
from StringIO import StringIO
from eventlet import sleep
from mock import patch, call
from swift.common import utils, constraints
from swift.common.middleware import bulk
from swift.common.swob import Request, Response, HTTPException
from swift.common.http import HTTP_NOT_FOUND, HTTP_UNAUTHORIZED
class FakeApp(object):
def __init__(self):
self.calls = 0
self.delete_paths = []
self.max_pathlen = 100
self.del_cont_total_calls = 2
self.del_cont_cur_call = 0
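    # __call__ below acts as a small fake proxy: the leading path segment of
    # each request selects a canned response (e.g. /unauth/ -> 401,
    # /create_cont/ -> HEAD 404 then PUT 201, /broke/ -> 500, and so on), so
    # the bulk middleware tests can exercise specific success and failure
    # branches without a real Swift backend.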
def __call__(self, env, start_response):
self.calls += 1
if env['PATH_INFO'].startswith('/unauth/'):
if env['PATH_INFO'].endswith('/c/f_ok'):
return Response(status='204 No Content')(env, start_response)
return Response(status=401)(env, start_response)
if env['PATH_INFO'].startswith('/create_cont/'):
if env['REQUEST_METHOD'] == 'HEAD':
return Response(status='404 Not Found')(env, start_response)
return Response(status='201 Created')(env, start_response)
if env['PATH_INFO'].startswith('/create_cont_fail/'):
if env['REQUEST_METHOD'] == 'HEAD':
return Response(status='403 Forbidden')(env, start_response)
return Response(status='404 Not Found')(env, start_response)
if env['PATH_INFO'].startswith('/create_obj_unauth/'):
if env['PATH_INFO'].endswith('/cont'):
return Response(status='201 Created')(env, start_response)
return Response(status=401)(env, start_response)
if env['PATH_INFO'].startswith('/tar_works/'):
if len(env['PATH_INFO']) > self.max_pathlen:
return Response(status='400 Bad Request')(env, start_response)
return Response(status='201 Created')(env, start_response)
if env['PATH_INFO'].startswith('/tar_works_cont_head_fail/'):
if env['REQUEST_METHOD'] == 'HEAD':
return Response(status='404 Not Found')(env, start_response)
if len(env['PATH_INFO']) > 100:
return Response(status='400 Bad Request')(env, start_response)
return Response(status='201 Created')(env, start_response)
if (env['PATH_INFO'].startswith('/delete_works/')
and env['REQUEST_METHOD'] == 'DELETE'):
self.delete_paths.append(env['PATH_INFO'])
if len(env['PATH_INFO']) > self.max_pathlen:
return Response(status='400 Bad Request')(env, start_response)
if env['PATH_INFO'].endswith('404'):
return Response(status='404 Not Found')(env, start_response)
if env['PATH_INFO'].endswith('badutf8'):
return Response(
status='412 Precondition Failed')(env, start_response)
return Response(status='204 No Content')(env, start_response)
if env['PATH_INFO'].startswith('/delete_cont_fail/'):
return Response(status='409 Conflict')(env, start_response)
if env['PATH_INFO'].startswith('/broke/'):
return Response(status='500 Internal Error')(env, start_response)
if env['PATH_INFO'].startswith('/delete_cont_success_after_attempts/'):
if self.del_cont_cur_call < self.del_cont_total_calls:
self.del_cont_cur_call += 1
return Response(status='409 Conflict')(env, start_response)
else:
return Response(status='204 No Content')(env, start_response)
def build_dir_tree(start_path, tree_obj):
if isinstance(tree_obj, list):
for obj in tree_obj:
build_dir_tree(start_path, obj)
if isinstance(tree_obj, dict):
for dir_name, obj in tree_obj.iteritems():
dir_path = os.path.join(start_path, dir_name)
os.mkdir(dir_path)
build_dir_tree(dir_path, obj)
if isinstance(tree_obj, unicode):
tree_obj = tree_obj.encode('utf8')
if isinstance(tree_obj, str):
obj_path = os.path.join(start_path, tree_obj)
with open(obj_path, 'w+') as tree_file:
tree_file.write('testing')
def build_tar_tree(tar, start_path, tree_obj, base_path=''):
if isinstance(tree_obj, list):
for obj in tree_obj:
build_tar_tree(tar, start_path, obj, base_path=base_path)
if isinstance(tree_obj, dict):
for dir_name, obj in tree_obj.iteritems():
dir_path = os.path.join(start_path, dir_name)
tar_info = tarfile.TarInfo(dir_path[len(base_path):])
tar_info.type = tarfile.DIRTYPE
tar.addfile(tar_info)
build_tar_tree(tar, dir_path, obj, base_path=base_path)
if isinstance(tree_obj, unicode):
tree_obj = tree_obj.encode('utf8')
if isinstance(tree_obj, str):
obj_path = os.path.join(start_path, tree_obj)
tar_info = tarfile.TarInfo('./' + obj_path[len(base_path):])
tar.addfile(tar_info)
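# The tree_obj structures consumed by build_dir_tree/build_tar_tree above are
# nested lists, dicts and strings, e.g. (illustrative only):
#
#   [{'base': [{'sub_dir1': ['file_a', 'file_b']}, 'top_level_file']}]
#
# Dict keys become directories and strings become files: build_dir_tree writes
# each file with the body "testing", while build_tar_tree adds an empty
# TarInfo entry per file.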
class TestUntar(unittest.TestCase):
def setUp(self):
self.app = FakeApp()
self.bulk = bulk.filter_factory({})(self.app)
self.testdir = mkdtemp(suffix='tmp_test_bulk')
def tearDown(self):
self.app.calls = 0
rmtree(self.testdir, ignore_errors=1)
def handle_extract_and_iter(self, req, compress_format,
out_content_type='application/json'):
resp_body = ''.join(
self.bulk.handle_extract_iter(req, compress_format,
out_content_type=out_content_type))
return resp_body
def test_create_container_for_path(self):
req = Request.blank('/')
self.assertEquals(
self.bulk.create_container(req, '/create_cont/acc/cont'),
True)
self.assertEquals(self.app.calls, 2)
self.assertRaises(
bulk.CreateContainerError,
self.bulk.create_container,
req, '/create_cont_fail/acc/cont')
self.assertEquals(self.app.calls, 3)
def test_extract_tar_works(self):
# On systems where $TMPDIR is long (like OS X), we need to do this
# or else every upload will fail due to the path being too long.
self.app.max_pathlen += len(self.testdir)
for compress_format in ['', 'gz', 'bz2']:
base_name = 'base_works_%s' % compress_format
dir_tree = [
{base_name: [{'sub_dir1': ['sub1_file1', 'sub1_file2']},
{'sub_dir2': ['sub2_file1', u'test obj \u2661']},
'sub_file1',
{'sub_dir3': [{'sub4_dir1': '../sub4 file1'}]},
{'sub_dir4': None},
]}]
build_dir_tree(self.testdir, dir_tree)
mode = 'w'
extension = ''
if compress_format:
mode += ':' + compress_format
extension += '.' + compress_format
tar = tarfile.open(name=os.path.join(self.testdir,
'tar_works.tar' + extension),
mode=mode)
tar.add(os.path.join(self.testdir, base_name))
tar.close()
req = Request.blank('/tar_works/acc/cont/')
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar' + extension))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, compress_format)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Files Created'], 6)
# test out xml
req = Request.blank('/tar_works/acc/cont/')
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar' + extension))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(
req, compress_format, 'application/xml')
self.assert_('<response_status>201 Created</response_status>' in
resp_body)
self.assert_('<number_files_created>6</number_files_created>' in
resp_body)
# test out nonexistent format
req = Request.blank('/tar_works/acc/cont/?extract-archive=tar',
headers={'Accept': 'good_xml'})
req.environ['REQUEST_METHOD'] = 'PUT'
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar' + extension))
req.headers['transfer-encoding'] = 'chunked'
def fake_start_response(*args, **kwargs):
pass
app_iter = self.bulk(req.environ, fake_start_response)
resp_body = ''.join([i for i in app_iter])
self.assert_('Response Status: 406' in resp_body)
def test_extract_call(self):
base_name = 'base_works_gz'
dir_tree = [
{base_name: [{'sub_dir1': ['sub1_file1', 'sub1_file2']},
{'sub_dir2': ['sub2_file1', 'sub2_file2']},
'sub_file1',
{'sub_dir3': [{'sub4_dir1': 'sub4_file1'}]}]}]
build_dir_tree(self.testdir, dir_tree)
tar = tarfile.open(name=os.path.join(self.testdir,
'tar_works.tar.gz'),
mode='w:gz')
tar.add(os.path.join(self.testdir, base_name))
tar.close()
def fake_start_response(*args, **kwargs):
pass
req = Request.blank('/tar_works/acc/cont/?extract-archive=tar.gz')
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar.gz'))
self.bulk(req.environ, fake_start_response)
self.assertEquals(self.app.calls, 1)
self.app.calls = 0
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar.gz'))
req.headers['transfer-encoding'] = 'Chunked'
req.method = 'PUT'
app_iter = self.bulk(req.environ, fake_start_response)
list(app_iter) # iter over resp
self.assertEquals(self.app.calls, 7)
self.app.calls = 0
req = Request.blank('/tar_works/acc/cont/?extract-archive=bad')
req.method = 'PUT'
req.headers['transfer-encoding'] = 'Chunked'
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar.gz'))
t = self.bulk(req.environ, fake_start_response)
self.assertEquals(t[0], "Unsupported archive format")
tar = tarfile.open(name=os.path.join(self.testdir,
'tar_works.tar'),
mode='w')
tar.add(os.path.join(self.testdir, base_name))
tar.close()
self.app.calls = 0
req = Request.blank('/tar_works/acc/cont/?extract-archive=tar')
req.method = 'PUT'
req.headers['transfer-encoding'] = 'Chunked'
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar'))
app_iter = self.bulk(req.environ, fake_start_response)
list(app_iter) # iter over resp
self.assertEquals(self.app.calls, 7)
def test_bad_container(self):
req = Request.blank('/invalid/', body='')
resp_body = self.handle_extract_and_iter(req, '')
self.assertTrue('404 Not Found' in resp_body)
def test_content_length_required(self):
req = Request.blank('/create_cont_fail/acc/cont')
resp_body = self.handle_extract_and_iter(req, '')
self.assertTrue('411 Length Required' in resp_body)
def test_bad_tar(self):
req = Request.blank('/create_cont_fail/acc/cont', body='')
def bad_open(*args, **kwargs):
raise zlib.error('bad tar')
with patch.object(tarfile, 'open', bad_open):
resp_body = self.handle_extract_and_iter(req, '')
self.assertTrue('400 Bad Request' in resp_body)
def build_tar(self, dir_tree=None):
if not dir_tree:
dir_tree = [
{'base_fails1': [{'sub_dir1': ['sub1_file1']},
{'sub_dir2': ['sub2_file1', 'sub2_file2']},
'f' * 101,
{'sub_dir3': [{'sub4_dir1': 'sub4_file1'}]}]}]
tar = tarfile.open(name=os.path.join(self.testdir, 'tar_fails.tar'),
mode='w')
build_tar_tree(tar, self.testdir, dir_tree,
base_path=self.testdir + '/')
tar.close()
return tar
def test_extract_tar_with_basefile(self):
dir_tree = [
'base_lvl_file', 'another_base_file',
{'base_fails1': [{'sub_dir1': ['sub1_file1']},
{'sub_dir2': ['sub2_file1', 'sub2_file2']},
{'sub_dir3': [{'sub4_dir1': 'sub4_file1'}]}]}]
self.build_tar(dir_tree)
req = Request.blank('/tar_works/acc/')
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Files Created'], 4)
def test_extract_tar_fail_cont_401(self):
self.build_tar()
req = Request.blank('/unauth/acc/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
self.assertEquals(self.app.calls, 1)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Response Status'], '401 Unauthorized')
self.assertEquals(resp_data['Errors'], [])
def test_extract_tar_fail_obj_401(self):
self.build_tar()
req = Request.blank('/create_obj_unauth/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
self.assertEquals(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Response Status'], '401 Unauthorized')
self.assertEquals(
resp_data['Errors'],
[['cont/base_fails1/sub_dir1/sub1_file1', '401 Unauthorized']])
def test_extract_tar_fail_obj_name_len(self):
self.build_tar()
req = Request.blank('/tar_works/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
self.assertEquals(self.app.calls, 6)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Files Created'], 4)
self.assertEquals(
resp_data['Errors'],
[['cont/base_fails1/' + ('f' * 101), '400 Bad Request']])
def test_extract_tar_fail_compress_type(self):
self.build_tar()
req = Request.blank('/tar_works/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, 'gz')
self.assertEquals(self.app.calls, 0)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Response Status'], '400 Bad Request')
self.assertEquals(
resp_data['Response Body'].lower(),
'invalid tar file: not a gzip file')
def test_extract_tar_fail_max_failed_extractions(self):
self.build_tar()
with patch.object(self.bulk, 'max_failed_extractions', 1):
self.app.calls = 0
req = Request.blank('/tar_works/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
self.assertEquals(self.app.calls, 5)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Files Created'], 3)
self.assertEquals(
resp_data['Errors'],
[['cont/base_fails1/' + ('f' * 101), '400 Bad Request']])
@patch.object(constraints, 'MAX_FILE_SIZE', 4)
def test_extract_tar_fail_max_file_size(self):
tar = self.build_tar()
dir_tree = [{'test': [{'sub_dir1': ['sub1_file1']}]}]
build_dir_tree(self.testdir, dir_tree)
tar = tarfile.open(name=os.path.join(self.testdir,
'tar_works.tar'),
mode='w')
tar.add(os.path.join(self.testdir, 'test'))
tar.close()
self.app.calls = 0
req = Request.blank('/tar_works/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar'))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
resp_data = utils.json.loads(resp_body)
self.assertEquals(
resp_data['Errors'],
[['cont' + self.testdir + '/test/sub_dir1/sub1_file1',
'413 Request Entity Too Large']])
def test_extract_tar_fail_max_cont(self):
dir_tree = [{'sub_dir1': ['sub1_file1']},
{'sub_dir2': ['sub2_file1', 'sub2_file2']},
'f' * 101,
{'sub_dir3': [{'sub4_dir1': 'sub4_file1'}]}]
self.build_tar(dir_tree)
with patch.object(self.bulk, 'max_containers', 1):
self.app.calls = 0
body = open(os.path.join(self.testdir, 'tar_fails.tar')).read()
req = Request.blank('/tar_works_cont_head_fail/acc/', body=body,
headers={'Accept': 'application/json'})
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
self.assertEquals(self.app.calls, 5)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Response Status'], '400 Bad Request')
self.assertEquals(
resp_data['Response Body'],
'More than 1 containers to create from tar.')
def test_extract_tar_fail_create_cont(self):
dir_tree = [{'base_fails1': [
{'sub_dir1': ['sub1_file1']},
{'sub_dir2': ['sub2_file1', 'sub2_file2']},
{'./sub_dir3': [{'sub4_dir1': 'sub4_file1'}]}]}]
self.build_tar(dir_tree)
req = Request.blank('/create_cont_fail/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
resp_data = utils.json.loads(resp_body)
self.assertEquals(self.app.calls, 5)
self.assertEquals(len(resp_data['Errors']), 5)
def test_extract_tar_fail_create_cont_value_err(self):
self.build_tar()
req = Request.blank('/create_cont_fail/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'))
req.headers['transfer-encoding'] = 'chunked'
def bad_create(req, path):
raise ValueError('Test')
with patch.object(self.bulk, 'create_container', bad_create):
resp_body = self.handle_extract_and_iter(req, '')
resp_data = utils.json.loads(resp_body)
self.assertEquals(self.app.calls, 0)
self.assertEquals(len(resp_data['Errors']), 5)
self.assertEquals(
resp_data['Errors'][0],
['cont/base_fails1/sub_dir1/sub1_file1', '400 Bad Request'])
def test_extract_tar_fail_unicode(self):
dir_tree = [{'sub_dir1': ['sub1_file1']},
{'sub_dir2': ['sub2\xdefile1', 'sub2_file2']},
{'sub_\xdedir3': [{'sub4_dir1': 'sub4_file1'}]}]
self.build_tar(dir_tree)
req = Request.blank('/tar_works/acc/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
resp_data = utils.json.loads(resp_body)
self.assertEquals(self.app.calls, 4)
self.assertEquals(resp_data['Number Files Created'], 2)
self.assertEquals(resp_data['Response Status'], '400 Bad Request')
self.assertEquals(
resp_data['Errors'],
[['sub_dir2/sub2%DEfile1', '412 Precondition Failed'],
['sub_%DEdir3/sub4_dir1/sub4_file1', '412 Precondition Failed']])
def test_get_response_body(self):
txt_body = bulk.get_response_body(
'bad_formay', {'hey': 'there'}, [['json > xml', '202 Accepted']])
self.assert_('hey: there' in txt_body)
xml_body = bulk.get_response_body(
'text/xml', {'hey': 'there'}, [['json > xml', '202 Accepted']])
self.assert_('>' in xml_body)
class TestDelete(unittest.TestCase):
def setUp(self):
self.app = FakeApp()
self.bulk = bulk.filter_factory({})(self.app)
def tearDown(self):
self.app.calls = 0
self.app.delete_paths = []
def handle_delete_and_iter(self, req, out_content_type='application/json'):
resp_body = ''.join(self.bulk.handle_delete_iter(
req, out_content_type=out_content_type))
return resp_body
def test_bulk_delete_uses_predefined_object_errors(self):
req = Request.blank('/delete_works/AUTH_Acc')
objs_to_delete = [
{'name': '/c/file_a'},
{'name': '/c/file_b', 'error': {'code': HTTP_NOT_FOUND,
'message': 'not found'}},
{'name': '/c/file_c', 'error': {'code': HTTP_UNAUTHORIZED,
'message': 'unauthorized'}},
{'name': '/c/file_d'}]
resp_body = ''.join(self.bulk.handle_delete_iter(
req, objs_to_delete=objs_to_delete,
out_content_type='application/json'))
self.assertEquals(
self.app.delete_paths, ['/delete_works/AUTH_Acc/c/file_a',
'/delete_works/AUTH_Acc/c/file_d'])
self.assertEquals(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Response Status'], '400 Bad Request')
self.assertEquals(resp_data['Number Deleted'], 2)
self.assertEquals(resp_data['Number Not Found'], 1)
self.assertEquals(resp_data['Errors'],
[['/c/file_c', 'unauthorized']])
def test_bulk_delete_works_with_POST_verb(self):
req = Request.blank('/delete_works/AUTH_Acc', body='/c/f\n/c/f404',
headers={'Accept': 'application/json'})
req.method = 'POST'
resp_body = self.handle_delete_and_iter(req)
self.assertEquals(
self.app.delete_paths,
['/delete_works/AUTH_Acc/c/f', '/delete_works/AUTH_Acc/c/f404'])
self.assertEquals(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Deleted'], 1)
self.assertEquals(resp_data['Number Not Found'], 1)
def test_bulk_delete_works_with_DELETE_verb(self):
req = Request.blank('/delete_works/AUTH_Acc', body='/c/f\n/c/f404',
headers={'Accept': 'application/json'})
req.method = 'DELETE'
resp_body = self.handle_delete_and_iter(req)
self.assertEquals(
self.app.delete_paths,
['/delete_works/AUTH_Acc/c/f', '/delete_works/AUTH_Acc/c/f404'])
self.assertEquals(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Deleted'], 1)
self.assertEquals(resp_data['Number Not Found'], 1)
def test_bulk_delete_bad_content_type(self):
req = Request.blank('/delete_works/AUTH_Acc',
headers={'Accept': 'badformat'})
req = Request.blank('/delete_works/AUTH_Acc',
headers={'Accept': 'application/json',
'Content-Type': 'text/xml'})
req.method = 'POST'
req.environ['wsgi.input'] = StringIO('/c/f\n/c/f404')
resp_body = self.handle_delete_and_iter(req)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Response Status'], '406 Not Acceptable')
def test_bulk_delete_call_and_content_type(self):
def fake_start_response(*args, **kwargs):
self.assertEquals(args[1][0], ('Content-Type', 'application/json'))
req = Request.blank('/delete_works/AUTH_Acc?bulk-delete')
req.method = 'POST'
req.headers['Transfer-Encoding'] = 'chunked'
req.headers['Accept'] = 'application/json'
req.environ['wsgi.input'] = StringIO('/c/f%20')
list(self.bulk(req.environ, fake_start_response)) # iterate over resp
self.assertEquals(
self.app.delete_paths, ['/delete_works/AUTH_Acc/c/f '])
self.assertEquals(self.app.calls, 1)
def test_bulk_delete_get_objs(self):
req = Request.blank('/delete_works/AUTH_Acc', body='1%20\r\n2\r\n')
req.method = 'POST'
with patch.object(self.bulk, 'max_deletes_per_request', 2):
results = self.bulk.get_objs_to_delete(req)
self.assertEquals(results, [{'name': '1 '}, {'name': '2'}])
with patch.object(self.bulk, 'max_path_length', 2):
results = []
req.environ['wsgi.input'] = StringIO('1\n2\n3')
results = self.bulk.get_objs_to_delete(req)
self.assertEquals(results,
[{'name': '1'}, {'name': '2'}, {'name': '3'}])
with patch.object(self.bulk, 'max_deletes_per_request', 9):
with patch.object(self.bulk, 'max_path_length', 1):
req_body = '\n'.join([str(i) for i in xrange(10)])
req = Request.blank('/delete_works/AUTH_Acc', body=req_body)
self.assertRaises(
HTTPException, self.bulk.get_objs_to_delete, req)
def test_bulk_delete_works_extra_newlines_extra_quoting(self):
req = Request.blank('/delete_works/AUTH_Acc',
body='/c/f\n\n\n/c/f404\n\n\n/c/%2525',
headers={'Accept': 'application/json'})
req.method = 'POST'
resp_body = self.handle_delete_and_iter(req)
self.assertEquals(
self.app.delete_paths,
['/delete_works/AUTH_Acc/c/f',
'/delete_works/AUTH_Acc/c/f404',
'/delete_works/AUTH_Acc/c/%25'])
self.assertEquals(self.app.calls, 3)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Deleted'], 2)
self.assertEquals(resp_data['Number Not Found'], 1)
def test_bulk_delete_too_many_newlines(self):
req = Request.blank('/delete_works/AUTH_Acc')
req.method = 'POST'
data = '\n\n' * self.bulk.max_deletes_per_request
req.environ['wsgi.input'] = StringIO(data)
req.content_length = len(data)
resp_body = self.handle_delete_and_iter(req)
self.assertTrue('413 Request Entity Too Large' in resp_body)
def test_bulk_delete_works_unicode(self):
body = (u'/c/ obj \u2661\r\n'.encode('utf8') +
'c/ objbadutf8\r\n' +
'/c/f\xdebadutf8\n')
req = Request.blank('/delete_works/AUTH_Acc', body=body,
headers={'Accept': 'application/json'})
req.method = 'POST'
resp_body = self.handle_delete_and_iter(req)
self.assertEquals(
self.app.delete_paths,
['/delete_works/AUTH_Acc/c/ obj \xe2\x99\xa1',
'/delete_works/AUTH_Acc/c/ objbadutf8'])
self.assertEquals(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Deleted'], 1)
self.assertEquals(len(resp_data['Errors']), 2)
self.assertEquals(
resp_data['Errors'],
[[urllib.quote('c/ objbadutf8'), '412 Precondition Failed'],
[urllib.quote('/c/f\xdebadutf8'), '412 Precondition Failed']])
def test_bulk_delete_no_body(self):
req = Request.blank('/unauth/AUTH_acc/')
resp_body = self.handle_delete_and_iter(req)
self.assertTrue('411 Length Required' in resp_body)
def test_bulk_delete_no_files_in_body(self):
req = Request.blank('/unauth/AUTH_acc/', body=' ')
resp_body = self.handle_delete_and_iter(req)
self.assertTrue('400 Bad Request' in resp_body)
def test_bulk_delete_unauth(self):
req = Request.blank('/unauth/AUTH_acc/', body='/c/f\n/c/f_ok\n',
headers={'Accept': 'application/json'})
req.method = 'POST'
resp_body = self.handle_delete_and_iter(req)
self.assertEquals(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Errors'], [['/c/f', '401 Unauthorized']])
self.assertEquals(resp_data['Response Status'], '400 Bad Request')
self.assertEquals(resp_data['Number Deleted'], 1)
def test_bulk_delete_500_resp(self):
req = Request.blank('/broke/AUTH_acc/', body='/c/f\nc/f2\n',
headers={'Accept': 'application/json'})
req.method = 'POST'
resp_body = self.handle_delete_and_iter(req)
resp_data = utils.json.loads(resp_body)
self.assertEquals(
resp_data['Errors'],
[['/c/f', '500 Internal Error'], ['c/f2', '500 Internal Error']])
self.assertEquals(resp_data['Response Status'], '502 Bad Gateway')
def test_bulk_delete_bad_path(self):
req = Request.blank('/delete_cont_fail/')
resp_body = self.handle_delete_and_iter(req)
self.assertTrue('404 Not Found' in resp_body)
def test_bulk_delete_container_delete(self):
req = Request.blank('/delete_cont_fail/AUTH_Acc', body='c\n',
headers={'Accept': 'application/json'})
req.method = 'POST'
with patch('swift.common.middleware.bulk.sleep',
new=mock.MagicMock(wraps=sleep,
return_value=None)) as mock_sleep:
resp_body = self.handle_delete_and_iter(req)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Deleted'], 0)
self.assertEquals(resp_data['Errors'], [['c', '409 Conflict']])
self.assertEquals(resp_data['Response Status'], '400 Bad Request')
self.assertEquals([], mock_sleep.call_args_list)
def test_bulk_delete_container_delete_retry_and_fails(self):
self.bulk.retry_count = 3
req = Request.blank('/delete_cont_fail/AUTH_Acc', body='c\n',
headers={'Accept': 'application/json'})
req.method = 'POST'
with patch('swift.common.middleware.bulk.sleep',
new=mock.MagicMock(wraps=sleep,
return_value=None)) as mock_sleep:
resp_body = self.handle_delete_and_iter(req)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Deleted'], 0)
self.assertEquals(resp_data['Errors'], [['c', '409 Conflict']])
self.assertEquals(resp_data['Response Status'], '400 Bad Request')
self.assertEquals([call(self.bulk.retry_interval),
call(self.bulk.retry_interval ** 2),
call(self.bulk.retry_interval ** 3)],
mock_sleep.call_args_list)
def test_bulk_delete_container_delete_retry_and_success(self):
self.bulk.retry_count = 3
        self.app.del_cont_total_calls = 2
req = Request.blank('/delete_cont_success_after_attempts/AUTH_Acc',
body='c\n', headers={'Accept': 'application/json'})
req.method = 'DELETE'
with patch('swift.common.middleware.bulk.sleep',
new=mock.MagicMock(wraps=sleep,
return_value=None)) as mock_sleep:
resp_body = self.handle_delete_and_iter(req)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Deleted'], 1)
self.assertEquals(resp_data['Errors'], [])
self.assertEquals(resp_data['Response Status'], '200 OK')
self.assertEquals([call(self.bulk.retry_interval),
call(self.bulk.retry_interval ** 2)],
mock_sleep.call_args_list)
def test_bulk_delete_bad_file_too_long(self):
req = Request.blank('/delete_works/AUTH_Acc',
headers={'Accept': 'application/json'})
req.method = 'POST'
bad_file = 'c/' + ('1' * self.bulk.max_path_length)
data = '/c/f\n' + bad_file + '\n/c/f'
req.environ['wsgi.input'] = StringIO(data)
req.headers['Transfer-Encoding'] = 'chunked'
resp_body = self.handle_delete_and_iter(req)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Deleted'], 2)
self.assertEquals(resp_data['Errors'], [[bad_file, '400 Bad Request']])
self.assertEquals(resp_data['Response Status'], '400 Bad Request')
def test_bulk_delete_bad_file_over_twice_max_length(self):
body = '/c/f\nc/' + ('123456' * self.bulk.max_path_length) + '\n'
req = Request.blank('/delete_works/AUTH_Acc', body=body)
req.method = 'POST'
resp_body = self.handle_delete_and_iter(req)
self.assertTrue('400 Bad Request' in resp_body)
def test_bulk_delete_max_failures(self):
req = Request.blank('/unauth/AUTH_Acc', body='/c/f1\n/c/f2\n/c/f3',
headers={'Accept': 'application/json'})
req.method = 'POST'
with patch.object(self.bulk, 'max_failed_deletes', 2):
resp_body = self.handle_delete_and_iter(req)
self.assertEquals(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Response Status'], '400 Bad Request')
self.assertEquals(resp_data['Response Body'],
'Max delete failures exceeded')
self.assertEquals(resp_data['Errors'],
[['/c/f1', '401 Unauthorized'],
['/c/f2', '401 Unauthorized']])
class TestSwiftInfo(unittest.TestCase):
def setUp(self):
utils._swift_info = {}
utils._swift_admin_info = {}
def test_registered_defaults(self):
bulk.filter_factory({})
swift_info = utils.get_swift_info()
self.assertTrue('bulk_upload' in swift_info)
self.assertTrue(isinstance(
swift_info['bulk_upload'].get('max_containers_per_extraction'),
numbers.Integral))
self.assertTrue(isinstance(
swift_info['bulk_upload'].get('max_failed_extractions'),
numbers.Integral))
self.assertTrue('bulk_delete' in swift_info)
self.assertTrue(isinstance(
swift_info['bulk_delete'].get('max_deletes_per_request'),
numbers.Integral))
self.assertTrue(isinstance(
swift_info['bulk_delete'].get('max_failed_deletes'),
numbers.Integral))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -3,627,186,046,984,674,000 | 4,122,286,657,798,431,000 | 45.552696 | 79 | 0.556059 | false |
rspavel/spack | var/spack/repos/builtin/packages/hbase/package.py | 3 | 1402 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Hbase(Package):
"""Apache HBase is an open-source, distributed, versioned, column-oriented
store modeled after Google' Bigtable: A Distributed Storage System for
Structured Data by Chang et al. Just as Bigtable leverages the distributed
data storage provided by the Google File System, HBase provides
Bigtable-like capabilities on top of Apache Hadoop."""
homepage = "https://archive.apache.org/"
url = "https://archive.apache.org/dist/hbase/2.2.4/hbase-2.2.4-bin.tar.gz"
list_url = "https://archive.apache.org/dist/hbase"
list_depth = 1
version('2.2.5', sha256='25d08f8f038d9de5beb43dfb0392e8a8b34eae7e0f2670d6c2c172abc3855194')
version('2.2.4', sha256='ec91b628352931e22a091a206be93061b6bf5364044a28fb9e82f0023aca3ca4')
version('2.2.3', sha256='ea8fa72aa6220e038e30bd7c439d181b10bd7225383f7f2d224ebb5f5397310a')
version('2.2.2', sha256='97dcca3a031925a379a0ee6bbfb6007533fb4fdb982c23345e5fc04d6c52bebc')
version('2.1.8', sha256='d8296e8405b1c39c73f0dd03fc6b4d2af754035724168fd56e8f2a0ff175ad90')
depends_on('java@8', type='run')
def install(self, spec, prefix):
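        # HBase releases are distributed as prebuilt binary tarballs (note the
        # -bin.tar.gz URLs above), so installing is just copying the unpacked
        # tree into the prefix.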
install_tree('.', prefix)
| lgpl-2.1 | -2,331,223,484,889,332,700 | -8,421,414,866,508,617,000 | 45.733333 | 96 | 0.750357 | false |
kaphka/catconv | convert.py | 1 | 1091 | import argparse
import signal
from tqdm import tqdm
import catconv.operations as co
import catconv.stabi as sb
exit = False
def signal_handler(signum, frame):
    # Flag the main loop to stop cleanly instead of dying mid-conversion.
    global exit
    print('You pressed Ctrl+C!')
    exit = True
signal.signal(signal.SIGINT, signal_handler)
parser = argparse.ArgumentParser()
parser.add_argument("source")
parser.add_argument("target")
parser.add_argument("-u", "--update", help="overwrite previous results",
action="store_true")
args = parser.parse_args()
source = sb.op.normpath(args.source)
target = sb.op.normpath(args.target)
data_dir, target_cat_name = sb.op.split(target)
pages = map(sb.page_from_path, sb.catalog_pages(source,ext=".tif"))
print("Source catalog:")
print("path:", source)
print("pages:", len(pages))
conversion = {"ext": ".jpg", "remove_type": True, "to_cat": data_dir,"cat": target_cat_name}
from_to = [(page, sb.convert_page_path(page, conversion)) for page in pages]
for ft in tqdm(from_to):
if exit:
break
from_page, to_page = ft
if sb.op.isfile(to_page['path']) and not args.update:
continue
else:
co.convert_to_png(*ft)
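# Illustrative invocation (paths are hypothetical):
#   python convert.py /data/catalogs/scans_tif /data/catalogs/scans_jpg
# Existing target pages are skipped unless --update is passed, so interrupted
# runs can simply be restarted.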
| apache-2.0 | -6,632,312,282,893,172,000 | 918,787,810,513,914,200 | 24.372093 | 92 | 0.669111 | false |
DavidNorman/tensorflow | tensorflow/python/ops/image_grad.py | 5 | 15565 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains Gradient functions for image ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import math_ops
@ops.RegisterGradient("ResizeNearestNeighbor")
def _ResizeNearestNeighborGrad(op, grad):
"""The derivatives for nearest neighbor resizing.
Args:
op: The ResizeNearestNeighbor op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input and the output.
"""
image = op.inputs[0]
if image.get_shape()[1:3].is_fully_defined():
image_shape = image.get_shape()[1:3]
else:
image_shape = array_ops.shape(image)[1:3]
grads = gen_image_ops.resize_nearest_neighbor_grad(
grad,
image_shape,
align_corners=op.get_attr("align_corners"),
half_pixel_centers=op.get_attr("half_pixel_centers"))
return [grads, None]
@ops.RegisterGradient("ResizeBilinear")
def _ResizeBilinearGrad(op, grad):
"""The derivatives for bilinear resizing.
Args:
op: The ResizeBilinear op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input.
"""
grad0 = gen_image_ops.resize_bilinear_grad(
grad,
op.inputs[0],
align_corners=op.get_attr("align_corners"),
half_pixel_centers=op.get_attr("half_pixel_centers"))
return [grad0, None]
@ops.RegisterGradient("ScaleAndTranslate")
def _ScaleAndTranslateGrad(op, grad):
"""The derivatives for ScaleAndTranslate transformation op.
Args:
op: The ScaleAndTranslate op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input.
"""
grad0 = gen_image_ops.scale_and_translate_grad(
grad,
op.inputs[0],
op.inputs[2],
op.inputs[3],
kernel_type=op.get_attr("kernel_type"),
antialias=op.get_attr("antialias"))
return [grad0, None, None, None]
@ops.RegisterGradient("ResizeBicubic")
def _ResizeBicubicGrad(op, grad):
"""The derivatives for bicubic resizing.
Args:
op: The ResizeBicubic op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input.
"""
allowed_types = [dtypes.float32, dtypes.float64]
grad0 = None
if op.inputs[0].dtype in allowed_types:
grad0 = gen_image_ops.resize_bicubic_grad(
grad,
op.inputs[0],
align_corners=op.get_attr("align_corners"),
half_pixel_centers=op.get_attr("half_pixel_centers"))
return [grad0, None]
@ops.RegisterGradient("CropAndResize")
def _CropAndResizeGrad(op, grad):
"""The derivatives for crop_and_resize.
We back-propagate to the image only when the input image tensor has floating
point dtype but we always back-propagate to the input boxes tensor.
Args:
op: The CropAndResize op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input image, boxes, as well as the always-None
gradients w.r.t. box_ind and crop_size.
"""
image = op.inputs[0]
if image.get_shape().is_fully_defined():
image_shape = image.get_shape().as_list()
else:
image_shape = array_ops.shape(image)
allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64]
if op.inputs[0].dtype in allowed_types:
# pylint: disable=protected-access
grad0 = gen_image_ops.crop_and_resize_grad_image(
grad, op.inputs[1], op.inputs[2], image_shape, T=op.get_attr("T"),
method=op.get_attr("method"))
# pylint: enable=protected-access
else:
grad0 = None
# `grad0` is the gradient to the input image pixels and it
# has been implemented for nearest neighbor and bilinear sampling
# respectively. `grad1` is the gradient to the input crop boxes' coordinates.
# When using nearest neighbor sampling, the gradient to crop boxes'
# coordinates are not well defined. In practice, we still approximate
# grad1 using the gradient derived from bilinear sampling.
grad1 = gen_image_ops.crop_and_resize_grad_boxes(
grad, op.inputs[0], op.inputs[1], op.inputs[2])
return [grad0, grad1, None, None]
def _CustomReciprocal(x):
"""Wrapper function around `math_ops.div_no_nan()` to perform a "safe" reciprocal incase the input is zero. Avoids divide by zero and NaNs.
Input:
x -> input tensor to be reciprocat-ed.
Returns:
x_reciprocal -> reciprocal of x without NaNs.
"""
return math_ops.div_no_nan(1.0, x)
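# For example, _CustomReciprocal applied to [0.0, 2.0] yields [0.0, 0.5]:
# div_no_nan returns 0 where the denominator is 0 instead of Inf/NaN.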
@ops.RegisterGradient("RGBToHSV")
def _RGBToHSVGrad(op, grad):
"""The gradients for `rgb_to_hsv` operation.
This function is a piecewise continuous function as defined here:
https://en.wikipedia.org/wiki/HSL_and_HSV#From_RGB
  We perform the multivariate differentiation and compute all partial
  derivatives separately before adding them at the end. Formulas are given
  before each
partial derivative calculation.
Args:
op: The `rgb_to_hsv` `Operation` that we are differentiating.
grad: Gradient with respect to the output of the `rgb_to_hsv` op.
Returns:
Gradients with respect to the input of `rgb_to_hsv`.
"""
# Input Channels
reds = op.inputs[0][..., 0]
greens = op.inputs[0][..., 1]
blues = op.inputs[0][..., 2]
# Output Channels
saturation = op.outputs[0][..., 1]
value = op.outputs[0][..., 2]
# Mask/Indicator for max and min values of each pixel.
# Arbitrary assignment in case of tie breakers with R>G>B.
# Max values
red_biggest = math_ops.cast((reds >= blues) & \
(reds >= greens), dtypes.float32)
green_biggest = math_ops.cast((greens > reds) & \
(greens >= blues), dtypes.float32)
blue_biggest = math_ops.cast((blues > reds) & \
(blues > greens), dtypes.float32)
# Min values
red_smallest = math_ops.cast((reds < blues) & \
(reds < greens), dtypes.float32)
green_smallest = math_ops.cast((greens <= reds) & \
(greens < blues), dtypes.float32)
blue_smallest = math_ops.cast((blues <= reds) & \
(blues <= greens), dtypes.float32)
# Derivatives of R, G, B wrt Value slice
dv_dr = red_biggest
dv_dg = green_biggest
dv_db = blue_biggest
# Derivatives of R, G, B wrt Saturation slice
# The first term in the addition is the case when the corresponding color
# from (r,g,b) was "MAX"
# -> derivative = MIN/square(MAX), MIN could be one of the other two colors
# The second term is the case when the corresponding color from
# (r,g,b) was "MIN"
# -> derivative = -1/MAX, MAX could be one of the other two colours.
ds_dr = math_ops.cast(reds > 0, dtypes.float32) * \
math_ops.add(red_biggest * \
math_ops.add(green_smallest * greens, blue_smallest * blues) * \
_CustomReciprocal(math_ops.square(reds)),\
red_smallest * -1 * _CustomReciprocal((green_biggest * \
greens) + (blue_biggest * blues)))
ds_dg = math_ops.cast(greens > 0, dtypes.float32) * \
math_ops.add(green_biggest * \
math_ops.add(red_smallest * reds, blue_smallest * blues) * \
_CustomReciprocal(math_ops.square(greens)),\
green_smallest * -1 * _CustomReciprocal((red_biggest * \
reds) + (blue_biggest * blues)))
ds_db = math_ops.cast(blues > 0, dtypes.float32) * \
math_ops.add(blue_biggest * \
math_ops.add(green_smallest * greens, red_smallest * reds) * \
_CustomReciprocal(math_ops.square(blues)),\
blue_smallest * -1 * _CustomReciprocal((green_biggest * \
greens) + (red_biggest * reds)))
# Derivatives of R, G, B wrt Hue slice
# Need to go case by case for each color.
# for red, dh_dr -> dh_dr_1 + dh_dr_2 + dh_dr_3 + dh_dr_4 + dh_dr_5
# dh_dr_1 ->
# if red was MAX, then derivative = 60 * -1 * (G-B)/square(MAX-MIN) == 60 *\
# -1 * (greens-blues) * reciprocal(square(saturation)) * \
  # reciprocal(square(value))
# elif green was MAX, there are two subcases
# ie when red was MIN and when red was NOT MIN
# dh_dr_2 ->
# if red was MIN (use UV rule) -> 60 * ((1 * -1/(MAX-MIN)) +\
# (B-R)*(-1/square(MAX-MIN) * -1)) == 60 * (blues - greens) *\
# reciprocal(square(reds - greens))
# dh_dr_3 ->
# if red was NOT MIN -> 60 * -1/MAX-MIN == -60 * reciprocal(greens-blues)
# elif blue was MAX, there are two subcases
# dh_dr_4 ->
# if red was MIN (similarly use the UV rule) -> 60 * (blues - greens) *\
# reciprocal(square(blues - reds))
# dh_dr_5 ->
# if red was NOT MIN -> 60 * 1/MAX-MIN == 60 * reciprocal(blues-greens)
dh_dr_1 = 60 * (math_ops.cast(reds > 0, dtypes.float32) * red_biggest * \
-1 * \
(greens - blues) * \
_CustomReciprocal(math_ops.square(saturation)) *\
_CustomReciprocal(math_ops.square(value)))
dh_dr_2 = 60 * (math_ops.cast(greens > 0, dtypes.float32) * green_biggest * \
red_smallest * (blues - greens) * \
_CustomReciprocal(math_ops.square(reds - greens)))
dh_dr_3 = 60 * (math_ops.cast(greens > 0, dtypes.float32) * green_biggest * \
blue_smallest * -1 * _CustomReciprocal(greens - blues))
dh_dr_4 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \
red_smallest * (blues - greens) * \
_CustomReciprocal(math_ops.square(blues - reds)))
dh_dr_5 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \
green_smallest * _CustomReciprocal(blues - greens))
dh_dr = dh_dr_1 + dh_dr_2 + dh_dr_3 + dh_dr_4 + dh_dr_5
# Converting from degrees to [0,1] scale as specified in
# https://www.tensorflow.org/api_docs/python/tf/image/rgb_to_hsv
dh_dr = dh_dr / 360
# for green, dh_dg -> dh_dg_1 + dh_dg_2 + dh_dg_3 + dh_dg_4 + dh_dg_5
# dh_dg_1 ->
# if green was MAX, then derivative = 60 * -1 * (B-R)/square(MAX-MIN) == 60 *\
# -1 * (blues - reds) * reciprocal(square(saturation)) * \
# reciprocal(square(value))
# elif red was MAX, there are two subcases ie
# when green was MIN and when green was NOT MIN
# dh_dg_2 ->
# if green was MIN (use UV rule) -> 60 * ((1 * 1/(MAX-MIN)) + \
# (greens-blues) * (-1/square(MAX-MIN) * -1)) == 60 * \
# ((reciprocal(reds-greens) + (greens-blues) * \
# reciprocal(square(reds-greens))))
# dh_dg_3 ->
# if green was NOT MIN -> 60 * 1/MAX-MIN == 60 * reciprocal(reds - blues)
# elif blue was MAX, there are two subcases
# dh_dg_4 ->
# if green was MIN (similarly use the UV rule) -> 60 * -1 * \
# (reciprocal(blues - greens) + (reds-greens)* -1 * \
# reciprocal(square(blues-greens)))
  # dh_dg_5 ->
# if green was NOT MIN -> 60 * -1/MAX-MIN == -60 * reciprocal(blues - reds)
dh_dg_1 = 60 * (math_ops.cast(greens > 0, dtypes.float32) * green_biggest * \
-1 * (blues - reds) * \
_CustomReciprocal(math_ops.square(saturation))\
* _CustomReciprocal(math_ops.square(value)))
dh_dg_2 = 60 * (math_ops.cast(reds > 0, dtypes.float32) * red_biggest * \
green_smallest * (reds - blues) * \
_CustomReciprocal(math_ops.square(reds - greens)))
dh_dg_3 = 60 * (math_ops.cast(reds > 0, dtypes.float32) * red_biggest * \
blue_smallest * _CustomReciprocal(reds - blues))
dh_dg_4 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \
green_smallest * (reds - blues) * \
_CustomReciprocal(math_ops.square(blues - greens)))
dh_dg_5 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \
red_smallest * -1 * _CustomReciprocal(blues - reds))
dh_dg = dh_dg_1 + dh_dg_2 + dh_dg_3 + dh_dg_4 + dh_dg_5
# Converting from degrees to [0,1] scale as specified in
# https://www.tensorflow.org/api_docs/python/tf/image/rgb_to_hsv
dh_dg = dh_dg / 360
# for blue, dh_db -> dh_db_1 + dh_db_2 + dh_db_3 + dh_db_4 + dh_db_5
# dh_db_1 ->
# if blue was MAX, then derivative = 60 * -1 * (R-G)/square(MAX-MIN) == 60 *\
  # -1 * (reds - greens) * reciprocal(square(saturation)) * reciprocal(square(value))
# elif red was MAX, there are two subcases
# ie when blue was MIN and when blue was NOT MIN
  # dh_db_2 ->
# if blue was MIN (use UV rule) -> 60 * ((1 * -1/(MAX-MIN)) + \
# (greens-blues) * (-1/square(MAX-MIN) * -1)) == 60 * (greens - reds) *\
# reciprocal(square(reds - blues))
  # dh_db_3 ->
# if blue was NOT MIN -> 60 * -1/MAX-MIN == 60 * -1 * \
# reciprocal(reds - greens)
# elif green was MAX, there are two subcases
  # dh_db_4 ->
# if blue was MIN (similarly use the UV rule) -> 60 * -1 * \
# (reciprocal(greens - blues) + (blues - reds) * -1 * \
# reciprocal(square(greens - blues)))
  # dh_db_5 ->
# if blue was NOT MIN -> 60 * 1/MAX-MIN == 60 * reciprocal(greens - reds)
dh_db_1 = 60 * (math_ops.cast(blues > 0, dtypes.float32) * blue_biggest * \
-1 * \
(reds - greens) * \
_CustomReciprocal(math_ops.square(saturation)) * \
_CustomReciprocal(math_ops.square(value)))
dh_db_2 = 60 * (math_ops.cast(reds > 0, dtypes.float32) * red_biggest *\
blue_smallest * (greens - reds) * \
_CustomReciprocal(math_ops.square(reds - blues)))
dh_db_3 = 60 * (math_ops.cast(reds > 0, dtypes.float32) * red_biggest * \
green_smallest * -1 * _CustomReciprocal(reds - greens))
dh_db_4 = 60 * (math_ops.cast(greens > 0, dtypes.float32) * green_biggest * \
blue_smallest * (greens - reds) * \
_CustomReciprocal(math_ops.square(greens - blues)))
dh_db_5 = 60 * (math_ops.cast(greens > 0, dtypes.float32) * green_biggest * \
red_smallest * _CustomReciprocal(greens - reds))
dh_db = dh_db_1 + dh_db_2 + dh_db_3 + dh_db_4 + dh_db_5
# Converting from degrees to [0,1] scale as specified in
# https://www.tensorflow.org/api_docs/python/tf/image/rgb_to_hsv
dh_db = dh_db / 360
# Gradients wrt to inputs
dv_drgb = array_ops.stack(
[grad[..., 2] * dv_dr, grad[..., 2] * dv_dg, grad[..., 2] * dv_db],
axis=-1)
ds_drgb = array_ops.stack(
[grad[..., 1] * ds_dr, grad[..., 1] * ds_dg, grad[..., 1] * ds_db],
axis=-1)
dh_drgb = array_ops.stack(
[grad[..., 0] * dh_dr, grad[..., 0] * dh_dg, grad[..., 0] * dh_db],
axis=-1)
gradient_input = math_ops.add(math_ops.add(dv_drgb, ds_drgb), dh_drgb)
return gradient_input
| apache-2.0 | 5,357,454,771,591,837,000 | 6,398,821,510,417,341,000 | 39.853018 | 141 | 0.613428 | false |
Bysmyyr/chromium-crosswalk | third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py | 39 | 13796 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.layout_tests.controllers import repaint_overlay
from webkitpy.layout_tests.models import test_failures
_log = logging.getLogger(__name__)
def write_test_result(filesystem, port, results_directory, test_name, driver_output,
expected_driver_output, failures):
"""Write the test result to the result output directory."""
root_output_dir = results_directory
writer = TestResultWriter(filesystem, port, root_output_dir, test_name)
if driver_output.error:
writer.write_stderr(driver_output.error)
for failure in failures:
# FIXME: Instead of this long 'if' block, each failure class might
# have a responsibility for writing a test result.
if isinstance(failure, (test_failures.FailureMissingResult,
test_failures.FailureTextMismatch,
test_failures.FailureTestHarnessAssertion)):
writer.write_text_files(driver_output.text, expected_driver_output.text)
writer.create_text_diff_and_write_result(driver_output.text, expected_driver_output.text)
elif isinstance(failure, test_failures.FailureMissingImage):
writer.write_image_files(driver_output.image, expected_image=None)
elif isinstance(failure, test_failures.FailureMissingImageHash):
writer.write_image_files(driver_output.image, expected_driver_output.image)
elif isinstance(failure, test_failures.FailureImageHashMismatch):
writer.write_image_files(driver_output.image, expected_driver_output.image)
writer.write_image_diff_files(driver_output.image_diff)
elif isinstance(failure, (test_failures.FailureAudioMismatch,
test_failures.FailureMissingAudio)):
writer.write_audio_files(driver_output.audio, expected_driver_output.audio)
elif isinstance(failure, test_failures.FailureCrash):
crashed_driver_output = expected_driver_output if failure.is_reftest else driver_output
writer.write_crash_log(crashed_driver_output.crash_log)
elif isinstance(failure, test_failures.FailureLeak):
writer.write_leak_log(driver_output.leak_log)
elif isinstance(failure, test_failures.FailureReftestMismatch):
writer.write_image_files(driver_output.image, expected_driver_output.image)
# FIXME: This work should be done earlier in the pipeline (e.g., when we compare images for non-ref tests).
# FIXME: We should always have 2 images here.
if driver_output.image and expected_driver_output.image:
diff_image, err_str = port.diff_image(expected_driver_output.image, driver_output.image)
if diff_image:
writer.write_image_diff_files(diff_image)
else:
_log.warn('ref test mismatch did not produce an image diff.')
writer.write_image_files(driver_output.image, expected_image=None)
if filesystem.exists(failure.reference_filename):
writer.write_reftest(failure.reference_filename)
else:
_log.warn("reference %s was not found" % failure.reference_filename)
elif isinstance(failure, test_failures.FailureReftestMismatchDidNotOccur):
writer.write_image_files(driver_output.image, expected_image=None)
if filesystem.exists(failure.reference_filename):
writer.write_reftest(failure.reference_filename)
else:
_log.warn("reference %s was not found" % failure.reference_filename)
else:
assert isinstance(failure, (test_failures.FailureTimeout, test_failures.FailureReftestNoImagesGenerated))
if expected_driver_output is not None:
writer.create_repaint_overlay_result(driver_output.text, expected_driver_output.text)
class TestResultWriter(object):
"""A class which handles all writing operations to the result directory."""
# Filename pieces when writing failures to the test results directory.
FILENAME_SUFFIX_ACTUAL = "-actual"
FILENAME_SUFFIX_EXPECTED = "-expected"
FILENAME_SUFFIX_DIFF = "-diff"
FILENAME_SUFFIX_STDERR = "-stderr"
FILENAME_SUFFIX_CRASH_LOG = "-crash-log"
FILENAME_SUFFIX_SAMPLE = "-sample"
FILENAME_SUFFIX_LEAK_LOG = "-leak-log"
FILENAME_SUFFIX_WDIFF = "-wdiff.html"
FILENAME_SUFFIX_PRETTY_PATCH = "-pretty-diff.html"
FILENAME_SUFFIX_IMAGE_DIFF = "-diff.png"
FILENAME_SUFFIX_IMAGE_DIFFS_HTML = "-diffs.html"
FILENAME_SUFFIX_OVERLAY = "-overlay.html"
def __init__(self, filesystem, port, root_output_dir, test_name):
self._filesystem = filesystem
self._port = port
self._root_output_dir = root_output_dir
self._test_name = test_name
def _make_output_directory(self):
"""Creates the output directory (if needed) for a given test filename."""
fs = self._filesystem
output_filename = fs.join(self._root_output_dir, self._test_name)
fs.maybe_make_directory(fs.dirname(output_filename))
def output_filename(self, modifier):
"""Returns a filename inside the output dir that contains modifier.
For example, if test name is "fast/dom/foo.html" and modifier is "-expected.txt",
the return value is "/<path-to-root-output-dir>/fast/dom/foo-expected.txt".
Args:
modifier: a string to replace the extension of filename with
Return:
The absolute path to the output filename
"""
fs = self._filesystem
output_filename = fs.join(self._root_output_dir, self._test_name)
return fs.splitext(output_filename)[0] + modifier
def _write_file(self, path, contents):
if contents is not None:
self._make_output_directory()
self._filesystem.write_binary_file(path, contents)
def _output_testname(self, modifier):
fs = self._filesystem
return fs.splitext(fs.basename(self._test_name))[0] + modifier
def write_output_files(self, file_type, output, expected):
"""Writes the test output, the expected output in the results directory.
The full output filename of the actual, for example, will be
<filename>-actual<file_type>
For instance,
my_test-actual.txt
Args:
file_type: A string describing the test output file type, e.g. ".txt"
output: A string containing the test output
expected: A string containing the expected test output
"""
actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)
expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)
self._write_file(actual_filename, output)
self._write_file(expected_filename, expected)
def write_stderr(self, error):
filename = self.output_filename(self.FILENAME_SUFFIX_STDERR + ".txt")
self._write_file(filename, error)
def write_crash_log(self, crash_log):
filename = self.output_filename(self.FILENAME_SUFFIX_CRASH_LOG + ".txt")
self._write_file(filename, crash_log.encode('utf8', 'replace'))
def write_leak_log(self, leak_log):
filename = self.output_filename(self.FILENAME_SUFFIX_LEAK_LOG + ".txt")
self._write_file(filename, leak_log)
def copy_sample_file(self, sample_file):
filename = self.output_filename(self.FILENAME_SUFFIX_SAMPLE + ".txt")
self._filesystem.copyfile(sample_file, filename)
def write_text_files(self, actual_text, expected_text):
self.write_output_files(".txt", actual_text, expected_text)
def create_text_diff_and_write_result(self, actual_text, expected_text):
# FIXME: This function is actually doing the diffs as well as writing results.
# It might be better to extract code which does 'diff' and make it a separate function.
if not actual_text or not expected_text:
return
file_type = '.txt'
actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)
expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)
# We treat diff output as binary. Diff output may contain multiple files
# in conflicting encodings.
diff = self._port.diff_text(expected_text, actual_text, expected_filename, actual_filename)
diff_filename = self.output_filename(self.FILENAME_SUFFIX_DIFF + file_type)
self._write_file(diff_filename, diff)
# Shell out to wdiff to get colored inline diffs.
if self._port.wdiff_available():
wdiff = self._port.wdiff_text(expected_filename, actual_filename)
wdiff_filename = self.output_filename(self.FILENAME_SUFFIX_WDIFF)
self._write_file(wdiff_filename, wdiff)
# Use WebKit's PrettyPatch.rb to get an HTML diff.
if self._port.pretty_patch_available():
pretty_patch = self._port.pretty_patch_text(diff_filename)
pretty_patch_filename = self.output_filename(self.FILENAME_SUFFIX_PRETTY_PATCH)
self._write_file(pretty_patch_filename, pretty_patch)
def create_repaint_overlay_result(self, actual_text, expected_text):
html = repaint_overlay.generate_repaint_overlay_html(self._test_name, actual_text, expected_text)
if html:
overlay_filename = self.output_filename(self.FILENAME_SUFFIX_OVERLAY)
self._write_file(overlay_filename, html)
def write_audio_files(self, actual_audio, expected_audio):
self.write_output_files('.wav', actual_audio, expected_audio)
def write_image_files(self, actual_image, expected_image):
self.write_output_files('.png', actual_image, expected_image)
def write_image_diff_files(self, image_diff):
diff_filename = self.output_filename(self.FILENAME_SUFFIX_IMAGE_DIFF)
self._write_file(diff_filename, image_diff)
diffs_html_filename = self.output_filename(self.FILENAME_SUFFIX_IMAGE_DIFFS_HTML)
# FIXME: old-run-webkit-tests shows the diff percentage as the text contents of the "diff" link.
# FIXME: old-run-webkit-tests include a link to the test file.
html = """<!DOCTYPE HTML>
<html>
<head>
<title>%(title)s</title>
<style>.label{font-weight:bold}</style>
</head>
<body>
Difference between images: <a href="%(diff_filename)s">diff</a><br>
<div class=imageText></div>
<div class=imageContainer data-prefix="%(prefix)s">Loading...</div>
<script>
(function() {
var preloadedImageCount = 0;
function preloadComplete() {
++preloadedImageCount;
if (preloadedImageCount < 2)
return;
toggleImages();
setInterval(toggleImages, 2000)
}
function preloadImage(url) {
image = new Image();
image.addEventListener('load', preloadComplete);
image.src = url;
return image;
}
function toggleImages() {
if (text.textContent == 'Expected Image') {
text.textContent = 'Actual Image';
container.replaceChild(actualImage, container.firstChild);
} else {
text.textContent = 'Expected Image';
container.replaceChild(expectedImage, container.firstChild);
}
}
var text = document.querySelector('.imageText');
var container = document.querySelector('.imageContainer');
var actualImage = preloadImage(container.getAttribute('data-prefix') + '-actual.png');
var expectedImage = preloadImage(container.getAttribute('data-prefix') + '-expected.png');
})();
</script>
</body>
</html>
""" % {
'title': self._test_name,
'diff_filename': self._output_testname(self.FILENAME_SUFFIX_IMAGE_DIFF),
'prefix': self._output_testname(''),
}
self._write_file(diffs_html_filename, html)
def write_reftest(self, src_filepath):
fs = self._filesystem
dst_dir = fs.dirname(fs.join(self._root_output_dir, self._test_name))
dst_filepath = fs.join(dst_dir, fs.basename(src_filepath))
self._write_file(dst_filepath, fs.read_binary_file(src_filepath))
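# Illustrative usage sketch -- not part of the original file. The `filesystem`
# and `port` objects below are hypothetical stand-ins for webkitpy's real host
# objects; the snippet only shows how the writer API is typically driven:
#
#     writer = TestResultWriter(filesystem, port,
#                               '/tmp/layout-test-results', 'fast/dom/foo.html')
#     writer.write_text_files(actual_text, expected_text)
#     writer.create_text_diff_and_write_result(actual_text, expected_text)
#     writer.write_stderr(driver_output.error)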
| bsd-3-clause | 27,593,427,123,925,630 | 247,603,097,263,918,300 | 45.92517 | 119 | 0.676355 | false |
alirizakeles/zato | code/zato-zmq/src/zato/zmq_/mdp/worker.py | 1 | 9531 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2016 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
import logging
import time
from datetime import datetime, timedelta
# ZeroMQ
import zmq.green as zmq
# Zato
from zato.zmq_.mdp import BaseZMQConnection, const, EventWorkerDisconnect, EventWorkerHeartbeat, EventReady, EventWorkerReply
# ################################################################################################################################
logger = logging.getLogger(__name__)
# ################################################################################################################################
class Worker(BaseZMQConnection):
""" Standalone implementation of a worker for ZeroMQ Majordomo Protocol 0.1 http://rfc.zeromq.org/spec:7
"""
def __init__(self, service_name, broker_address='tcp://localhost:47047', linger=0, poll_interval=100, log_details=False,
heartbeat=3, heartbeat_mult=2, reconnect_sleep=2):
self.service_name = service_name
super(Worker, self).__init__(broker_address, linger, poll_interval, log_details)
# How often, in seconds, to send a heartbeat to the broker or expect one from the broker
self.heartbeat = heartbeat
# If self.heartbeat * self.heartbeat_mult is exceeded, we assume the broker is down
self.heartbeat_mult = heartbeat_mult
# How long, in seconds, to wait before attempting to reconnect to the broker
self.reconnect_sleep = reconnect_sleep
# When did we last hear from the broker
self.broker_last_heartbeat = None
# When did we last send our own heartbeat to the broker
self.worker_last_heartbeat = None
# Timestamp of when we started to run
self.last_connected = datetime.utcnow()
self.has_debug = logger.isEnabledFor(logging.DEBUG)
# Maps event IDs to methods that handle a given one
self.handle_event_map = {
const.v01.request_to_worker: self.on_event_request_to_worker,
const.v01.heartbeat: self.on_event_heartbeat,
const.v01.disconnect: self.on_event_disconnect,
}
# ################################################################################################################################
def connect(self):
logger.info('Connecting to broker %s', self.broker_address)
# Open ZeroMQ sockets first
# From worker to broker
self.client_socket.connect(self.broker_address)
# From broker to worker
self.worker_socket = self.ctx.socket(zmq.DEALER)
self.worker_socket.linger = self.linger
self.worker_poller = zmq.Poller()
self.worker_poller.register(self.worker_socket, zmq.POLLIN)
self.worker_socket.connect(self.broker_address)
# Ok, we are ready
self.notify_ready()
# We can assume that the broker received our message
self.last_connected = datetime.utcnow()
# ################################################################################################################################
def stop(self):
self.worker_poller.unregister(self.worker_socket)
self.worker_socket.close()
self.stop_client_socket()
self.connect_client_socket()
logger.info('Stopped worker for %s', self.broker_address)
# ################################################################################################################################
def needs_reconnect(self):
base_timestamp = self.broker_last_heartbeat if self.broker_last_heartbeat else self.last_connected
return datetime.utcnow() >= base_timestamp + timedelta(seconds=self.heartbeat * self.heartbeat_mult)
# ################################################################################################################################
def reconnect(self):
last_hb = '{} (UTC)'.format(self.broker_last_heartbeat.isoformat()) if self.broker_last_heartbeat else 'never'
logger.info('Sleeping for %ss before reconnecting to broker %s, last HB from broker: %s',
self.reconnect_sleep, self.broker_address, last_hb)
time.sleep(self.reconnect_sleep)
logger.info('Reconnecting to broker %s', self.broker_address)
self.stop()
self.connect()
# Let's give the other side a moment to reply to our ready event
time.sleep(self.reconnect_sleep)
# ################################################################################################################################
def needs_hb_to_broker(self):
return datetime.utcnow() >= self.worker_last_heartbeat + timedelta(seconds=self.heartbeat)
# ################################################################################################################################
def serve_forever(self):
# To speed up look-ups
log_details = self.log_details
# Main loop
while self.keep_running:
try:
items = self.worker_poller.poll(self.poll_interval)
except KeyboardInterrupt:
self.notify_disconnect()
break
if items:
msg = self.worker_socket.recv_multipart()
if log_details:
logger.info('Received msg at %s %s', self.broker_address, msg)
self.handle(msg)
else:
if log_details:
logger.info('No items for worker at %s', self.broker_address)
if self.needs_hb_to_broker():
self.notify_heartbeat()
if self.needs_reconnect():
self.reconnect()
# ################################################################################################################################
def on_event_request_to_worker(self, msg):
logger.info('In _handle %s', msg)
return datetime.utcnow().isoformat()
# ################################################################################################################################
def on_event_heartbeat(self, *ignored):
""" A no-op since self.handle already handles heartbeats from the broker.
"""
# ################################################################################################################################
def on_event_disconnect(self, *ignored):
""" Our broker tells us to disconnect - according to the spec we now must re-open the connection.
"""
self.reconnect()
# ################################################################################################################################
def handle(self, msg):
logger.info('Handling %s', msg)
# Since we received this message, it means the broker is up so the message,
# no matter what event it is, allows us to update the timestamp of the last HB from broker
self.broker_last_heartbeat = datetime.utcnow()
sender_id = None
body = None
command = msg[2]
if command == const.v01.request_to_worker:
sender_id = msg[3]
body = msg[4]
# Hand over the message to an actual implementation and reply if told to
response = self.handle_event_map[command](body)
if response:
self.send(EventWorkerReply(response, sender_id).serialize())
# Message handled, we are ready to handle a new one, assuming this one was a request
if command == const.v01.request_to_worker:
self.notify_ready()
# ################################################################################################################################
def send(self, data, needs_hb=True):
""" Sends data to the broker and updates an internal timer of when the last time we send a heartbeat to the broker
since sending anything in that direction should be construed by the broker as a heartbeat itself.
"""
# Send data first
self.worker_socket.send_multipart(data)
# Update the timer
if needs_hb:
self.worker_last_heartbeat = datetime.utcnow()
# ################################################################################################################################
def notify_ready(self):
""" Notify the broker that we are ready to handle a new message.
"""
self.send(EventReady(self.service_name).serialize())
# ################################################################################################################################
def notify_heartbeat(self):
""" Notify the broker that we are still around.
"""
self.send(EventWorkerHeartbeat().serialize())
# ################################################################################################################################
def notify_disconnect(self):
""" Notify the broker that we are to disconnect from it.
"""
self.send(EventWorkerDisconnect().serialize(), needs_hb=False)
# ################################################################################################################################
if __name__ == '__main__':
w = Worker(b'My service', 'tcp://localhost:47047')
w.connect()
w.serve_forever()
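# Note (illustrative, not part of the original file): the demo above blocks in
# serve_forever() and expects an MDP 0.1 broker to be listening on
# tcp://localhost:47047; without one, needs_reconnect() keeps firing and the
# worker reconnects every few seconds.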
| gpl-3.0 | -6,911,233,677,694,049,000 | -6,142,441,434,644,398,000 | 37.587045 | 130 | 0.484 | false |
halwai/cvxpy | cvxpy/problems/iterative.py | 12 | 4961 | """
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
# Methods for SCS iterative solver.
from cvxpy.lin_ops.tree_mat import mul, tmul, sum_dicts
import numpy as np
import scipy.sparse.linalg as LA
def get_mul_funcs(sym_data):
def accAmul(x, y, is_abs=False):
# y += A*x
rows = y.shape[0]
var_dict = vec_to_dict(x, sym_data.var_offsets,
sym_data.var_sizes)
y += constr_mul(sym_data.constraints, var_dict, rows, is_abs)
def accATmul(x, y, is_abs=False):
# y += A.T*x
terms = constr_unpack(sym_data.constraints, x)
val_dict = constr_tmul(sym_data.constraints, terms, is_abs)
y += dict_to_vec(val_dict, sym_data.var_offsets,
sym_data.var_sizes, sym_data.x_length)
return (accAmul, accATmul)
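# Illustrative sketch -- not part of the original module. The accumulator
# callbacks returned above are typically wrapped in a scipy LinearOperator so
# an iterative solver can apply A and A.T without materializing A. `sym_data`
# and `rows` are hypothetical values taken from the problem data:
#
#     accAmul, accATmul = get_mul_funcs(sym_data)
#
#     def matvec(x):
#         y = np.zeros(rows)
#         accAmul(x, y)      # y += A*x
#         return y
#
#     def rmatvec(x):
#         y = np.zeros(sym_data.x_length)
#         accATmul(x, y)     # y += A.T*x
#         return y
#
#     A = LA.LinearOperator((rows, sym_data.x_length),
#                           matvec=matvec, rmatvec=rmatvec)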
def constr_unpack(constraints, vector):
"""Unpacks a vector into a list of values for constraints.
"""
values = []
offset = 0
for constr in constraints:
rows, cols = constr.size
val = np.zeros((rows, cols))
for col in range(cols):
val[:, col] = vector[offset:offset+rows]
offset += rows
values.append(val)
return values
def vec_to_dict(vector, var_offsets, var_sizes):
"""Converts a vector to a map of variable id to value.
Parameters
----------
vector : NumPy matrix
The vector of values.
var_offsets : dict
A map of variable id to offset in the vector.
var_sizes : dict
A map of variable id to variable size.
Returns
-------
dict
A map of variable id to variable value.
"""
val_dict = {}
for id_, offset in var_offsets.items():
size = var_sizes[id_]
value = np.zeros(size)
offset = var_offsets[id_]
for col in range(size[1]):
value[:, col] = vector[offset:size[0]+offset]
offset += size[0]
val_dict[id_] = value
return val_dict
def dict_to_vec(val_dict, var_offsets, var_sizes, vec_len):
"""Converts a map of variable id to value to a vector.
Parameters
----------
val_dict : dict
A map of variable id to value.
var_offsets : dict
A map of variable id to offset in the vector.
var_sizes : dict
A map of variable id to variable size.
    vec_len : int
        The length of the output vector to create and fill.
"""
# TODO take in vector.
vector = np.zeros(vec_len)
for id_, value in val_dict.items():
size = var_sizes[id_]
offset = var_offsets[id_]
for col in range(size[1]):
# Handle scalars separately.
if np.isscalar(value):
vector[offset:size[0]+offset] = value
else:
vector[offset:size[0]+offset] = np.squeeze(value[:, col])
offset += size[0]
return vector
def constr_mul(constraints, var_dict, vec_size, is_abs):
"""Multiplies a vector by the matrix implied by the constraints.
Parameters
----------
constraints : list
A list of linear constraints.
var_dict : dict
A dictionary mapping variable id to value.
vec_size : int
The length of the product vector.
is_abs : bool
Multiply by the absolute value of the matrix?
"""
product = np.zeros(vec_size)
offset = 0
for constr in constraints:
result = mul(constr.expr, var_dict, is_abs)
rows, cols = constr.size
for col in range(cols):
# Handle scalars separately.
if np.isscalar(result):
product[offset:offset+rows] = result
else:
product[offset:offset+rows] = np.squeeze(result[:, col])
offset += rows
return product
def constr_tmul(constraints, values, is_abs):
"""Multiplies a vector by the transpose of the constraints matrix.
Parameters
----------
constraints : list
A list of linear constraints.
values : list
A list of NumPy matrices.
is_abs : bool
Multiply by the absolute value of the matrix?
Returns
-------
dict
A mapping of variable id to value.
"""
products = []
for constr, val in zip(constraints, values):
products.append(tmul(constr.expr, val, is_abs))
return sum_dicts(products)
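if __name__ == "__main__":
    # Minimal round-trip check (illustrative, not part of the original file):
    # split a vector into per-variable values with vec_to_dict and reassemble
    # the same vector with dict_to_vec. The offsets/sizes are made-up data.
    offsets = {"x": 0, "y": 2}
    sizes = {"x": (2, 1), "y": (3, 1)}
    vec = np.arange(5.0)
    values = vec_to_dict(vec, offsets, sizes)
    assert np.allclose(dict_to_vec(values, offsets, sizes, 5), vec)
    print("vec_to_dict/dict_to_vec round-trip OK")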
| gpl-3.0 | -7,383,235,742,679,626,000 | -2,868,206,782,850,287,600 | 29.25 | 73 | 0.605725 | false |
jeffheaton/aifh | vol3/vol3-python-examples/examples/example_timeseries.py | 2 | 1695 | #!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 3: Deep Learning and Neural Networks
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2015 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
import os
import sys
# Find the AIFH core files
aifh_dir = os.path.dirname(os.path.abspath(__file__))
aifh_dir = os.path.abspath(aifh_dir + os.sep + ".." + os.sep + "lib" + os.sep + "aifh")
sys.path.append(aifh_dir)
import numpy as np
from window import *
# Create a simple 3-column dataset. This will hold the values:
# [1, 10, 100]
# [2, 20, 200]
# ...
# [10, 100, 1000]
raw_data = []
for i in range(1,11):
raw_data.append([i,i*10,i*100])
raw_data = np.array(raw_data)
result_x, result_y = encode_timeseries_window(raw_data, 3, 1, [True, True, True], [False, False, True])
result_x = np.array(result_x)
result_y = np.array(result_y)
for x,y in zip(result_x, result_y):
print("{} --> {}".format(x,y)) | apache-2.0 | -4,433,676,287,382,750,700 | -5,223,454,320,367,802,000 | 31 | 103 | 0.690855 | false |
pcingola/server | ga4gh/cli.py | 1 | 32399 | """
Command line interface programs for the GA4GH reference implementation.
TODO: document how to use these for development and simple deployment.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import unittest
import unittest.loader
import unittest.suite
import requests
import ga4gh.client as client
import ga4gh.converters as converters
import ga4gh.frontend as frontend
import ga4gh.configtest as configtest
import ga4gh.exceptions as exceptions
# the maximum value of a long type in avro = 2**63 - 1
# (64 bit signed integer)
# http://avro.apache.org/docs/1.7.7/spec.html#schema_primitive
# AVRO_LONG_MAX = (1 << 63) - 1
# TODO in the meantime, this is the max value pysam can handle
# This should be removed once pysam input sanitisation has been
# implemented.
AVRO_LONG_MAX = 2**31 - 1
##############################################################################
# Server
##############################################################################
def addServerOptions(parser):
parser.add_argument(
"--port", "-P", default=8000, type=int,
help="The port to listen on")
parser.add_argument(
"--host", "-H", default="127.0.0.1",
help="The server host string; use 0.0.0.0 to allow all connections.")
parser.add_argument(
"--config", "-c", default='DevelopmentConfig', type=str,
help="The configuration to use")
parser.add_argument(
"--config-file", "-f", type=str, default=None,
help="The configuration file to use")
parser.add_argument(
"--tls", "-t", action="store_true", default=False,
help="Start in TLS (https) mode.")
parser.add_argument(
"--dont-use-reloader", default=False, action="store_true",
help="Don't use the flask reloader")
addDisableUrllibWarningsArgument(parser)
def server_main(parser=None):
if parser is None:
parser = argparse.ArgumentParser(
description="GA4GH reference server")
addServerOptions(parser)
args = parser.parse_args()
if args.disable_urllib_warnings:
requests.packages.urllib3.disable_warnings()
frontend.configure(
args.config_file, args.config, args.port)
sslContext = None
if args.tls or ("OIDC_PROVIDER" in frontend.app.config):
sslContext = "adhoc"
frontend.app.run(
host=args.host, port=args.port,
use_reloader=not args.dont_use_reloader, ssl_context=sslContext)
##############################################################################
# Client
##############################################################################
def verbosityToLogLevel(verbosity):
"""
    Returns the specified verbosity level interpreted as a logging level.
"""
ret = 0
if verbosity == 1:
ret = logging.INFO
elif verbosity >= 2:
ret = logging.DEBUG
return ret
class AbstractQueryRunner(object):
"""
Abstract base class for runner classes
"""
def __init__(self, args):
self._key = args.key
self._httpClient = client.HttpClient(
args.baseUrl, verbosityToLogLevel(args.verbose), self._key)
class FormattedOutputRunner(AbstractQueryRunner):
"""
Superclass of runners that support output in common formats.
"""
def __init__(self, args):
super(FormattedOutputRunner, self).__init__(args)
self._output = self._textOutput
if args.outputFormat == "json":
self._output = self._jsonOutput
def _jsonOutput(self, gaObjects):
"""
Outputs the specified protocol objects as one JSON string per
line.
"""
for gaObject in gaObjects:
print(gaObject.toJsonString())
def _textOutput(self, gaObjects):
"""
Outputs a text summary of the specified protocol objects, one
per line.
"""
for gaObject in gaObjects:
print(gaObject.id, gaObject.name, sep="\t")
class AbstractGetRunner(FormattedOutputRunner):
"""
Abstract base class for get runner classes
"""
def __init__(self, args):
super(AbstractGetRunner, self).__init__(args)
self._id = args.id
self._httpClient = client.HttpClient(
args.baseUrl, verbosityToLogLevel(args.verbose), self._key)
def run(self):
response = self._method(self._id)
self._output([response])
class AbstractSearchRunner(FormattedOutputRunner):
"""
Abstract base class for search runner classes
"""
def __init__(self, args):
super(AbstractSearchRunner, self).__init__(args)
self._pageSize = args.pageSize
self._httpClient.setPageSize(self._pageSize)
def getAllDatasets(self):
"""
Returns all datasets on the server.
"""
return self._httpClient.searchDatasets()
def getAllVariantSets(self):
"""
Returns all variant sets on the server.
"""
for dataset in self.getAllDatasets():
iterator = self._httpClient.searchVariantSets(datasetId=dataset.id)
for variantSet in iterator:
yield variantSet
def getAllReadGroupSets(self):
"""
Returns all readgroup sets on the server.
"""
for dataset in self.getAllDatasets():
iterator = self._httpClient.searchReadGroupSets(
datasetId=dataset.id)
for readGroupSet in iterator:
yield readGroupSet
def getAllReferenceSets(self):
"""
Returns all reference sets on the server.
"""
return self._httpClient.searchReferenceSets()
# Runners for the various search methods
class SearchDatasetsRunner(AbstractSearchRunner):
"""
Runner class for the datasets/search method
"""
def __init__(self, args):
super(SearchDatasetsRunner, self).__init__(args)
def run(self):
iterator = self._httpClient.searchDatasets()
self._output(iterator)
class SearchReferenceSetsRunner(AbstractSearchRunner):
"""
Runner class for the referencesets/search method.
"""
def __init__(self, args):
super(SearchReferenceSetsRunner, self).__init__(args)
self._accession = args.accession
self._md5checksum = args.md5checksum
def run(self):
iterator = self._httpClient.searchReferenceSets(
accession=self._accession, md5checksum=self._md5checksum)
self._output(iterator)
class SearchReferencesRunner(AbstractSearchRunner):
"""
Runner class for the references/search method
"""
def __init__(self, args):
super(SearchReferencesRunner, self).__init__(args)
self._referenceSetId = args.referenceSetId
self._accession = args.accession
self._md5checksum = args.md5checksum
def _run(self, referenceSetId):
iterator = self._httpClient.searchReferences(
accession=self._accession, md5checksum=self._md5checksum,
referenceSetId=referenceSetId)
self._output(iterator)
def run(self):
if self._referenceSetId is None:
for referenceSet in self.getAllReferenceSets():
self._run(referenceSet.id)
else:
self._run(self._referenceSetId)
class SearchVariantSetsRunner(AbstractSearchRunner):
"""
Runner class for the variantsets/search method.
"""
def __init__(self, args):
super(SearchVariantSetsRunner, self).__init__(args)
self._datasetId = args.datasetId
def _run(self, datasetId):
iterator = self._httpClient.searchVariantSets(datasetId=datasetId)
self._output(iterator)
def run(self):
if self._datasetId is None:
for dataset in self.getAllDatasets():
self._run(dataset.id)
else:
self._run(self._datasetId)
class SearchReadGroupSetsRunner(AbstractSearchRunner):
"""
Runner class for the readgroupsets/search method
"""
def __init__(self, args):
super(SearchReadGroupSetsRunner, self).__init__(args)
self._datasetId = args.datasetId
self._name = args.name
def _run(self, datasetId):
iterator = self._httpClient.searchReadGroupSets(
datasetId=datasetId, name=self._name)
self._output(iterator)
def run(self):
if self._datasetId is None:
for dataset in self.getAllDatasets():
self._run(dataset.id)
else:
self._run(self._datasetId)
class SearchCallSetsRunner(AbstractSearchRunner):
"""
Runner class for the callsets/search method
"""
def __init__(self, args):
super(SearchCallSetsRunner, self).__init__(args)
self._variantSetId = args.variantSetId
self._name = args.name
def _run(self, variantSetId):
iterator = self._httpClient.searchCallSets(
variantSetId=variantSetId, name=self._name)
self._output(iterator)
def run(self):
if self._variantSetId is None:
for variantSet in self.getAllVariantSets():
self._run(variantSet.id)
else:
self._run(self._variantSetId)
class VariantFormatterMixin(object):
"""
Simple mixin to format variant objects.
"""
def _textOutput(self, gaObjects):
"""
Prints out the specified Variant objects in a VCF-like form.
"""
for variant in gaObjects:
print(
variant.id, variant.variantSetId, variant.names,
variant.referenceName, variant.start, variant.end,
variant.referenceBases, variant.alternateBases,
sep="\t", end="\t")
for key, value in variant.info.items():
print(key, value, sep="=", end=";")
print("\t", end="")
for c in variant.calls:
print(
c.callSetId, c.genotype, c.genotypeLikelihood, c.info,
c.phaseset, sep=":", end="\t")
print()
class SearchVariantsRunner(VariantFormatterMixin, AbstractSearchRunner):
"""
Runner class for the variants/search method.
"""
def __init__(self, args):
super(SearchVariantsRunner, self).__init__(args)
self._referenceName = args.referenceName
self._variantSetId = args.variantSetId
self._start = args.start
self._end = args.end
if args.callSetIds == []:
self._callSetIds = []
elif args.callSetIds == '*':
self._callSetIds = None
else:
self._callSetIds = args.callSetIds.split(",")
def _run(self, variantSetId):
iterator = self._httpClient.searchVariants(
start=self._start, end=self._end,
referenceName=self._referenceName,
variantSetId=variantSetId, callSetIds=self._callSetIds)
self._output(iterator)
def run(self):
if self._variantSetId is None:
for variantSet in self.getAllVariantSets():
self._run(variantSet.id)
else:
self._run(self._variantSetId)
class SearchReadsRunner(AbstractSearchRunner):
"""
Runner class for the reads/search method
"""
def __init__(self, args):
super(SearchReadsRunner, self).__init__(args)
self._start = args.start
self._end = args.end
self._referenceId = args.referenceId
self._readGroupIds = None
if args.readGroupIds is not None:
self._readGroupIds = args.readGroupIds.split(",")
def run(self):
# TODO add support for looking up ReadGroupSets and References
# like we do with SearchVariants and others.
iterator = self._httpClient.searchReads(
readGroupIds=self._readGroupIds, referenceId=self._referenceId,
start=self._start, end=self._end)
self._output(iterator)
def _textOutput(self, gaObjects):
"""
        Prints out the specified Read objects, one per line.
"""
for read in gaObjects:
# TODO add in some more useful output here.
print(read.id)
# ListReferenceBases is an oddball, and doesn't fit either get or
# search patterns.
class ListReferenceBasesRunner(AbstractQueryRunner):
"""
Runner class for the references/{id}/bases method
"""
def __init__(self, args):
super(ListReferenceBasesRunner, self).__init__(args)
self._referenceId = args.id
self._start = args.start
self._end = args.end
def run(self):
iterator = self._httpClient.listReferenceBases(
self._referenceId, self._start, self._end)
# TODO add support for FASTA output.
for segment in iterator:
print(segment, end="")
print()
# Runners for the various GET methods.
class GetReferenceSetRunner(AbstractGetRunner):
"""
Runner class for the referencesets/{id} method
"""
def __init__(self, args):
super(GetReferenceSetRunner, self).__init__(args)
self._method = self._httpClient.getReferenceSet
class GetReferenceRunner(AbstractGetRunner):
"""
Runner class for the references/{id} method
"""
def __init__(self, args):
super(GetReferenceRunner, self).__init__(args)
self._method = self._httpClient.getReference
class GetReadGroupSetRunner(AbstractGetRunner):
"""
Runner class for the readgroupsets/{id} method
"""
def __init__(self, args):
super(GetReadGroupSetRunner, self).__init__(args)
self._method = self._httpClient.getReadGroupSet
class GetReadGroupRunner(AbstractGetRunner):
"""
    Runner class for the readgroups/{id} method
"""
def __init__(self, args):
super(GetReadGroupRunner, self).__init__(args)
self._method = self._httpClient.getReadGroup
class GetCallsetRunner(AbstractGetRunner):
"""
Runner class for the callsets/{id} method
"""
def __init__(self, args):
super(GetCallsetRunner, self).__init__(args)
self._method = self._httpClient.getCallset
class GetDatasetRunner(AbstractGetRunner):
"""
Runner class for the datasets/{id} method
"""
def __init__(self, args):
super(GetDatasetRunner, self).__init__(args)
self._method = self._httpClient.getDataset
class GetVariantRunner(VariantFormatterMixin, AbstractGetRunner):
"""
Runner class for the variants/{id} method
"""
def __init__(self, args):
super(GetVariantRunner, self).__init__(args)
self._method = self._httpClient.getVariant
def addDisableUrllibWarningsArgument(parser):
parser.add_argument(
"--disable-urllib-warnings", default=False, action="store_true",
help="Disable urllib3 warnings")
def addVariantSearchOptions(parser):
"""
Adds common options to a variant searches command line parser.
"""
addVariantSetIdArgument(parser)
addReferenceNameArgument(parser)
addCallSetIdsArgument(parser)
addStartArgument(parser)
addEndArgument(parser)
addPageSizeArgument(parser)
def addVariantSetIdArgument(parser):
parser.add_argument(
"--variantSetId", "-V", default=None,
help="The variant set id to search over")
def addReferenceNameArgument(parser):
parser.add_argument(
"--referenceName", "-r", default="1",
help="Only return variants on this reference.")
def addCallSetIdsArgument(parser):
parser.add_argument(
"--callSetIds", "-c", default=[],
help="""Return variant calls which belong to call sets
with these IDs. Pass in IDs as a comma separated list (no spaces).
Use '*' to request all call sets (the quotes are important!).
""")
def addStartArgument(parser):
parser.add_argument(
"--start", "-s", default=0, type=int,
help="The start of the search range (inclusive).")
def addEndArgument(parser, defaultValue=AVRO_LONG_MAX):
parser.add_argument(
"--end", "-e", default=defaultValue, type=int,
help="The end of the search range (exclusive).")
def addIdArgument(parser):
parser.add_argument("id", default=None, help="The id of the object")
def addGetArguments(parser):
addUrlArgument(parser)
addIdArgument(parser)
addOutputFormatArgument(parser)
def addUrlArgument(parser):
"""
Adds the URL endpoint argument to the specified parser.
"""
parser.add_argument("baseUrl", help="The URL of the API endpoint")
def addOutputFormatArgument(parser):
parser.add_argument(
"--outputFormat", "-O", choices=['text', 'json'], default="text",
help=(
"The format for object output. Currently supported are "
"'text' (default), which gives a short summary of the object and "
"'json', which outputs each object in line-delimited JSON"))
def addAccessionArgument(parser):
parser.add_argument(
"--accession", default=None,
help="The accession to search for")
def addMd5ChecksumArgument(parser):
parser.add_argument(
"--md5checksum", default=None,
help="The md5checksum to search for")
def addPageSizeArgument(parser):
parser.add_argument(
"--pageSize", "-m", default=None, type=int,
help=(
"The maximum number of results returned in one page. "
"The default is to let the server decide how many "
"results to return in a single page."))
def addDatasetIdArgument(parser):
parser.add_argument(
"--datasetId", default=None,
help="The datasetId to search over")
def addReferenceSetIdArgument(parser):
parser.add_argument(
"--referenceSetId", default=None,
help="The referenceSet to search over")
def addNameArgument(parser):
parser.add_argument(
"--name", default=None,
help="The name to search over")
def addClientGlobalOptions(parser):
parser.add_argument(
'--verbose', '-v', action='count', default=0,
help="Increase verbosity; can be supplied multiple times")
parser.add_argument(
"--key", "-k", default='invalid',
help="Auth Key. Found on server index page.")
addDisableUrllibWarningsArgument(parser)
def addHelpParser(subparsers):
parser = subparsers.add_parser(
"help", description="ga4gh_client help",
help="show this help message and exit")
return parser
def addVariantsSearchParser(subparsers):
parser = subparsers.add_parser(
"variants-search",
description="Search for variants",
help="Search for variants.")
parser.set_defaults(runner=SearchVariantsRunner)
addUrlArgument(parser)
addOutputFormatArgument(parser)
addVariantSearchOptions(parser)
return parser
def addVariantSetsSearchParser(subparsers):
parser = subparsers.add_parser(
"variantsets-search",
description="Search for variantSets",
help="Search for variantSets.")
parser.set_defaults(runner=SearchVariantSetsRunner)
addOutputFormatArgument(parser)
addUrlArgument(parser)
addPageSizeArgument(parser)
addDatasetIdArgument(parser)
return parser
def addReferenceSetsSearchParser(subparsers):
parser = subparsers.add_parser(
"referencesets-search",
description="Search for referenceSets",
help="Search for referenceSets")
parser.set_defaults(runner=SearchReferenceSetsRunner)
addUrlArgument(parser)
addOutputFormatArgument(parser)
addPageSizeArgument(parser)
addAccessionArgument(parser)
addMd5ChecksumArgument(parser)
parser.add_argument(
"--assemblyId",
help="The assembly id to search for")
return parser
def addReferencesSearchParser(subparsers):
parser = subparsers.add_parser(
"references-search",
description="Search for references",
help="Search for references")
parser.set_defaults(runner=SearchReferencesRunner)
addUrlArgument(parser)
addOutputFormatArgument(parser)
addPageSizeArgument(parser)
addAccessionArgument(parser)
addMd5ChecksumArgument(parser)
addReferenceSetIdArgument(parser)
return parser
def addReadGroupSetsSearchParser(subparsers):
parser = subparsers.add_parser(
"readgroupsets-search",
description="Search for readGroupSets",
help="Search for readGroupSets")
parser.set_defaults(runner=SearchReadGroupSetsRunner)
addUrlArgument(parser)
addOutputFormatArgument(parser)
addPageSizeArgument(parser)
addDatasetIdArgument(parser)
addNameArgument(parser)
return parser
def addCallsetsSearchParser(subparsers):
parser = subparsers.add_parser(
"callsets-search",
description="Search for callSets",
help="Search for callSets")
parser.set_defaults(runner=SearchCallSetsRunner)
addUrlArgument(parser)
addOutputFormatArgument(parser)
addPageSizeArgument(parser)
addNameArgument(parser)
addVariantSetIdArgument(parser)
return parser
def addReadsSearchParser(subparsers):
parser = subparsers.add_parser(
"reads-search",
description="Search for reads",
help="Search for reads")
parser.set_defaults(runner=SearchReadsRunner)
addOutputFormatArgument(parser)
addReadsSearchParserArguments(parser)
return parser
def addDatasetsGetParser(subparsers):
parser = subparsers.add_parser(
"datasets-get",
description="Get a dataset",
help="Get a dataset")
parser.set_defaults(runner=GetDatasetRunner)
addGetArguments(parser)
def addDatasetsSearchParser(subparsers):
parser = subparsers.add_parser(
"datasets-search",
description="Search for datasets",
help="Search for datasets")
parser.set_defaults(runner=SearchDatasetsRunner)
addUrlArgument(parser)
addPageSizeArgument(parser)
addOutputFormatArgument(parser)
return parser
def addReadsSearchParserArguments(parser):
addUrlArgument(parser)
addPageSizeArgument(parser)
addStartArgument(parser)
addEndArgument(parser)
parser.add_argument(
"--readGroupIds", default=None,
help="The readGroupIds to search over")
parser.add_argument(
"--referenceId", default=None,
help="The referenceId to search over")
def addReferenceSetsGetParser(subparsers):
parser = subparsers.add_parser(
"referencesets-get",
description="Get a referenceset",
help="Get a referenceset")
parser.set_defaults(runner=GetReferenceSetRunner)
addGetArguments(parser)
def addReferencesGetParser(subparsers):
parser = subparsers.add_parser(
"references-get",
description="Get a reference",
help="Get a reference")
parser.set_defaults(runner=GetReferenceRunner)
addGetArguments(parser)
def addReadGroupSetsGetParser(subparsers):
parser = subparsers.add_parser(
"readgroupsets-get",
description="Get a read group set",
help="Get a read group set")
parser.set_defaults(runner=GetReadGroupSetRunner)
addGetArguments(parser)
def addReadGroupsGetParser(subparsers):
parser = subparsers.add_parser(
"readgroups-get",
description="Get a read group",
help="Get a read group")
parser.set_defaults(runner=GetReadGroupRunner)
addGetArguments(parser)
def addCallsetsGetParser(subparsers):
parser = subparsers.add_parser(
"callsets-get",
description="Get a callset",
help="Get a callset")
parser.set_defaults(runner=GetCallsetRunner)
addGetArguments(parser)
def addVariantsGetParser(subparsers):
parser = subparsers.add_parser(
"variants-get",
description="Get a variant",
help="Get a variant")
parser.set_defaults(runner=GetVariantRunner)
addGetArguments(parser)
def addReferencesBasesListParser(subparsers):
parser = subparsers.add_parser(
"references-list-bases",
description="List bases of a reference",
help="List bases of a reference")
parser.set_defaults(runner=ListReferenceBasesRunner)
addUrlArgument(parser)
addIdArgument(parser)
addStartArgument(parser)
addEndArgument(parser, defaultValue=None)
def getClientParser():
parser = argparse.ArgumentParser(
description="GA4GH reference client")
addClientGlobalOptions(parser)
subparsers = parser.add_subparsers(title='subcommands',)
addHelpParser(subparsers)
addVariantsSearchParser(subparsers)
addVariantSetsSearchParser(subparsers)
addReferenceSetsSearchParser(subparsers)
addReferencesSearchParser(subparsers)
addReadGroupSetsSearchParser(subparsers)
addCallsetsSearchParser(subparsers)
addReadsSearchParser(subparsers)
addDatasetsSearchParser(subparsers)
addReferenceSetsGetParser(subparsers)
addReferencesGetParser(subparsers)
addReadGroupSetsGetParser(subparsers)
addReadGroupsGetParser(subparsers)
addCallsetsGetParser(subparsers)
addVariantsGetParser(subparsers)
addDatasetsGetParser(subparsers)
addReferencesBasesListParser(subparsers)
return parser
def client_main():
parser = getClientParser()
args = parser.parse_args()
if "runner" not in args:
parser.print_help()
else:
if args.disable_urllib_warnings:
requests.packages.urllib3.disable_warnings()
try:
runner = args.runner(args)
runner.run()
except (exceptions.BaseClientException,
requests.exceptions.RequestException) as exception:
# TODO suppress exception unless debug settings are enabled
raise exception
##############################################################################
# ga2vcf
##############################################################################
class Ga2VcfRunner(SearchVariantsRunner):
"""
Runner class for the ga2vcf
"""
def __init__(self, args):
super(Ga2VcfRunner, self).__init__(args)
self._outputFile = args.outputFile
self._binaryOutput = False
if args.outputFormat == "bcf":
self._binaryOutput = True
def run(self):
variantSet = self._httpClient.getVariantSet(self._variantSetId)
iterator = self._httpClient.searchVariants(
start=self._start, end=self._end,
referenceName=self._referenceName,
variantSetId=self._variantSetId,
callSetIds=self._callSetIds)
# do conversion
vcfConverter = converters.VcfConverter(
variantSet, iterator, self._outputFile, self._binaryOutput)
vcfConverter.convert()
def addOutputFileArgument(parser):
parser.add_argument(
"--outputFile", "-o", default=None,
help="the file to write the output to")
def getGa2VcfParser():
parser = argparse.ArgumentParser(
description=(
"GA4GH VCF conversion tool. Converts variant information "
"stored in a GA4GH repository into VCF format."))
addClientGlobalOptions(parser)
addOutputFileArgument(parser)
addUrlArgument(parser)
parser.add_argument("variantSetId", help="The variant set to convert")
parser.add_argument(
"--outputFormat", "-O", choices=['vcf', 'bcf'], default="vcf",
help=(
"The format for object output. Currently supported are "
"'vcf' (default), which is a text-based format and "
"'bcf', which is the binary equivalent"))
addReferenceNameArgument(parser)
addCallSetIdsArgument(parser)
addStartArgument(parser)
addEndArgument(parser)
addPageSizeArgument(parser)
return parser
def ga2vcf_main():
parser = getGa2VcfParser()
args = parser.parse_args()
if "baseUrl" not in args:
parser.print_help()
else:
runner = Ga2VcfRunner(args)
runner.run()
##############################################################################
# ga2sam
##############################################################################
class Ga2SamRunner(SearchReadsRunner):
"""
    Runner class for the ga2sam
"""
def __init__(self, args):
args.readGroupIds = args.readGroupId
super(Ga2SamRunner, self).__init__(args)
self._outputFile = args.outputFile
self._binaryOutput = False
if args.outputFormat == "bam":
self._binaryOutput = True
def run(self):
readGroup = self._httpClient.getReadGroup(self._readGroupIds[0])
iterator = self._httpClient.searchReads(
readGroupIds=self._readGroupIds, referenceId=self._referenceId,
start=self._start, end=self._end)
# do conversion
samConverter = converters.SamConverter(
readGroup, iterator, self._outputFile, self._binaryOutput)
samConverter.convert()
def getGa2SamParser():
parser = argparse.ArgumentParser(
description="GA4GH SAM conversion tool")
addClientGlobalOptions(parser)
addUrlArgument(parser)
parser.add_argument(
"readGroupId",
help="The ReadGroup to convert to SAM/BAM format.")
addPageSizeArgument(parser)
addStartArgument(parser)
addEndArgument(parser)
parser.add_argument(
"--referenceId", default=None,
help="The referenceId to search over")
parser.add_argument(
"--outputFormat", "-O", default="sam", choices=["sam", "bam"],
help=(
"The format for object output. Currently supported are "
"'sam' (default), which is a text-based format and "
"'bam', which is the binary equivalent"))
addOutputFileArgument(parser)
return parser
def ga2sam_main():
parser = getGa2SamParser()
args = parser.parse_args()
if "baseUrl" not in args:
parser.print_help()
else:
runner = Ga2SamRunner(args)
runner.run()
##############################################################################
# Configuration testing
##############################################################################
class SimplerResult(unittest.TestResult):
"""
The TestResult class gives formatted tracebacks as error messages, which
is not what we want. Instead we just want the error message from the
    err param. Hence this subclass.
"""
def addError(self, test, err):
self.errors.append((test,
"{0}: {1}".format(err[0].__name__, err[1])))
def addFailure(self, test, err):
self.failures.append((test,
"{0}: {1}".format(err[0].__name__, err[1])))
def configtest_main(parser=None):
if parser is None:
parser = argparse.ArgumentParser(
description="GA4GH server configuration validator")
parser.add_argument(
"--config", "-c", default='DevelopmentConfig', type=str,
help="The configuration to use")
parser.add_argument(
"--config-file", "-f", type=str, default=None,
help="The configuration file to use")
args = parser.parse_args()
configStr = 'ga4gh.serverconfig:{0}'.format(args.config)
configtest.TestConfig.configStr = configStr
configtest.TestConfig.configFile = args.config_file
configtest.TestConfig.configEnv = "GA4GH_CONFIGURATION"
loader = unittest.TestLoader()
tests = loader.loadTestsFromModule(configtest)
results = SimplerResult()
tests.run(results)
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
log.info('{0} Tests run. {1} errors, {2} failures, {3} skipped'.
format(results.testsRun,
len(results.errors),
len(results.failures),
len(results.skipped)))
for result in results.errors:
if result is not None:
log.critical('Error: {0}: {1}'.format(result[0].id(), result[1]))
for result in results.failures:
if result is not None:
log.critical('Failure: {0}: {1}'.format(result[0].id(), result[1]))
for result in results.skipped:
if result is not None:
log.info('Skipped: {0}: {1}'.format(result[0].id(), result[1]))
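# Note (illustrative, not part of the original module): the *_main() functions
# above are meant to be exposed as console-script entry points, e.g. a
# setup.py along the lines of
#
#     entry_points={'console_scripts': [
#         'ga4gh_server=ga4gh.cli:server_main',
#         'ga4gh_client=ga4gh.cli:client_main',
#         'ga2vcf=ga4gh.cli:ga2vcf_main',
#         'ga2sam=ga4gh.cli:ga2sam_main',
#     ]}
#
# The exact script names here are assumptions; see the project's setup.py.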
| apache-2.0 | 3,246,826,149,598,245,400 | -5,434,474,136,596,055,000 | 30.212909 | 79 | 0.635112 | false |
cgwalters/pykickstart | tests/orderedset.py | 3 | 1277 | import unittest
from pykickstart.orderedset import OrderedSet
class OrderedSet_TestCase(unittest.TestCase):
def runTest(self):
# __eq__, __len__, etc.
self.assertEqual(OrderedSet([]), OrderedSet([]))
self.assertEqual(OrderedSet([1, 2, 3]), OrderedSet([1, 2, 3]))
self.assertEqual(OrderedSet([1, 2, 3]), [1, 2, 3])
# __reversed__
self.assertEqual(reversed(OrderedSet([2, 4, 1, 3])), OrderedSet([3, 1, 4, 2]))
# discard
self.assertEqual(len(OrderedSet(["one", "two", "three"])), 3)
os = OrderedSet(["one", "two", "three"])
os.discard("two")
self.assertEqual(len(os), 2)
os = OrderedSet(["one", "two", "three"])
os.discard("four")
self.assertEqual(len(os), 3)
# pop
self.assertRaises(KeyError, OrderedSet().pop)
self.assertEqual(OrderedSet(["one", "two", "three"]).pop(), "three")
self.assertEqual(OrderedSet(["one"]).pop(), "one")
os = OrderedSet(["one"])
os.pop()
self.assertEqual(len(os), 0)
# __repr__
self.assertEqual(repr(OrderedSet()), "OrderedSet()")
self.assertEqual(repr(OrderedSet([1, 2, 3])), "OrderedSet([1, 2, 3])")
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | -973,026,867,611,034,600 | -6,406,317,808,038,262,000 | 33.513514 | 86 | 0.556774 | false |
antonygc/liblightbase | liblightbase/lbdoc/metaclass.py | 1 | 6065 | from liblightbase import lbutils
from liblightbase.lbdoc.metadata import DocumentMetadata
def generate_metaclass(struct, base=None):
"""
    Generate the document metaclass. The document metaclass
    is an abstraction of the document model defined by the base
    structures.
@param struct: Field or Group object.
@param base: Base object or None.
"""
build_metadata = False
if base is None:
base = struct
build_metadata = True
snames = struct.content.__snames__
rnames = struct.content.__rnames__
class MetaClass(object):
"""
        Document metaclass. Describes the structures defined by
        the document structure model.
"""
# @property __valreq__: Flag used to validate required
# fields or not.
__valreq__ = True
# @property __slots__: reserves space for the declared
# variables and prevents the automatic creation of
# __dict__ and __weakref__ for each instance.
__slots__ = ['_' + sname for sname in snames]
if build_metadata:
__slots__.append('__metadata__')
def __init__(self, **kwargs):
""" Document MetaClass constructor
"""
if self.__valreq__:
lbutils.validate_required(rnames, kwargs)
for arg in kwargs:
setattr(self, arg, kwargs[arg])
for childstruct in struct.content:
structname, prop = generate_property(base, childstruct)
setattr(MetaClass, structname, prop)
if build_metadata:
MetaClass._metadata = build_metadata_prop()
MetaClass.__name__ = struct.metadata.name
return MetaClass
def generate_property(base, struct):
"""
Make python's property based on structure attributes.
@param base: Base object.
@param struct: Field or Group object.
"""
if struct.is_field:
structname = struct.name
elif struct.is_group:
structname = struct.metadata.name
attr_name = '_' + structname
def getter(self):
value = getattr(self, attr_name)
if struct.is_field:
return getattr(value, '__value__')
return value
def setter(self, value):
struct_metaclass = base.metaclass(structname)
if struct.is_field:
value = struct_metaclass(value)
elif struct.is_group:
if struct.metadata.multivalued:
msg = 'object {} should be instance of {}'.format(
struct.metadata.name, list)
assert isinstance(value, list), msg
msg = '{} list elements should be instances of {}'.format(
struct.metadata.name, struct_metaclass)
assertion = all(isinstance(element, struct_metaclass) \
for element in value)
assert assertion, msg
value = generate_multimetaclass(struct,
struct_metaclass)(value)
else:
msg = '{} object should be an instance of {}'.format(
struct.metadata.name, struct_metaclass)
assert isinstance(value, struct_metaclass), msg
setattr(self, attr_name, value)
def deleter(self):
delattr(self, attr_name)
return structname, property(getter,
setter, deleter, structname)
def build_metadata_prop():
def fget(self):
return self.__metadata__
def fset(self, value):
msg = '_metadata attribute should be a DocumentMetadata object.'
assert isinstance(value, DocumentMetadata)
self.__metadata__ = value
def fdel(self):
del self.__metadata__
return property(fget, fset, fdel, '_metadata')
def generate_multimetaclass(struct, struct_metaclass):
"""
Generate metaclass to use with multivalued groups.
@param struct: Field or Group object
@param struct_metaclass: The struct Metaclass
"""
class MultiGroupMetaClass(list):
"""
        Multivalued Group Metaclass. Metaclass used to ensure list
        elements are instances of the right metaclass.
"""
def __setitem__(self, index, element):
""" x.__setitem__(y, z) <==> x[y] = z
"""
msg = '{} list elements should be instances of {}'.format(
struct.metadata.name, struct_metaclass)
assert isinstance(element, struct_metaclass), msg
return super(MultiGroupMetaClass, self).__setitem__(index,
element)
def append(self, element):
""" L.append(object) -- append object to end
"""
msg = '{} list elements should be instances of {}'.format(
struct.metadata.name, struct_metaclass)
assert isinstance(element, struct_metaclass), msg
return super(MultiGroupMetaClass, self).append(element)
return MultiGroupMetaClass
def generate_field_metaclass(field, base):
"""
    Generate the field metaclass. The field metaclass
    validates incoming values against the field's datatype.
@param field: Field object.
@param base: Base object.
"""
class FieldMetaClass(object):
"""
        Field MetaClass. Validates incoming
        values against the field's datatype.
"""
def __init__(self, value):
self.__value__ = value
def __setattr__(self, obj, value):
validator = field._datatype.__schema__(base, field, 0)
if field.multivalued is True:
msg = 'Expected type list for {}, but found {}'
assert isinstance(value, list), msg.format(
field.name, type(value))
value = [validator(element) for element in value]
else:
value = validator(value)
super(FieldMetaClass, self).__setattr__('__value__', value)
def __getattr__(self, obj):
return super(FieldMetaClass, self).__getattribute__('__value__')
FieldMetaClass.__name__ = field.name
return FieldMetaClass
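# Illustrative sketch -- not part of the original module. Assuming a Base
# object (`base`, hypothetical) whose structure defines a required field
# "name", the generated metaclass validates on construction and on assignment:
#
#     Person = generate_metaclass(base)
#     doc = Person(name='Ada')      # missing required names raise an error
#     doc.name = 'Lovelace'         # value is run through the field's
#                                   # datatype validator before being stored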
| gpl-2.0 | 1,282,288,448,267,029,200 | -4,100,457,103,713,895,000 | 33.460227 | 76 | 0.588458 | false |
aio-libs/aiozmq | examples/core_dealer_router.py | 1 | 1579 | import asyncio
import aiozmq
import zmq
class ZmqDealerProtocol(aiozmq.ZmqProtocol):
transport = None
def __init__(self, queue, on_close):
self.queue = queue
self.on_close = on_close
def connection_made(self, transport):
self.transport = transport
def msg_received(self, msg):
self.queue.put_nowait(msg)
def connection_lost(self, exc):
self.on_close.set_result(exc)
class ZmqRouterProtocol(aiozmq.ZmqProtocol):
transport = None
def __init__(self, on_close):
self.on_close = on_close
def connection_made(self, transport):
self.transport = transport
def msg_received(self, msg):
self.transport.write(msg)
def connection_lost(self, exc):
self.on_close.set_result(exc)
async def go():
router_closed = asyncio.Future()
dealer_closed = asyncio.Future()
router, _ = await aiozmq.create_zmq_connection(
lambda: ZmqRouterProtocol(router_closed), zmq.ROUTER, bind="tcp://127.0.0.1:*"
)
addr = list(router.bindings())[0]
queue = asyncio.Queue()
dealer, _ = await aiozmq.create_zmq_connection(
lambda: ZmqDealerProtocol(queue, dealer_closed), zmq.DEALER, connect=addr
)
for i in range(10):
msg = (b"data", b"ask", str(i).encode("utf-8"))
dealer.write(msg)
answer = await queue.get()
print(answer)
dealer.close()
await dealer_closed
router.close()
await router_closed
def main():
asyncio.run(go())
print("DONE")
if __name__ == "__main__":
main()
| bsd-2-clause | -2,206,074,230,738,931,000 | 1,522,380,401,982,336,000 | 21.239437 | 86 | 0.621279 | false |
pyfa-org/eos | eos/item/mixin/effect_stats/remote_repair.py | 1 | 1829 | # ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos.eve_obj.effect.repairs.base import RemoteArmorRepairEffect
from eos.eve_obj.effect.repairs.base import RemoteShieldRepairEffect
from eos.item.mixin.base import BaseItemMixin
class RemoteRepairMixin(BaseItemMixin):
def __repair_effect_iter(self, effect_class):
for effect in self._type_effects.values():
if not isinstance(effect, effect_class):
continue
if effect.id not in self._running_effect_ids:
continue
yield effect
def get_armor_rps(self, reload=False):
rps = 0
for effect in self.__repair_effect_iter(RemoteArmorRepairEffect):
rps += effect.get_rps(self, reload=reload)
return rps
def get_shield_rps(self, reload=False):
rps = 0
for effect in self.__repair_effect_iter(RemoteShieldRepairEffect):
rps += effect.get_rps(self, reload=reload)
return rps
| lgpl-3.0 | 2,898,848,563,206,567,400 | -2,198,580,888,664,857,300 | 37.914894 | 80 | 0.636413 | false |
bfirsh/django-old | django/contrib/localflavor/ie/ie_counties.py | 503 | 1127 | """
Sources:
Irish Counties: http://en.wikipedia.org/wiki/Counties_of_Ireland
"""
from django.utils.translation import ugettext_lazy as _
IE_COUNTY_CHOICES = (
('antrim', _('Antrim')),
('armagh', _('Armagh')),
('carlow', _('Carlow')),
('cavan', _('Cavan')),
('clare', _('Clare')),
('cork', _('Cork')),
('derry', _('Derry')),
('donegal', _('Donegal')),
('down', _('Down')),
('dublin', _('Dublin')),
('fermanagh', _('Fermanagh')),
('galway', _('Galway')),
('kerry', _('Kerry')),
('kildare', _('Kildare')),
('kilkenny', _('Kilkenny')),
('laois', _('Laois')),
('leitrim', _('Leitrim')),
('limerick', _('Limerick')),
('longford', _('Longford')),
('louth', _('Louth')),
('mayo', _('Mayo')),
('meath', _('Meath')),
('monaghan', _('Monaghan')),
('offaly', _('Offaly')),
('roscommon', _('Roscommon')),
('sligo', _('Sligo')),
('tipperary', _('Tipperary')),
('tyrone', _('Tyrone')),
('waterford', _('Waterford')),
('westmeath', _('Westmeath')),
('wexford', _('Wexford')),
('wicklow', _('Wicklow')),
)
| bsd-3-clause | -4,039,257,596,218,367,500 | -3,062,824,494,397,734,400 | 27.175 | 72 | 0.47205 | false |
netfirms/erpnext | erpnext/crm/doctype/newsletter_list/newsletter_list.py | 15 | 3079 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import validate_email_add
from frappe import _
from email.utils import parseaddr
class NewsletterList(Document):
def onload(self):
singles = [d.name for d in frappe.db.get_all("DocType", "name", {"issingle": 1})]
self.get("__onload").import_types = [{"value": d.parent, "label": "{0} ({1})".format(d.parent, d.label)} \
for d in frappe.db.get_all("DocField", ("parent", "label"), {"options": "Email"}) if d.parent not in singles]
def import_from(self, doctype):
"""Extract email ids from given doctype and add them to the current list"""
meta = frappe.get_meta(doctype)
email_field = [d.fieldname for d in meta.fields if d.fieldtype in ("Data", "Small Text") and d.options=="Email"][0]
unsubscribed_field = "unsubscribed" if meta.get_field("unsubscribed") else None
added = 0
for user in frappe.db.get_all(doctype, [email_field, unsubscribed_field or "name"]):
try:
email = parseaddr(user.get(email_field))[1]
if email:
frappe.get_doc({
"doctype": "Newsletter List Subscriber",
"newsletter_list": self.name,
"email": email,
"unsubscribed": user.get(unsubscribed_field) if unsubscribed_field else 0
}).insert(ignore_permissions=True)
added += 1
except Exception, e:
# already added, ignore
if e.args[0]!=1062:
raise
frappe.msgprint(_("{0} subscribers added").format(added))
return self.update_total_subscribers()
def update_total_subscribers(self):
self.total_subscribers = self.get_total_subscribers()
self.db_update()
return self.total_subscribers
def get_total_subscribers(self):
return frappe.db.sql("""select count(*) from `tabNewsletter List Subscriber`
where newsletter_list=%s""", self.name)[0][0]
def on_trash(self):
for d in frappe.get_all("Newsletter List Subscriber", "name", {"newsletter_list": self.name}):
frappe.delete_doc("Newsletter List Subscriber", d.name)
@frappe.whitelist()
def import_from(name, doctype):
nlist = frappe.get_doc("Newsletter List", name)
if nlist.has_permission("write"):
return nlist.import_from(doctype)
@frappe.whitelist()
def add_subscribers(name, email_list):
if not isinstance(email_list, (list, tuple)):
email_list = email_list.replace(",", "\n").split("\n")
count = 0
for email in email_list:
email = email.strip()
validate_email_add(email, True)
if email:
if not frappe.db.get_value("Newsletter List Subscriber",
{"newsletter_list": name, "email": email}):
frappe.get_doc({
"doctype": "Newsletter List Subscriber",
"newsletter_list": name,
"email": email
}).insert(ignore_permissions = frappe.flags.ignore_permissions)
count += 1
else:
pass
frappe.msgprint(_("{0} subscribers added").format(count))
return frappe.get_doc("Newsletter List", name).update_total_subscribers()
| agpl-3.0 | 2,007,260,510,382,502,000 | 3,700,175,683,274,159,000 | 33.595506 | 117 | 0.690809 | false |
bollu/polymage | sandbox/apps/python/img_proc/harris/init.py | 1 | 1485 | import sys
import os.path
from PIL import Image
import numpy as np
from arg_parser import parse_args
from printer import print_header, print_usage, print_line
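# Initialization helpers for the Harris benchmark app: parse the command-line
# options, load the input image, and allocate the input/output buffers used
# by the pipeline.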
def init_images(app_data):
print("[init.py] : initializing images...")
app_args = app_data['app_args']
# input image:
img_path = app_args.img_file
img = np.array(Image.open(img_path).convert('1'))
rows, cols = img.shape
# convert to float image
IN = np.array(img)
IN = IN.astype(np.float32).ravel()
# final output image
OUT = np.zeros((rows, cols), np.float32).ravel()
img_data = {}
img_data['IN'] = IN
img_data['OUT'] = OUT
app_data['img_data'] = img_data
app_data['rows'] = rows
app_data['cols'] = cols
return
def get_input(app_data):
# parse the command-line arguments
app_args = parse_args()
app_data['app_args'] = app_args
app_data['mode'] = app_args.mode
app_data['runs'] = int(app_args.runs)
app_data['graph_gen'] = bool(app_args.graph_gen)
app_data['timer'] = app_args.timer
# storage optimization
app_data['optimize_storage'] = bool(app_args.optimize_storage)
# early freeing of allocated arrays
app_data['early_free'] = bool(app_args.early_free)
# pool allocate option
app_data['pool_alloc'] = bool(app_args.pool_alloc)
return
def init_all(app_data):
pipe_data = {}
app_data['pipe_data'] = pipe_data
get_input(app_data)
init_images(app_data)
return
| apache-2.0 | 8,622,597,208,586,231,000 | -9,012,673,933,444,810,000 | 22.203125 | 66 | 0.630976 | false |
lucalianas/openmicroscopy | components/tools/OmeroPy/test/integration/test_files.py | 9 | 2553 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2014 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test of client upload/download functionality
"""
import pytest
import library as lib
from omero.util.temp_files import create_path
def tmpfile():
file = create_path()
file.write_lines(["abc", "def", "123"])
return file
class TestFiles(lib.ITest):
def testUploadDownload(self):
uploaded = tmpfile()
downloaded = create_path()
ofile = self.client.upload(str(uploaded), type="text/plain")
self.client.download(ofile, str(downloaded))
lines = downloaded.lines()
assert "abc\n" == lines[0], lines[0]
assert "def\n" == lines[1], lines[1]
assert "123\n" == lines[2], lines[2]
sha1_upload = self.client.sha1(str(uploaded))
sha1_download = self.client.sha1(str(downloaded))
assert sha1_upload == sha1_download, "%s!=%s" % (
sha1_upload, sha1_download)
@pytest.mark.broken(ticket="11610")
def testUploadDifferentSizeTicket2337(self):
uploaded = tmpfile()
ofile = self.client.upload(str(uploaded), type="text/plain")
uploaded.write_lines(["abc", "def"]) # Shorten
ofile = self.client.upload(
str(uploaded), type="text/plain", ofile=ofile)
downloaded = create_path()
self.client.download(ofile, str(downloaded))
lines = downloaded.lines()
assert 2 == len(lines)
assert "abc\n" == lines[0], lines[0]
assert "def\n" == lines[1], lines[1]
sha1_upload = self.client.sha1(str(uploaded))
sha1_download = self.client.sha1(str(downloaded))
assert sha1_upload == sha1_download, "%s!=%s" % (
sha1_upload, sha1_download)
| gpl-2.0 | 4,049,459,252,324,650,500 | -1,072,425,501,067,757,200 | 33.972603 | 73 | 0.658049 | false |
zhanrnl/ag | webapp/models/roomassignment.py | 1 | 2751 | from google.appengine.ext import ndb
from models.grading import SITTING_ROOM_TYPES
from models.team import (
Team,
Contestant,
)
from models.sitting import Sitting
import random
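# Maps a single requested test to the combined sitting type whose room it is
# seated in (e.g. an algebra-only contestant joins the 'alg,at' sitting); see
# get_sitting_type() below.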
SINGLE_TEST_SITTING = {
'alg' : 'alg,at',
'at' : 'at,calc',
'calc' : 'calc,geo',
'geo' : 'at,geo',
'team' : 'power,team',
'power' : 'power,team'
}
class RoomAssignment(ndb.Model):
testing_id = ndb.StringProperty(required=True)
sitting_nid = ndb.IntegerProperty(required=True)
@classmethod
def assign_team(cls, team_id):
team = Team.get_by_team_id(team_id)
contestants = Contestant.fetch_by_team(team.key)
cls.delete_team_assignments(team.key)
def get_sitting_type(tests):
if len(tests) == 1 and tests[0] != 'gen':
return SINGLE_TEST_SITTING[tests[0]]
return ','.join(sorted(list(tests)))
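        # Choose a sitting at random, weighted by each sitting's capacity.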
def select_sitting(sitting_type):
sittings = Sitting.fetch_by_exam(sitting_type)
weights = [s.capacity for s in sittings]
total = sum(weights)
index = random.randint(1, total)
counter = 0
i = 0
while (counter < index):
counter += weights[i]
i += 1
return sittings[i-1]
def assign_to_sitting(testing_id, tests, size):
sitting_type = get_sitting_type(tests)
sitting = select_sitting(sitting_type)
assignment = RoomAssignment(
testing_id=testing_id,
sitting_nid=sitting.nid,
parent=team.key,
)
assignment.put()
if len(team.team_tests) > 0:
assign_to_sitting(str(team_id), team.team_tests, len(contestants))
for c in contestants:
if len(c.tests) == 0: continue
assign_to_sitting(c.contestant_id, c.tests, 1)
@staticmethod
def get_assigned_team_ids():
team_ids = set()
all_room_assignments = RoomAssignment.query().fetch()
for ra in all_room_assignments:
try:
team_id = int(ra.testing_id)
team_ids.add(team_id)
except ValueError as e:
continue
return list(team_ids)
@staticmethod
def delete_all():
ndb.delete_multi(RoomAssignment.query().iter(keys_only=True))
@staticmethod
def delete_team_assignments(team_key):
for a in RoomAssignment.query(ancestor=team_key).fetch():
a.key.delete()
@staticmethod
def fetch_by_team(team_key):
return RoomAssignment.query(ancestor=team_key).fetch()
@staticmethod
def fetch_all():
return RoomAssignment.query().fetch()
| mit | 2,788,121,117,376,553,000 | -4,902,259,198,531,969,000 | 27.957895 | 78 | 0.571429 | false |
google/tf-quant-finance | tf_quant_finance/experimental/pricing_platform/framework/market_data/market_data_test.py | 1 | 5816 | # Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the market data."""
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
core = tff.experimental.pricing_platform.framework.core
market_data = tff.experimental.pricing_platform.framework.market_data
interpolation_method = tff.experimental.pricing_platform.framework.core.interpolation_method
@test_util.run_all_in_graph_and_eager_modes
class MarketDataTest(tf.test.TestCase):
def setUp(self):
valuation_date = [(2020, 6, 24)]
fixing_dates = [(2020, 2, 24), (2020, 3, 12), (2020, 4, 14), (2020, 5, 21)]
fixing_rates = [0.01, 0.02, 0.03, 0.025]
dates = [[2021, 2, 8], [2022, 2, 8], [2023, 2, 8], [2025, 2, 8],
[2027, 2, 8], [2030, 2, 8], [2050, 2, 8]]
discounts = [0.97197441, 0.94022746, 0.91074031, 0.85495089, 0.8013675,
0.72494879, 0.37602059]
vol_dates = [
[2021, 2, 8], [2022, 2, 8], [2023, 2, 8], [2025, 2, 8], [2027, 2, 8]]
strikes = [[1500, 1550, 1510],
[1500, 1550, 1510],
[1500, 1550, 1510],
[1500, 1550, 1510],
[1500, 1550, 1510]]
volatilities = [[0.1, 0.12, 0.13],
[0.15, 0.2, 0.15],
[0.1, 0.2, 0.1],
[0.1, 0.2, 0.1],
[0.1, 0.1, 0.3]]
risk_free_dates = [
[2021, 2, 8], [2022, 2, 8], [2023, 2, 8], [2025, 2, 8], [2050, 2, 8]]
risk_free_discounts = [
0.97197441, 0.94022746, 0.91074031, 0.85495089, 0.37602059]
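    # Market data layout: per-currency rate curves (risk-free, OIS, LIBOR 3M
    # with historical fixings), per-currency equity spots and volatility
    # surfaces, and the valuation (reference) date.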
self._market_data_dict = {
"rates": {
"USD": {
"risk_free_curve": {
"dates": risk_free_dates, "discounts": risk_free_discounts
},
"OIS": {
"dates": dates, "discounts": discounts
},
"LIBOR_3M": {
"dates": dates,
"discounts": discounts,
"fixing_dates": fixing_dates,
"fixing_rates": fixing_rates,
"fixing_daycount": "ACTUAL_365",
"config": {
"interpolation_method": interpolation_method.
InterpolationMethod.LINEAR
}
},
},
},
"equities": {
"USD": {
"GOOG": {
"spot": 1500,
"volatility_surface": {
"dates": vol_dates,
"strikes": strikes,
"implied_volatilities": volatilities
}
}
}
},
"reference_date": valuation_date,
}
self._libor_discounts = discounts
self._risk_free_discounts = risk_free_discounts
super(MarketDataTest, self).setUp()
def test_discount_curve(self):
market = market_data.MarketDataDict(
self._market_data_dict)
# Get the risk free discount curve
risk_free_curve_type = core.curve_types.RiskFreeCurve(currency="USD")
risk_free_curve = market.yield_curve(risk_free_curve_type)
# Get LIBOR 3M discount
libor_3m = core.rate_indices.RateIndex(type="LIBOR_3M")
rate_index_curve_type = core.curve_types.RateIndexCurve(
currency="USD", index=libor_3m)
libor_3m_curve = market.yield_curve(rate_index_curve_type)
with self.subTest("RiskFree"):
discount_factor_nodes = risk_free_curve.discount_factor_nodes
self.assertAllClose(discount_factor_nodes, self._risk_free_discounts)
with self.subTest("LIBOR_3M"):
discount_factor_nodes = libor_3m_curve.discount_factor_nodes
self.assertAllClose(discount_factor_nodes, self._libor_discounts)
def test_volatility(self):
market = market_data.MarketDataDict(
self._market_data_dict)
# Get volatility surface
vol_surface = market.volatility_surface(currency=["USD", "USD"],
asset=["GOOG", "GOOG"])
expiry = tff.datetime.dates_from_year_month_day(
year=[[2023], [2030]], month=[[5], [10]], day=[[10], [15]])
vols = vol_surface.volatility(expiry_dates=expiry, strike=[[1510], [1520]])
self.assertAllClose(
self.evaluate(vols), [[0.108], [0.31]], atol=1e-6)
def test_fixings(self):
market = market_data.MarketDataDict(
self._market_data_dict)
index_curve_3m = core.curve_types.RateIndexCurve(
"USD", core.rate_indices.RateIndex(type="LIBOR_3M"))
index_curve_ois = core.curve_types.RateIndexCurve(
"USD", core.rate_indices.RateIndex(type="OIS"))
dates = [(2020, 5, 24), (2020, 3, 24)]
with self.subTest("LIBOR_3M"):
fixings, fixings_daycount = market.fixings(dates, index_curve_3m)
self.assertAllClose(
self.evaluate(fixings), [0.025, 0.03], atol=1e-6)
self.assertEqual(fixings_daycount.value, "ACTUAL_365")
with self.subTest("OIS"):
fixings, _ = market.fixings(dates, index_curve_ois)
self.assertAllClose(
self.evaluate(fixings), [0.0, 0.0], atol=1e-6)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | 7,668,316,204,056,016,000 | 3,881,299,222,452,990,000 | 39.388889 | 95 | 0.569464 | false |
liluo/pygments-main | external/markdown-processor.py | 42 | 2041 | # -*- coding: utf-8 -*-
"""
The Pygments Markdown Preprocessor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This fragment is a Markdown_ preprocessor that renders source code
to HTML via Pygments. To use it, invoke Markdown like so::
import markdown
html = markdown.markdown(someText, extensions=[CodeBlockExtension()])
This uses CSS classes by default, so use
``pygmentize -S <some style> -f html > pygments.css``
to create a stylesheet to be added to the website.
You can then highlight source code in your markdown markup::
[sourcecode:lexer]
some code
[/sourcecode]
.. _Markdown: https://pypi.python.org/pypi/Markdown
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Options
# ~~~~~~~
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
import re
from markdown.preprocessors import Preprocessor
from markdown.extensions import Extension
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name, TextLexer
class CodeBlockPreprocessor(Preprocessor):
pattern = re.compile(r'\[sourcecode:(.+?)\](.+?)\[/sourcecode\]', re.S)
formatter = HtmlFormatter(noclasses=INLINESTYLES)
def run(self, lines):
def repl(m):
try:
lexer = get_lexer_by_name(m.group(1))
except ValueError:
lexer = TextLexer()
code = highlight(m.group(2), lexer, self.formatter)
code = code.replace('\n\n', '\n \n').replace('\n', '<br />')
return '\n\n<div class="code">%s</div>\n\n' % code
joined_lines = "\n".join(lines)
joined_lines = self.pattern.sub(repl, joined_lines)
return joined_lines.split("\n")
class CodeBlockExtension(Extension):
def extendMarkdown(self, md, md_globals):
md.preprocessors.add('CodeBlockPreprocessor', CodeBlockPreprocessor(), '_begin')
| bsd-2-clause | -8,591,548,385,679,040,000 | 1,895,797,200,751,012,600 | 29.462687 | 88 | 0.644292 | false |
eLBati/server-tools | dbfilter_from_header/__init__.py | 44 | 1534 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>).
# This module copyright (C) 2014 ACSONE SA/NV (<http://acsone.eu>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from openerp import http
db_filter_org = http.db_filter
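# Keep a reference to the stock filter, then monkey-patch http.db_filter
# below: the original filtering runs first and its result is narrowed by the
# regex passed in the X-Odoo-DbFilter / X-OpenERP-DbFilter request header.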
def db_filter(dbs, httprequest=None):
dbs = db_filter_org(dbs, httprequest)
httprequest = httprequest or http.request.httprequest
db_filter_hdr = \
httprequest.environ.get('HTTP_X_ODOO_DBFILTER') or \
httprequest.environ.get('HTTP_X_OPENERP_DBFILTER')
if db_filter_hdr:
dbs = [db for db in dbs if re.match(db_filter_hdr, db)]
return dbs
http.db_filter = db_filter
| agpl-3.0 | 7,245,721,539,676,350,000 | 8,752,831,924,597,540,000 | 39.368421 | 78 | 0.627771 | false |
towerjoo/DjangoNotes | Django-1.5.1/tests/modeltests/model_forms/models.py | 44 | 8840 | """
XX. Generating HTML forms from models
This is mostly just a reworking of the ``form_for_model``/``form_for_instance``
tests to use ``ModelForm``. As such, the text may not make sense in all cases,
and the examples are probably a poor fit for the ``ModelForm`` syntax. In other
words, most of these tests should be rewritten.
"""
from __future__ import unicode_literals
import os
import tempfile
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
temp_storage_dir = tempfile.mkdtemp(dir=os.environ['DJANGO_TEST_TEMP_DIR'])
temp_storage = FileSystemStorage(temp_storage_dir)
ARTICLE_STATUS = (
(1, 'Draft'),
(2, 'Pending'),
(3, 'Live'),
)
ARTICLE_STATUS_CHAR = (
('d', 'Draft'),
('p', 'Pending'),
('l', 'Live'),
)
@python_2_unicode_compatible
class Category(models.Model):
name = models.CharField(max_length=20)
slug = models.SlugField(max_length=20)
url = models.CharField('The URL', max_length=40)
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
@python_2_unicode_compatible
class Writer(models.Model):
name = models.CharField(max_length=50, help_text='Use both first and last names.')
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=50)
slug = models.SlugField()
pub_date = models.DateField()
created = models.DateField(editable=False)
writer = models.ForeignKey(Writer)
article = models.TextField()
categories = models.ManyToManyField(Category, blank=True)
status = models.PositiveIntegerField(choices=ARTICLE_STATUS, blank=True, null=True)
def save(self):
import datetime
if not self.id:
self.created = datetime.date.today()
return super(Article, self).save()
def __str__(self):
return self.headline
class ImprovedArticle(models.Model):
article = models.OneToOneField(Article)
class ImprovedArticleWithParentLink(models.Model):
article = models.OneToOneField(Article, parent_link=True)
class BetterWriter(Writer):
score = models.IntegerField()
@python_2_unicode_compatible
class WriterProfile(models.Model):
writer = models.OneToOneField(Writer, primary_key=True)
age = models.PositiveIntegerField()
def __str__(self):
return "%s is %s" % (self.writer, self.age)
@python_2_unicode_compatible
class TextFile(models.Model):
description = models.CharField(max_length=20)
file = models.FileField(storage=temp_storage, upload_to='tests', max_length=15)
def __str__(self):
return self.description
try:
# If PIL is available, try testing ImageFields. Checking for the existence
# of Image is enough for CPython, but for PyPy, you need to check for the
    # underlying modules. If PIL is not available, ImageField tests are omitted.
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import Image, _imaging
except ImportError:
import Image, _imaging
test_images = True
@python_2_unicode_compatible
class ImageFile(models.Model):
def custom_upload_path(self, filename):
path = self.path or 'tests'
return '%s/%s' % (path, filename)
description = models.CharField(max_length=20)
# Deliberately put the image field *after* the width/height fields to
# trigger the bug in #10404 with width/height not getting assigned.
width = models.IntegerField(editable=False)
height = models.IntegerField(editable=False)
image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
width_field='width', height_field='height')
path = models.CharField(max_length=16, blank=True, default='')
def __str__(self):
return self.description
@python_2_unicode_compatible
class OptionalImageFile(models.Model):
def custom_upload_path(self, filename):
path = self.path or 'tests'
return '%s/%s' % (path, filename)
description = models.CharField(max_length=20)
image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
width_field='width', height_field='height',
blank=True, null=True)
width = models.IntegerField(editable=False, null=True)
height = models.IntegerField(editable=False, null=True)
path = models.CharField(max_length=16, blank=True, default='')
def __str__(self):
return self.description
except ImportError:
test_images = False
@python_2_unicode_compatible
class CommaSeparatedInteger(models.Model):
field = models.CommaSeparatedIntegerField(max_length=20)
def __str__(self):
return self.field
@python_2_unicode_compatible
class Product(models.Model):
slug = models.SlugField(unique=True)
def __str__(self):
return self.slug
@python_2_unicode_compatible
class Price(models.Model):
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.PositiveIntegerField()
def __str__(self):
return "%s for %s" % (self.quantity, self.price)
class Meta:
unique_together = (('price', 'quantity'),)
class ArticleStatus(models.Model):
status = models.CharField(max_length=2, choices=ARTICLE_STATUS_CHAR, blank=True, null=True)
@python_2_unicode_compatible
class Inventory(models.Model):
barcode = models.PositiveIntegerField(unique=True)
parent = models.ForeignKey('self', to_field='barcode', blank=True, null=True)
name = models.CharField(blank=False, max_length=20)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
class Book(models.Model):
title = models.CharField(max_length=40)
author = models.ForeignKey(Writer, blank=True, null=True)
special_id = models.IntegerField(blank=True, null=True, unique=True)
class Meta:
unique_together = ('title', 'author')
class BookXtra(models.Model):
isbn = models.CharField(max_length=16, unique=True)
suffix1 = models.IntegerField(blank=True, default=0)
suffix2 = models.IntegerField(blank=True, default=0)
class Meta:
unique_together = (('suffix1', 'suffix2'))
abstract = True
class DerivedBook(Book, BookXtra):
pass
@python_2_unicode_compatible
class ExplicitPK(models.Model):
key = models.CharField(max_length=20, primary_key=True)
desc = models.CharField(max_length=20, blank=True, unique=True)
class Meta:
unique_together = ('key', 'desc')
def __str__(self):
return self.key
@python_2_unicode_compatible
class Post(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField()
    def __str__(self):
        # the model defines no "name" field; return the title instead
        return self.title
class DerivedPost(Post):
pass
@python_2_unicode_compatible
class BigInt(models.Model):
biggie = models.BigIntegerField()
def __str__(self):
return six.text_type(self.biggie)
class MarkupField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs["max_length"] = 20
super(MarkupField, self).__init__(*args, **kwargs)
def formfield(self, **kwargs):
# don't allow this field to be used in form (real use-case might be
# that you know the markup will always be X, but it is among an app
# that allows the user to say it could be something else)
# regressed at r10062
return None
class CustomFieldForExclusionModel(models.Model):
name = models.CharField(max_length=10)
markup = MarkupField()
class FlexibleDatePost(models.Model):
title = models.CharField(max_length=50, unique_for_date='posted', blank=True)
slug = models.CharField(max_length=50, unique_for_year='posted', blank=True)
subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True)
posted = models.DateField(blank=True, null=True)
@python_2_unicode_compatible
class Colour(models.Model):
name = models.CharField(max_length=50)
def __iter__(self):
for number in xrange(5):
yield number
def __str__(self):
return self.name
class ColourfulItem(models.Model):
name = models.CharField(max_length=50)
colours = models.ManyToManyField(Colour)
| mit | -1,333,387,139,168,596,000 | -9,075,085,504,755,940,000 | 30.571429 | 95 | 0.669231 | false |
syaiful6/django | django/contrib/gis/geos/prototypes/topology.py | 338 | 2145 | """
This module houses the GEOS ctypes prototype functions for the
topological operations on geometries.
"""
from ctypes import c_double, c_int
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_minus_one, check_string,
)
from django.contrib.gis.geos.prototypes.geom import geos_char_p
class Topology(GEOSFuncFactory):
"For GEOS unary topology functions."
argtypes = [GEOM_PTR]
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
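# Each factory below wraps the GEOS C function of the same name: it is called
# with geometry pointer(s) (plus any extra ctypes arguments) and returns a new
# GEOM_PTR, with the errcheck hook validating the result.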
# Topology Routines
geos_boundary = Topology('GEOSBoundary')
geos_buffer = Topology('GEOSBuffer', argtypes=[GEOM_PTR, c_double, c_int])
geos_centroid = Topology('GEOSGetCentroid')
geos_convexhull = Topology('GEOSConvexHull')
geos_difference = Topology('GEOSDifference', argtypes=[GEOM_PTR, GEOM_PTR])
geos_envelope = Topology('GEOSEnvelope')
geos_intersection = Topology('GEOSIntersection', argtypes=[GEOM_PTR, GEOM_PTR])
geos_linemerge = Topology('GEOSLineMerge')
geos_pointonsurface = Topology('GEOSPointOnSurface')
geos_preservesimplify = Topology('GEOSTopologyPreserveSimplify', argtypes=[GEOM_PTR, c_double])
geos_simplify = Topology('GEOSSimplify', argtypes=[GEOM_PTR, c_double])
geos_symdifference = Topology('GEOSSymDifference', argtypes=[GEOM_PTR, GEOM_PTR])
geos_union = Topology('GEOSUnion', argtypes=[GEOM_PTR, GEOM_PTR])
geos_cascaded_union = GEOSFuncFactory('GEOSUnionCascaded', argtypes=[GEOM_PTR], restype=GEOM_PTR)
# GEOSRelate returns a string, not a geometry.
geos_relate = GEOSFuncFactory(
'GEOSRelate', argtypes=[GEOM_PTR, GEOM_PTR], restype=geos_char_p, errcheck=check_string
)
# Linear referencing routines
geos_project = GEOSFuncFactory(
'GEOSProject', argtypes=[GEOM_PTR, GEOM_PTR], restype=c_double, errcheck=check_minus_one
)
geos_interpolate = Topology('GEOSInterpolate', argtypes=[GEOM_PTR, c_double])
geos_project_normalized = GEOSFuncFactory(
'GEOSProjectNormalized', argtypes=[GEOM_PTR, GEOM_PTR], restype=c_double, errcheck=check_minus_one
)
geos_interpolate_normalized = Topology('GEOSInterpolateNormalized', argtypes=[GEOM_PTR, c_double])
| bsd-3-clause | -5,856,886,059,290,412,000 | 215,667,910,030,549,340 | 40.25 | 102 | 0.767832 | false |
Endika/mitmproxy | libmproxy/contentviews.py | 1 | 16688 | """
Mitmproxy Content Views
=======================
mitmproxy includes a set of content views which can be used to format/decode/highlight data.
While they are currently used for HTTP message bodies only, the may be used in other contexts
in the future, e.g. to decode protobuf messages sent as WebSocket frames.
Thus, the View API is very minimalistic. The only arguments are `data` and `**metadata`,
where `data` is the actual content (as bytes). The contents on metadata depend on the protocol in
use. For HTTP, the message headers are passed as the ``headers`` keyword argument.
"""
from __future__ import (absolute_import, print_function, division)
import cStringIO
import json
import logging
import subprocess
import sys
import lxml.html
import lxml.etree
import datetime
from PIL import Image
from PIL.ExifTags import TAGS
import html2text
import six
from netlib.odict import ODict
from netlib import encoding
from netlib.utils import clean_bin, hexdump, urldecode, multipartdecode, parse_content_type
from . import utils
from .exceptions import ContentViewException
from .contrib import jsbeautifier
from .contrib.wbxml.ASCommandResponse import ASCommandResponse
try:
import pyamf
from pyamf import remoting, flex
except ImportError: # pragma nocover
pyamf = None
try:
import cssutils
except ImportError: # pragma nocover
cssutils = None
else:
cssutils.log.setLevel(logging.CRITICAL)
cssutils.ser.prefs.keepComments = True
cssutils.ser.prefs.omitLastSemicolon = False
cssutils.ser.prefs.indentClosingBrace = False
cssutils.ser.prefs.validOnly = False
# Default view cutoff *in lines*
VIEW_CUTOFF = 512
KEY_MAX = 30
def format_dict(d):
"""
Helper function that transforms the given dictionary into a list of
("key", key )
("value", value)
tuples, where key is padded to a uniform width.
"""
max_key_len = max(len(k) for k in d.keys())
max_key_len = min(max_key_len, KEY_MAX)
for key, value in d.items():
key += ":"
key = key.ljust(max_key_len + 2)
yield [
("header", key),
("text", value)
]
def format_text(text):
"""
Helper function that transforms bytes into the view output format.
"""
for line in text.splitlines():
yield [("text", line)]
class View(object):
name = None
prompt = ()
content_types = []
def __call__(self, data, **metadata):
"""
Transform raw data into human-readable output.
Args:
data: the data to decode/format as bytes.
metadata: optional keyword-only arguments for metadata. Implementations must not
rely on a given argument being present.
Returns:
A (description, content generator) tuple.
The content generator yields lists of (style, text) tuples, where each list represents
            a single line. ``text`` is an unfiltered byte string which may need to be escaped,
depending on the used output.
Caveats:
The content generator must not yield tuples of tuples,
because urwid cannot process that. You have to yield a *list* of tuples per line.
"""
raise NotImplementedError()
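# A hypothetical example (not part of the original module, shown for
# illustration only): a custom view subclasses View and returns a
# (description, line generator) pair, e.g.
#
#     class ViewUpper(View):
#         name = "Upper"
#         prompt = ("upper", "U")
#         content_types = []
#
#         def __call__(self, data, **metadata):
#             return "Uppercase", format_text(data.upper())
#
# Such a view could then be registered with add(ViewUpper()).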
class ViewAuto(View):
name = "Auto"
prompt = ("auto", "a")
content_types = []
def __call__(self, data, **metadata):
headers = metadata.get("headers", {})
ctype = headers.get("content-type")
if ctype:
ct = parse_content_type(ctype) if ctype else None
ct = "%s/%s" % (ct[0], ct[1])
if ct in content_types_map:
return content_types_map[ct][0](data, **metadata)
elif utils.isXML(data):
return get("XML")(data, **metadata)
if utils.isMostlyBin(data):
return get("Hex")(data)
return get("Raw")(data)
class ViewRaw(View):
name = "Raw"
prompt = ("raw", "r")
content_types = []
def __call__(self, data, **metadata):
return "Raw", format_text(data)
class ViewHex(View):
name = "Hex"
prompt = ("hex", "e")
content_types = []
@staticmethod
def _format(data):
for offset, hexa, s in hexdump(data):
yield [
("offset", offset + " "),
("text", hexa + " "),
("text", s)
]
def __call__(self, data, **metadata):
return "Hex", self._format(data)
class ViewXML(View):
name = "XML"
prompt = ("xml", "x")
content_types = ["text/xml"]
def __call__(self, data, **metadata):
parser = lxml.etree.XMLParser(
remove_blank_text=True,
resolve_entities=False,
strip_cdata=False,
recover=False
)
try:
document = lxml.etree.fromstring(data, parser)
except lxml.etree.XMLSyntaxError:
return None
docinfo = document.getroottree().docinfo
prev = []
p = document.getroottree().getroot().getprevious()
while p is not None:
prev.insert(
0,
lxml.etree.tostring(p)
)
p = p.getprevious()
doctype = docinfo.doctype
if prev:
doctype += "\n".join(prev).strip()
doctype = doctype.strip()
s = lxml.etree.tostring(
document,
pretty_print=True,
xml_declaration=True,
doctype=doctype or None,
encoding=docinfo.encoding
)
return "XML-like data", format_text(s)
class ViewJSON(View):
name = "JSON"
prompt = ("json", "s")
content_types = ["application/json"]
def __call__(self, data, **metadata):
pretty_json = utils.pretty_json(data)
if pretty_json:
return "JSON", format_text(pretty_json)
class ViewHTML(View):
name = "HTML"
prompt = ("html", "h")
content_types = ["text/html"]
def __call__(self, data, **metadata):
if utils.isXML(data):
parser = lxml.etree.HTMLParser(
strip_cdata=True,
remove_blank_text=True
)
d = lxml.html.fromstring(data, parser=parser)
docinfo = d.getroottree().docinfo
s = lxml.etree.tostring(
d,
pretty_print=True,
doctype=docinfo.doctype,
encoding='utf8'
)
return "HTML", format_text(s)
class ViewHTMLOutline(View):
name = "HTML Outline"
prompt = ("html outline", "o")
content_types = ["text/html"]
def __call__(self, data, **metadata):
data = data.decode("utf-8")
h = html2text.HTML2Text(baseurl="")
h.ignore_images = True
h.body_width = 0
outline = h.handle(data)
return "HTML Outline", format_text(outline)
class ViewURLEncoded(View):
name = "URL-encoded"
prompt = ("urlencoded", "u")
content_types = ["application/x-www-form-urlencoded"]
def __call__(self, data, **metadata):
d = urldecode(data)
return "URLEncoded form", format_dict(ODict(d))
class ViewMultipart(View):
name = "Multipart Form"
prompt = ("multipart", "m")
content_types = ["multipart/form-data"]
@staticmethod
def _format(v):
yield [("highlight", "Form data:\n")]
for message in format_dict(ODict(v)):
yield message
def __call__(self, data, **metadata):
headers = metadata.get("headers", {})
v = multipartdecode(headers, data)
if v:
return "Multipart form", self._format(v)
if pyamf:
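    # Register a fallback class loader so AMF payloads referencing unknown
    # class aliases decode into a plain dict-like DummyObject instead of
    # failing, keeping the AMF view usable for arbitrary responses.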
class DummyObject(dict):
def __init__(self, alias):
dict.__init__(self)
def __readamf__(self, input):
data = input.readObject()
self["data"] = data
def pyamf_class_loader(s):
for i in pyamf.CLASS_LOADERS:
if i != pyamf_class_loader:
v = i(s)
if v:
return v
return DummyObject
pyamf.register_class_loader(pyamf_class_loader)
class ViewAMF(View):
name = "AMF"
prompt = ("amf", "f")
content_types = ["application/x-amf"]
def unpack(self, b, seen=set([])):
if hasattr(b, "body"):
return self.unpack(b.body, seen)
if isinstance(b, DummyObject):
if id(b) in seen:
return "<recursion>"
else:
seen.add(id(b))
for k, v in b.items():
b[k] = self.unpack(v, seen)
return b
elif isinstance(b, dict):
for k, v in b.items():
b[k] = self.unpack(v, seen)
return b
elif isinstance(b, list):
return [self.unpack(i) for i in b]
elif isinstance(b, datetime.datetime):
return str(b)
elif isinstance(b, flex.ArrayCollection):
return [self.unpack(i, seen) for i in b]
else:
return b
def _format(self, envelope):
for target, message in iter(envelope):
if isinstance(message, pyamf.remoting.Request):
yield [
("header", "Request: "),
("text", str(target)),
]
else:
yield [
("header", "Response: "),
("text", "%s, code %s" % (target, message.status)),
]
s = json.dumps(self.unpack(message), indent=4)
for msg in format_text(s):
yield msg
def __call__(self, data, **metadata):
envelope = remoting.decode(data, strict=False)
if envelope:
return "AMF v%s" % envelope.amfVersion, self._format(envelope)
class ViewJavaScript(View):
name = "JavaScript"
prompt = ("javascript", "j")
content_types = [
"application/x-javascript",
"application/javascript",
"text/javascript"
]
def __call__(self, data, **metadata):
opts = jsbeautifier.default_options()
opts.indent_size = 2
res = jsbeautifier.beautify(data, opts)
return "JavaScript", format_text(res)
class ViewCSS(View):
name = "CSS"
prompt = ("css", "c")
content_types = [
"text/css"
]
def __call__(self, data, **metadata):
if cssutils:
sheet = cssutils.parseString(data)
beautified = sheet.cssText
else:
beautified = data
return "CSS", format_text(beautified)
class ViewImage(View):
name = "Image"
prompt = ("image", "i")
content_types = [
"image/png",
"image/jpeg",
"image/gif",
"image/vnd.microsoft.icon",
"image/x-icon",
]
def __call__(self, data, **metadata):
try:
img = Image.open(cStringIO.StringIO(data))
except IOError:
return None
parts = [
("Format", str(img.format_description)),
("Size", "%s x %s px" % img.size),
("Mode", str(img.mode)),
]
for i in sorted(img.info.keys()):
if i != "exif":
parts.append(
(str(i), str(img.info[i]))
)
if hasattr(img, "_getexif"):
ex = img._getexif()
if ex:
for i in sorted(ex.keys()):
tag = TAGS.get(i, i)
parts.append(
(str(tag), str(ex[i]))
)
fmt = format_dict(ODict(parts))
return "%s image" % img.format, fmt
class ViewProtobuf(View):
"""Human friendly view of protocol buffers
The view uses the protoc compiler to decode the binary
"""
name = "Protocol Buffer"
prompt = ("protobuf", "p")
content_types = [
"application/x-protobuf",
"application/x-protobuffer",
]
@staticmethod
def is_available():
try:
p = subprocess.Popen(
["protoc", "--version"],
stdout=subprocess.PIPE
)
out, _ = p.communicate()
return out.startswith("libprotoc")
except:
return False
def decode_protobuf(self, content):
# if Popen raises OSError, it will be caught in
# get_content_view and fall back to Raw
p = subprocess.Popen(['protoc', '--decode_raw'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate(input=content)
if out:
return out
else:
return err
def __call__(self, data, **metadata):
decoded = self.decode_protobuf(data)
return "Protobuf", format_text(decoded)
class ViewWBXML(View):
name = "WBXML"
prompt = ("wbxml", "w")
content_types = [
"application/vnd.wap.wbxml",
"application/vnd.ms-sync.wbxml"
]
def __call__(self, data, **metadata):
try:
parser = ASCommandResponse(data)
parsedContent = parser.xmlString
if parsedContent:
return "WBXML", format_text(parsedContent)
except:
return None
views = []
content_types_map = {}
view_prompts = []
def get(name):
for i in views:
if i.name == name:
return i
def get_by_shortcut(c):
for i in views:
if i.prompt[1] == c:
return i
def add(view):
# TODO: auto-select a different name (append an integer?)
for i in views:
if i.name == view.name:
raise ContentViewException("Duplicate view: " + view.name)
# TODO: the UI should auto-prompt for a replacement shortcut
for prompt in view_prompts:
if prompt[1] == view.prompt[1]:
raise ContentViewException("Duplicate view shortcut: " + view.prompt[1])
views.append(view)
for ct in view.content_types:
l = content_types_map.setdefault(ct, [])
l.append(view)
view_prompts.append(view.prompt)
def remove(view):
for ct in view.content_types:
l = content_types_map.setdefault(ct, [])
l.remove(view)
if not len(l):
del content_types_map[ct]
view_prompts.remove(view.prompt)
views.remove(view)
add(ViewAuto())
add(ViewRaw())
add(ViewHex())
add(ViewJSON())
add(ViewXML())
add(ViewWBXML())
add(ViewHTML())
add(ViewHTMLOutline())
add(ViewJavaScript())
add(ViewCSS())
add(ViewURLEncoded())
add(ViewMultipart())
add(ViewImage())
if pyamf:
add(ViewAMF())
if ViewProtobuf.is_available():
add(ViewProtobuf())
def safe_to_print(lines, encoding="utf8"):
"""
Wraps a content generator so that each text portion is a *safe to print* unicode string.
"""
for line in lines:
clean_line = []
for (style, text) in line:
try:
text = clean_bin(text.decode(encoding, "strict"))
except UnicodeDecodeError:
text = clean_bin(text).decode(encoding, "strict")
clean_line.append((style, text))
yield clean_line
def get_content_view(viewmode, data, **metadata):
"""
Args:
viewmode: the view to use.
data, **metadata: arguments passed to View instance.
Returns:
A (description, content generator) tuple.
In contrast to calling the views directly, text is always safe-to-print unicode.
Raises:
ContentViewException, if the content view threw an error.
"""
if not data:
return "No content", []
msg = []
headers = metadata.get("headers", {})
enc = headers.get("content-encoding")
if enc and enc != "identity":
decoded = encoding.decode(enc, data)
if decoded:
data = decoded
msg.append("[decoded %s]" % enc)
try:
ret = viewmode(data, **metadata)
# Third-party viewers can fail in unexpected ways...
except Exception as e:
six.reraise(
ContentViewException,
ContentViewException(str(e)),
sys.exc_info()[2]
)
if not ret:
ret = get("Raw")(data, **metadata)
msg.append("Couldn't parse: falling back to Raw")
else:
msg.append(ret[0])
return " ".join(msg), safe_to_print(ret[1])
| mit | -3,668,204,398,155,162,000 | -6,860,563,466,854,113,000 | 26.583471 | 98 | 0.54704 | false |
airspeed-velocity/asv | asv/plugins/virtualenv.py | 2 | 5810 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
from distutils.version import LooseVersion
import sys
import re
import os
import six
from .. import environment
from ..console import log
from .. import util
WIN = (os.name == "nt")
class Virtualenv(environment.Environment):
"""
Manage an environment using virtualenv.
"""
tool_name = "virtualenv"
def __init__(self, conf, python, requirements, tagged_env_vars):
"""
Parameters
----------
conf : Config instance
python : str
Version of Python. Must be of the form "MAJOR.MINOR".
requirements : dict
Dictionary mapping a PyPI package name to a version
identifier string.
"""
executable = Virtualenv._find_python(python)
if executable is None:
raise environment.EnvironmentUnavailable(
"No executable found for python {0}".format(python))
self._executable = executable
self._python = python
self._requirements = requirements
super(Virtualenv, self).__init__(conf,
python,
requirements,
tagged_env_vars)
try:
import virtualenv
except ImportError:
raise environment.EnvironmentUnavailable(
"virtualenv package not installed")
@staticmethod
def _find_python(python):
"""Find Python executable for the given Python version"""
is_pypy = python.startswith("pypy")
# Parse python specifier
if is_pypy:
executable = python
if python == 'pypy':
python_version = '2'
else:
python_version = python[4:]
else:
python_version = python
executable = "python{0}".format(python_version)
# Find Python executable on path
try:
return util.which(executable)
except IOError:
pass
# Maybe the current one is correct?
current_is_pypy = hasattr(sys, 'pypy_version_info')
current_versions = ['{0[0]}'.format(sys.version_info),
'{0[0]}.{0[1]}'.format(sys.version_info)]
if is_pypy == current_is_pypy and python_version in current_versions:
return sys.executable
return None
@property
def name(self):
"""
Get a name to uniquely identify this environment.
"""
python = self._python
if self._python.startswith('pypy'):
# get_env_name adds py-prefix
python = python[2:]
return environment.get_env_name(self.tool_name,
python,
self._requirements,
self._tagged_env_vars)
@classmethod
def matches(self, python):
if not (re.match(r'^[0-9].*$', python) or re.match(r'^pypy[0-9.]*$', python)):
# The python name should be a version number, or pypy+number
return False
try:
import virtualenv
except ImportError:
return False
else:
if LooseVersion(virtualenv.__version__) == LooseVersion('1.11.0'):
log.warning(
"asv is not compatible with virtualenv 1.11 due to a bug in "
"setuptools.")
if LooseVersion(virtualenv.__version__) < LooseVersion('1.10'):
log.warning(
"If using virtualenv, it much be at least version 1.10")
executable = Virtualenv._find_python(python)
return executable is not None
def _setup(self):
"""
Setup the environment on disk using virtualenv.
Then, all of the requirements are installed into
it using `pip install`.
"""
env = dict(os.environ)
env.update(self.build_env_vars)
log.info("Creating virtualenv for {0}".format(self.name))
util.check_call([
sys.executable,
"-mvirtualenv",
"-p",
self._executable,
self._path], env=env)
log.info("Installing requirements for {0}".format(self.name))
self._install_requirements()
def _install_requirements(self):
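        # Python 3.2 environments need older pip/wheel releases, since newer
        # versions dropped support for that interpreter.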
if sys.version_info[:2] == (3, 2):
pip_args = ['install', '-v', 'wheel<0.29.0', 'pip<8']
else:
pip_args = ['install', '-v', 'wheel', 'pip>=8']
env = dict(os.environ)
env.update(self.build_env_vars)
self._run_pip(pip_args, env=env)
if self._requirements:
args = ['install', '-v', '--upgrade']
for key, val in six.iteritems(self._requirements):
pkg = key
if key.startswith('pip+'):
pkg = key[4:]
if val:
args.append("{0}=={1}".format(pkg, val))
else:
args.append(pkg)
self._run_pip(args, timeout=self._install_timeout, env=env)
def _run_pip(self, args, **kwargs):
# Run pip via python -m pip, so that it works on Windows when
# upgrading pip itself, and avoids shebang length limit on Linux
return self.run_executable('python', ['-mpip'] + list(args), **kwargs)
def run(self, args, **kwargs):
log.debug("Running '{0}' in {1}".format(' '.join(args), self.name))
return self.run_executable('python', args, **kwargs)
| bsd-3-clause | -8,751,004,389,436,242,000 | -1,458,432,531,432,022,800 | 31.099448 | 86 | 0.53494 | false |
akirk/youtube-dl | youtube_dl/extractor/pyvideo.py | 158 | 1983 | from __future__ import unicode_literals
import re
import os
from .common import InfoExtractor
class PyvideoIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?pyvideo\.org/video/(?P<id>\d+)/(.*)'
_TESTS = [
{
'url': 'http://pyvideo.org/video/1737/become-a-logging-expert-in-30-minutes',
'md5': 'de317418c8bc76b1fd8633e4f32acbc6',
'info_dict': {
'id': '24_4WWkSmNo',
'ext': 'mp4',
'title': 'Become a logging expert in 30 minutes',
'description': 'md5:9665350d466c67fb5b1598de379021f7',
'upload_date': '20130320',
'uploader': 'NextDayVideo',
'uploader_id': 'NextDayVideo',
},
'add_ie': ['Youtube'],
},
{
'url': 'http://pyvideo.org/video/2542/gloriajw-spotifywitherikbernhardsson182m4v',
'md5': '5fe1c7e0a8aa5570330784c847ff6d12',
'info_dict': {
'id': '2542',
'ext': 'm4v',
'title': 'Gloriajw-SpotifyWithErikBernhardsson182',
},
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
m_youtube = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', webpage)
if m_youtube is not None:
return self.url_result(m_youtube.group(1), 'Youtube')
title = self._html_search_regex(
r'<div class="section">\s*<h3(?:\s+class="[^"]*"[^>]*)?>([^>]+?)</h3>',
webpage, 'title', flags=re.DOTALL)
video_url = self._search_regex(
[r'<source src="(.*?)"', r'<dt>Download</dt>.*?<a href="(.+?)"'],
webpage, 'video url', flags=re.DOTALL)
return {
'id': video_id,
'title': os.path.splitext(title)[0],
'url': video_url,
}
| unlicense | 7,609,911,477,288,226,000 | -1,051,740,499,629,365,400 | 32.610169 | 94 | 0.501261 | false |
armando-migliaccio/tempest | tempest/api/compute/images/test_images_oneserver_negative.py | 1 | 6602 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import clients
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest.test import attr
from tempest.test import skip_because
LOG = logging.getLogger(__name__)
class ImagesOneServerNegativeTestJSON(base.BaseV2ComputeTest):
_interface = 'json'
def tearDown(self):
"""Terminate test instances created after a test is executed."""
for image_id in self.image_ids:
self.client.delete_image(image_id)
self.image_ids.remove(image_id)
super(ImagesOneServerNegativeTestJSON, self).tearDown()
def setUp(self):
# NOTE(afazekas): Normally we use the same server with all test cases,
# but if it has an issue, we build a new one
super(ImagesOneServerNegativeTestJSON, self).setUp()
# Check if the server is in a clean state after test
try:
self.servers_client.wait_for_server_status(self.server_id,
'ACTIVE')
except Exception as exc:
LOG.exception(exc)
            # Rebuild the server if it cannot reach the ACTIVE state.
            # Usually this means the server had a serious accident.
self._reset_server()
def _reset_server(self):
self.__class__.server_id = self.rebuild_server(self.server_id)
@classmethod
def setUpClass(cls):
super(ImagesOneServerNegativeTestJSON, cls).setUpClass()
cls.client = cls.images_client
if not cls.config.service_available.glance:
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
try:
resp, server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
except Exception:
cls.tearDownClass()
raise
cls.image_ids = []
if cls.multi_user:
if cls.config.compute.allow_tenant_isolation:
creds = cls.isolated_creds.get_alt_creds()
username, tenant_name, password = creds
cls.alt_manager = clients.Manager(username=username,
password=password,
tenant_name=tenant_name)
else:
# Use the alt_XXX credentials in the config file
cls.alt_manager = clients.AltManager()
cls.alt_client = cls.alt_manager.images_client
@skip_because(bug="1006725")
@attr(type=['negative', 'gate'])
def test_create_image_specify_multibyte_character_image_name(self):
# Return an error if the image name has multi-byte characters
snapshot_name = data_utils.rand_name('\xef\xbb\xbf')
self.assertRaises(exceptions.BadRequest,
self.client.create_image, self.server_id,
snapshot_name)
@attr(type=['negative', 'gate'])
def test_create_image_specify_invalid_metadata(self):
# Return an error when creating image with invalid metadata
snapshot_name = data_utils.rand_name('test-snap-')
meta = {'': ''}
self.assertRaises(exceptions.BadRequest, self.client.create_image,
self.server_id, snapshot_name, meta)
@attr(type=['negative', 'gate'])
def test_create_image_specify_metadata_over_limits(self):
        # Return an error when creating an image with metadata over 256 chars
snapshot_name = data_utils.rand_name('test-snap-')
meta = {'a' * 260: 'b' * 260}
self.assertRaises(exceptions.BadRequest, self.client.create_image,
self.server_id, snapshot_name, meta)
@attr(type=['negative', 'gate'])
def test_create_second_image_when_first_image_is_being_saved(self):
# Disallow creating another image when first image is being saved
# Create first snapshot
snapshot_name = data_utils.rand_name('test-snap-')
resp, body = self.client.create_image(self.server_id,
snapshot_name)
self.assertEqual(202, resp.status)
image_id = data_utils.parse_image_id(resp['location'])
self.image_ids.append(image_id)
self.addCleanup(self._reset_server)
# Create second snapshot
alt_snapshot_name = data_utils.rand_name('test-snap-')
self.assertRaises(exceptions.Conflict, self.client.create_image,
self.server_id, alt_snapshot_name)
@attr(type=['negative', 'gate'])
def test_create_image_specify_name_over_256_chars(self):
# Return an error if snapshot name over 256 characters is passed
snapshot_name = data_utils.rand_name('a' * 260)
self.assertRaises(exceptions.BadRequest, self.client.create_image,
self.server_id, snapshot_name)
@attr(type=['negative', 'gate'])
def test_delete_image_that_is_not_yet_active(self):
# Return an error while trying to delete an image what is creating
snapshot_name = data_utils.rand_name('test-snap-')
resp, body = self.client.create_image(self.server_id, snapshot_name)
self.assertEqual(202, resp.status)
image_id = data_utils.parse_image_id(resp['location'])
self.image_ids.append(image_id)
self.addCleanup(self._reset_server)
# Do not wait, attempt to delete the image, ensure it's successful
resp, body = self.client.delete_image(image_id)
self.assertEqual('204', resp['status'])
self.image_ids.remove(image_id)
self.assertRaises(exceptions.NotFound, self.client.get_image, image_id)
class ImagesOneServerNegativeTestXML(ImagesOneServerNegativeTestJSON):
_interface = 'xml'
| apache-2.0 | -3,820,259,634,987,758,600 | -5,123,151,596,448,384,000 | 41.050955 | 79 | 0.63193 | false |
bennojoy/ansible | v1/ansible/runner/__init__.py | 77 | 69625 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import multiprocessing
import signal
import os
import pwd
import Queue
import random
import traceback
import tempfile
import time
import collections
import socket
import base64
import sys
import pipes
import jinja2
import subprocess
import getpass
import ansible.constants as C
import ansible.inventory
from ansible import utils
from ansible.utils import template
from ansible.utils import check_conditional
from ansible.utils import string_functions
from ansible import errors
from ansible import module_common
import poller
import connection
from return_data import ReturnData
from ansible.callbacks import DefaultRunnerCallbacks, vv
from ansible.module_common import ModuleReplacer
from ansible.module_utils.splitter import split_args, unquote
from ansible.cache import FactCache
from ansible.utils import update_hash
module_replacer = ModuleReplacer(strip_comments=False)
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
HAS_ATFORK=True
try:
from Crypto.Random import atfork
except ImportError:
HAS_ATFORK=False
multiprocessing_runner = None
OUTPUT_LOCKFILE = tempfile.TemporaryFile()
PROCESS_LOCKFILE = tempfile.TemporaryFile()
################################################
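# Worker-process entry point: each forked worker drains hosts from the shared
# job queue, runs the executor for every host, and pushes the resulting
# ReturnData onto the result queue for the parent process to collect.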
def _executor_hook(job_queue, result_queue, new_stdin):
# attempt workaround of https://github.com/newsapps/beeswithmachineguns/issues/17
    # this function is also not present on CentOS 6
if HAS_ATFORK:
atfork()
signal.signal(signal.SIGINT, signal.SIG_IGN)
while not job_queue.empty():
try:
host = job_queue.get(block=False)
return_data = multiprocessing_runner._executor(host, new_stdin)
result_queue.put(return_data)
except Queue.Empty:
pass
except:
traceback.print_exc()
class HostVars(dict):
''' A special view of vars_cache that adds values from the inventory when needed. '''
def __init__(self, vars_cache, inventory, vault_password=None):
self.vars_cache = vars_cache
self.inventory = inventory
self.lookup = {}
self.update(vars_cache)
self.vault_password = vault_password
def __getitem__(self, host):
if host not in self.lookup:
result = self.inventory.get_variables(host, vault_password=self.vault_password).copy()
result.update(self.vars_cache.get(host, {}))
self.lookup[host] = template.template('.', result, self.vars_cache)
return self.lookup[host]
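# Note: a HostVars instance is attached to the inject dict as inject['hostvars']
# further below, so playbook templates can do e.g. {{ hostvars['somehost']['ansible_ssh_host'] }}.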
class Runner(object):
''' core API interface to ansible '''
# see bin/ansible for how this is used...
def __init__(self,
host_list=C.DEFAULT_HOST_LIST, # ex: /etc/ansible/hosts, legacy usage
module_path=None, # ex: /usr/share/ansible
module_name=C.DEFAULT_MODULE_NAME, # ex: copy
module_args=C.DEFAULT_MODULE_ARGS, # ex: "src=/tmp/a dest=/tmp/b"
forks=C.DEFAULT_FORKS, # parallelism level
timeout=C.DEFAULT_TIMEOUT, # SSH timeout
pattern=C.DEFAULT_PATTERN, # which hosts? ex: 'all', 'acme.example.org'
remote_user=C.DEFAULT_REMOTE_USER, # ex: 'username'
remote_pass=C.DEFAULT_REMOTE_PASS, # ex: 'password123' or None if using key
remote_port=None, # if SSH on different ports
private_key_file=C.DEFAULT_PRIVATE_KEY_FILE, # if not using keys/passwords
background=0, # async poll every X seconds, else 0 for non-async
basedir=None, # directory of playbook, if applicable
setup_cache=None, # used to share fact data w/ other tasks
vars_cache=None, # used to store variables about hosts
transport=C.DEFAULT_TRANSPORT, # 'ssh', 'paramiko', 'local'
conditional='True', # run only if this fact expression evals to true
callbacks=None, # used for output
module_vars=None, # a playbooks internals thing
play_vars=None, #
play_file_vars=None, #
role_vars=None, #
role_params=None, #
default_vars=None, #
        extra_vars=None,                   # extra vars specified with the playbook(s)
is_playbook=False, # running from playbook or not?
inventory=None, # reference to Inventory object
subset=None, # subset pattern
check=False, # don't make any changes, just try to probe for potential changes
diff=False, # whether to show diffs for template files that change
environment=None, # environment variables (as dict) to use inside the command
complex_args=None, # structured data in addition to module_args, must be a dict
error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR, # ex. False
accelerate=False, # use accelerated connection
accelerate_ipv6=False, # accelerated connection w/ IPv6
accelerate_port=None, # port to use with accelerated connection
vault_pass=None,
run_hosts=None, # an optional list of pre-calculated hosts to run on
no_log=False, # option to enable/disable logging for a given task
run_once=False, # option to enable/disable host bypass loop for a given task
become=False, # whether to run privilege escalation or not
become_method=C.DEFAULT_BECOME_METHOD,
become_user=C.DEFAULT_BECOME_USER, # ex: 'root'
become_pass=C.DEFAULT_BECOME_PASS, # ex: 'password123' or None
become_exe=C.DEFAULT_BECOME_EXE, # ex: /usr/local/bin/sudo
):
# used to lock multiprocess inputs and outputs at various levels
self.output_lockfile = OUTPUT_LOCKFILE
self.process_lockfile = PROCESS_LOCKFILE
if not complex_args:
complex_args = {}
# storage & defaults
self.check = check
self.diff = diff
self.setup_cache = utils.default(setup_cache, lambda: ansible.cache.FactCache())
self.vars_cache = utils.default(vars_cache, lambda: collections.defaultdict(dict))
self.basedir = utils.default(basedir, lambda: os.getcwd())
self.callbacks = utils.default(callbacks, lambda: DefaultRunnerCallbacks())
self.generated_jid = str(random.randint(0, 999999999999))
self.transport = transport
self.inventory = utils.default(inventory, lambda: ansible.inventory.Inventory(host_list))
self.module_vars = utils.default(module_vars, lambda: {})
self.play_vars = utils.default(play_vars, lambda: {})
self.play_file_vars = utils.default(play_file_vars, lambda: {})
self.role_vars = utils.default(role_vars, lambda: {})
self.role_params = utils.default(role_params, lambda: {})
self.default_vars = utils.default(default_vars, lambda: {})
self.extra_vars = utils.default(extra_vars, lambda: {})
self.always_run = None
self.connector = connection.Connector(self)
self.conditional = conditional
self.delegate_to = None
self.module_name = module_name
self.forks = int(forks)
self.pattern = pattern
self.module_args = module_args
self.timeout = timeout
self.remote_user = remote_user
self.remote_pass = remote_pass
self.remote_port = remote_port
self.private_key_file = private_key_file
self.background = background
self.become = become
self.become_method = become_method
self.become_user_var = become_user
self.become_user = None
self.become_pass = become_pass
self.become_exe = become_exe
self.is_playbook = is_playbook
self.environment = environment
self.complex_args = complex_args
self.error_on_undefined_vars = error_on_undefined_vars
self.accelerate = accelerate
self.accelerate_port = accelerate_port
self.accelerate_ipv6 = accelerate_ipv6
self.callbacks.runner = self
self.omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
self.vault_pass = vault_pass
self.no_log = no_log
self.run_once = run_once
if self.transport == 'smart':
# If the transport is 'smart', check to see if certain conditions
# would prevent us from using ssh, and fallback to paramiko.
# 'smart' is the default since 1.2.1/1.3
self.transport = "ssh"
if sys.platform.startswith('darwin') and self.remote_pass:
# due to a current bug in sshpass on OSX, which can trigger
# a kernel panic even for non-privileged users, we revert to
# paramiko on that OS when a SSH password is specified
self.transport = "paramiko"
else:
# see if SSH can support ControlPersist if not use paramiko
cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
if "Bad configuration option" in err:
self.transport = "paramiko"
# save the original transport, in case it gets
# changed later via options like accelerate
self.original_transport = self.transport
# misc housekeeping
if subset and self.inventory._subset is None:
# don't override subset when passed from playbook
self.inventory.subset(subset)
# If we get a pre-built list of hosts to run on, from say a playbook, use them.
# Also where we will store the hosts to run on once discovered
self.run_hosts = run_hosts
if self.transport == 'local':
self.remote_user = pwd.getpwuid(os.geteuid())[0]
if module_path is not None:
for i in module_path.split(os.pathsep):
utils.plugins.module_finder.add_directory(i)
utils.plugins.push_basedir(self.basedir)
# ensure we are using unique tmp paths
random.seed()
# *****************************************************
def _complex_args_hack(self, complex_args, module_args):
"""
        ansible-playbook allows specifying both key=value string arguments and complex arguments;
        however, not all modules use our python common module system and so cannot
        access the complex form. An example might be a Bash module. This hack allows users to still pass "args"
as a hash of simple scalars to those arguments and is short term. We could technically
just feed JSON to the module, but that makes it hard on Bash consumers. The way this is implemented
it does mean values in 'args' have LOWER priority than those on the key=value line, allowing
args to provide yet another way to have pluggable defaults.
"""
if complex_args is None:
return module_args
if not isinstance(complex_args, dict):
raise errors.AnsibleError("complex arguments are not a dictionary: %s" % complex_args)
for (k,v) in complex_args.iteritems():
if isinstance(v, basestring):
module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
return module_args
# *****************************************************
def _transfer_str(self, conn, tmp, name, data):
''' transfer string to remote file '''
if type(data) == dict:
data = utils.jsonify(data)
afd, afile = tempfile.mkstemp()
afo = os.fdopen(afd, 'w')
try:
if not isinstance(data, unicode):
#ensure the data is valid UTF-8
data.decode('utf-8')
else:
data = data.encode('utf-8')
afo.write(data)
except:
raise errors.AnsibleError("failure encoding into utf-8")
afo.flush()
afo.close()
remote = conn.shell.join_path(tmp, name)
try:
conn.put_file(afile, remote)
finally:
os.unlink(afile)
return remote
# *****************************************************
def _compute_environment_string(self, conn, inject=None):
''' what environment variables to use when running the command? '''
enviro = {}
if self.environment:
enviro = template.template(self.basedir, self.environment, inject, convert_bare=True)
enviro = utils.safe_eval(enviro)
if type(enviro) != dict:
raise errors.AnsibleError("environment must be a dictionary, received %s" % enviro)
return conn.shell.env_prefix(**enviro)
# *****************************************************
def _compute_delegate(self, password, remote_inject):
""" Build a dictionary of all attributes for the delegate host """
delegate = {}
# allow delegated host to be templated
delegate['inject'] = remote_inject.copy()
# set any interpreters
interpreters = []
for i in delegate['inject']:
if i.startswith("ansible_") and i.endswith("_interpreter"):
interpreters.append(i)
for i in interpreters:
del delegate['inject'][i]
port = C.DEFAULT_REMOTE_PORT
# get the vars for the delegate by its name
try:
this_info = delegate['inject']['hostvars'][self.delegate_to]
except:
# make sure the inject is empty for non-inventory hosts
this_info = {}
# get the real ssh_address for the delegate
# and allow ansible_ssh_host to be templated
delegate['ssh_host'] = template.template(
self.basedir,
this_info.get('ansible_ssh_host', self.delegate_to),
this_info,
fail_on_undefined=True
)
delegate['port'] = this_info.get('ansible_ssh_port', port)
delegate['user'] = self._compute_delegate_user(self.delegate_to, delegate['inject'])
delegate['pass'] = this_info.get('ansible_ssh_pass', password)
delegate['private_key_file'] = this_info.get('ansible_ssh_private_key_file', self.private_key_file)
delegate['transport'] = this_info.get('ansible_connection', self.transport)
delegate['become_pass'] = this_info.get('ansible_become_pass', this_info.get('ansible_ssh_pass', self.become_pass))
# Last chance to get private_key_file from global variables.
# this is useful if delegated host is not defined in the inventory
if delegate['private_key_file'] is None:
delegate['private_key_file'] = remote_inject.get('ansible_ssh_private_key_file', None)
if delegate['private_key_file'] is not None:
delegate['private_key_file'] = os.path.expanduser(delegate['private_key_file'])
for i in this_info:
if i.startswith("ansible_") and i.endswith("_interpreter"):
delegate['inject'][i] = this_info[i]
return delegate
def _compute_delegate_user(self, host, inject):
""" Calculate the remote user based on an order of preference """
# inventory > playbook > original_host
actual_user = inject.get('ansible_ssh_user', self.remote_user)
thisuser = None
try:
if host in inject['hostvars']:
if inject['hostvars'][host].get('ansible_ssh_user'):
# user for delegate host in inventory
thisuser = inject['hostvars'][host].get('ansible_ssh_user')
else:
# look up the variables for the host directly from inventory
host_vars = self.inventory.get_variables(host, vault_password=self.vault_pass)
if 'ansible_ssh_user' in host_vars:
thisuser = host_vars['ansible_ssh_user']
except errors.AnsibleError, e:
# the hostname was not found in the inventory, so
# we just ignore this and try the next method
pass
if thisuser is None and self.remote_user:
# user defined by play/runner
thisuser = self.remote_user
if thisuser is not None:
actual_user = thisuser
else:
# fallback to the inventory user of the play host
#actual_user = inject.get('ansible_ssh_user', actual_user)
actual_user = inject.get('ansible_ssh_user', self.remote_user)
return actual_user
def _count_module_args(self, args, allow_dupes=False):
'''
Count the number of k=v pairs in the supplied module args. This is
basically a specialized version of parse_kv() from utils with a few
minor changes.
'''
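        # Illustrative counts (hypothetical input): "src=/tmp/a dest=/tmp/b"
        # yields 2, while a fully quoted token such as '"a=b c=d"' is not
        # split on '=' and so contributes nothing to the count.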
options = {}
if args is not None:
try:
vargs = split_args(args)
except Exception, e:
if "unbalanced jinja2 block or quotes" in str(e):
raise errors.AnsibleError("error parsing argument string '%s', try quoting the entire line." % args)
else:
raise
for x in vargs:
quoted = x.startswith('"') and x.endswith('"') or x.startswith("'") and x.endswith("'")
if "=" in x and not quoted:
k, v = x.split("=",1)
is_shell_module = self.module_name in ('command', 'shell')
is_shell_param = k in ('creates', 'removes', 'chdir', 'executable')
if k in options and not allow_dupes:
if not(is_shell_module and not is_shell_param):
raise errors.AnsibleError("a duplicate parameter was found in the argument string (%s)" % k)
if is_shell_module and is_shell_param or not is_shell_module:
options[k] = v
return len(options)
# *****************************************************
def _execute_module(self, conn, tmp, module_name, args,
async_jid=None, async_module=None, async_limit=None, inject=None, persist_files=False, complex_args=None, delete_remote_tmp=True):
''' transfer and run a module along with its arguments on the remote side'''
# hack to support fireball mode
if module_name == 'fireball':
args = "%s password=%s" % (args, base64.b64encode(str(utils.key_for_hostname(conn.host))))
if 'port' not in args:
args += " port=%s" % C.ZEROMQ_PORT
(
module_style,
shebang,
module_data
) = self._configure_module(conn, module_name, args, inject, complex_args)
# a remote tmp path may be necessary and not already created
if self._late_needs_tmp_path(conn, tmp, module_style):
tmp = self._make_tmp_path(conn)
remote_module_path = conn.shell.join_path(tmp, module_name)
if (module_style != 'new'
or async_jid is not None
or not conn.has_pipelining
or not C.ANSIBLE_SSH_PIPELINING
or C.DEFAULT_KEEP_REMOTE_FILES
or self.become_method == 'su'):
self._transfer_str(conn, tmp, module_name, module_data)
environment_string = self._compute_environment_string(conn, inject)
if "tmp" in tmp and (self.become and self.become_user != 'root'):
# deal with possible umask issues once you become another user
self._remote_chmod(conn, 'a+r', remote_module_path, tmp)
cmd = ""
in_data = None
if module_style != 'new':
if 'CHECKMODE=True' in args:
# if module isn't using AnsibleModuleCommon infrastructure we can't be certain it knows how to
# do --check mode, so to be safe we will not run it.
return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot yet run check mode against old-style modules"))
elif 'NO_LOG' in args:
return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot use no_log: with old-style modules"))
args = template.template(self.basedir, args, inject)
# decide whether we need to transfer JSON or key=value
argsfile = None
if module_style == 'non_native_want_json':
if complex_args:
complex_args.update(utils.parse_kv(args))
argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(complex_args))
else:
argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(utils.parse_kv(args)))
else:
argsfile = self._transfer_str(conn, tmp, 'arguments', args)
if self.become and self.become_user != 'root':
# deal with possible umask issues once become another user
self._remote_chmod(conn, 'a+r', argsfile, tmp)
if async_jid is None:
cmd = "%s %s" % (remote_module_path, argsfile)
else:
cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module, argsfile]])
else:
if async_jid is None:
if conn.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES and not self.become_method == 'su':
in_data = module_data
else:
cmd = "%s" % (remote_module_path)
else:
cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module]])
if not shebang:
raise errors.AnsibleError("module is missing interpreter line")
rm_tmp = None
if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
if not self.become or self.become_user == 'root':
# not sudoing or sudoing to root, so can cleanup files in the same step
rm_tmp = tmp
cmd = conn.shell.build_module_command(environment_string, shebang, cmd, rm_tmp)
cmd = cmd.strip()
sudoable = True
if module_name == "accelerate":
# always run the accelerate module as the user
# specified in the play, not the become_user
sudoable = False
res = self._low_level_exec_command(conn, cmd, tmp, become=self.become, sudoable=sudoable, in_data=in_data)
if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
if self.become and self.become_user != 'root':
# not becoming root, so maybe can't delete files as that other user
# have to clean up temp files as original user in a second step
cmd2 = conn.shell.remove(tmp, recurse=True)
self._low_level_exec_command(conn, cmd2, tmp, sudoable=False)
data = utils.parse_json(res['stdout'], from_remote=True, no_exceptions=True)
if 'parsed' in data and data['parsed'] == False:
data['msg'] += res['stderr']
return ReturnData(conn=conn, result=data)
# *****************************************************
def _executor(self, host, new_stdin):
''' handler for multiprocessing library '''
try:
fileno = sys.stdin.fileno()
except ValueError:
fileno = None
try:
self._new_stdin = new_stdin
if not new_stdin and fileno is not None:
try:
self._new_stdin = os.fdopen(os.dup(fileno))
except OSError, e:
# couldn't dupe stdin, most likely because it's
# not a valid file descriptor, so we just rely on
# using the one that was passed in
pass
exec_rc = self._executor_internal(host, new_stdin)
if type(exec_rc) != ReturnData:
raise Exception("unexpected return type: %s" % type(exec_rc))
# redundant, right?
if not exec_rc.comm_ok:
self.callbacks.on_unreachable(host, exec_rc.result)
return exec_rc
except errors.AnsibleError, ae:
msg = str(ae)
self.callbacks.on_unreachable(host, msg)
return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
except Exception:
msg = traceback.format_exc()
self.callbacks.on_unreachable(host, msg)
return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
# *****************************************************
def get_combined_cache(self):
# merge the VARS and SETUP caches for this host
combined_cache = self.setup_cache.copy()
return utils.merge_hash(combined_cache, self.vars_cache)
def get_inject_vars(self, host):
host_variables = self.inventory.get_variables(host, vault_password=self.vault_pass)
combined_cache = self.get_combined_cache()
# use combined_cache and host_variables to template the module_vars
# we update the inject variables with the data we're about to template
# since some of the variables we'll be replacing may be contained there too
module_vars_inject = utils.combine_vars(host_variables, combined_cache.get(host, {}))
module_vars_inject = utils.combine_vars(self.module_vars, module_vars_inject)
module_vars = template.template(self.basedir, self.module_vars, module_vars_inject)
# remove bad variables from the module vars, which may be in there due
        # to the way role declarations are specified in playbooks
if 'tags' in module_vars:
del module_vars['tags']
if 'when' in module_vars:
del module_vars['when']
# start building the dictionary of injected variables
inject = {}
# default vars are the lowest priority
inject = utils.combine_vars(inject, self.default_vars)
# next come inventory variables for the host
inject = utils.combine_vars(inject, host_variables)
# then the setup_cache which contains facts gathered
inject = utils.combine_vars(inject, self.setup_cache.get(host, {}))
# next come variables from vars and vars files
inject = utils.combine_vars(inject, self.play_vars)
inject = utils.combine_vars(inject, self.play_file_vars)
# next come variables from role vars/main.yml files
inject = utils.combine_vars(inject, self.role_vars)
# then come the module variables
inject = utils.combine_vars(inject, module_vars)
# followed by vars_cache things (set_fact, include_vars, and
# vars_files which had host-specific templating done)
inject = utils.combine_vars(inject, self.vars_cache.get(host, {}))
# role parameters next
inject = utils.combine_vars(inject, self.role_params)
# and finally -e vars are the highest priority
inject = utils.combine_vars(inject, self.extra_vars)
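        # (Net precedence, lowest to highest: defaults < inventory < facts <
        #  play vars/files < role vars < module vars < vars_cache < role
        #  params < extra (-e) vars.)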
# and then special vars
inject.setdefault('ansible_ssh_user', self.remote_user)
inject['group_names'] = host_variables.get('group_names', [])
inject['groups'] = self.inventory.groups_list()
inject['vars'] = self.module_vars
inject['defaults'] = self.default_vars
inject['environment'] = self.environment
inject['playbook_dir'] = os.path.abspath(self.basedir)
inject['omit'] = self.omit_token
inject['combined_cache'] = combined_cache
return inject
def _executor_internal(self, host, new_stdin):
''' executes any module one or more times '''
# We build the proper injected dictionary for all future
# templating operations in this run
inject = self.get_inject_vars(host)
# Then we selectively merge some variable dictionaries down to a
# single dictionary, used to template the HostVars for this host
temp_vars = self.inventory.get_variables(host, vault_password=self.vault_pass)
temp_vars = utils.combine_vars(temp_vars, inject['combined_cache'] )
temp_vars = utils.combine_vars(temp_vars, {'groups': inject['groups']})
temp_vars = utils.combine_vars(temp_vars, self.play_vars)
temp_vars = utils.combine_vars(temp_vars, self.play_file_vars)
temp_vars = utils.combine_vars(temp_vars, self.extra_vars)
hostvars = HostVars(temp_vars, self.inventory, vault_password=self.vault_pass)
# and we save the HostVars in the injected dictionary so they
# may be referenced from playbooks/templates
inject['hostvars'] = hostvars
host_connection = inject.get('ansible_connection', self.transport)
if host_connection in [ 'paramiko', 'ssh', 'accelerate' ]:
port = hostvars.get('ansible_ssh_port', self.remote_port)
if port is None:
port = C.DEFAULT_REMOTE_PORT
else:
# fireball, local, etc
port = self.remote_port
if self.inventory.basedir() is not None:
inject['inventory_dir'] = self.inventory.basedir()
if self.inventory.src() is not None:
inject['inventory_file'] = self.inventory.src()
# could be already set by playbook code
inject.setdefault('ansible_version', utils.version_info(gitinfo=False))
# allow with_foo to work in playbooks...
items = None
items_plugin = self.module_vars.get('items_lookup_plugin', None)
if items_plugin is not None and items_plugin in utils.plugins.lookup_loader:
basedir = self.basedir
if '_original_file' in inject:
basedir = os.path.dirname(inject['_original_file'])
filesdir = os.path.join(basedir, '..', 'files')
if os.path.exists(filesdir):
basedir = filesdir
try:
items_terms = self.module_vars.get('items_lookup_terms', '')
items_terms = template.template(basedir, items_terms, inject)
items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=basedir).run(items_terms, inject=inject)
except errors.AnsibleUndefinedVariable, e:
if 'has no attribute' in str(e):
# the undefined variable was an attribute of a variable that does
# exist, so try and run this through the conditional check to see
# if the user wanted to skip something on being undefined
if utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=True):
# the conditional check passed, so we have to fail here
raise
else:
# the conditional failed, so we skip this task
result = utils.jsonify(dict(changed=False, skipped=True))
self.callbacks.on_skipped(host, None)
return ReturnData(host=host, result=result)
except errors.AnsibleError, e:
raise
except Exception, e:
raise errors.AnsibleError("Unexpected error while executing task: %s" % str(e))
# strip out any jinja2 template syntax within
# the data returned by the lookup plugin
items = utils._clean_data_struct(items, from_remote=True)
if items is None:
items = []
else:
if type(items) != list:
raise errors.AnsibleError("lookup plugins have to return a list: %r" % items)
if len(items) and utils.is_list_of_strings(items) and self.module_name in ( 'apt', 'yum', 'pkgng', 'zypper', 'dnf' ):
# hack for apt, yum, and pkgng so that with_items maps back into a single module call
use_these_items = []
for x in items:
inject['item'] = x
if not self.conditional or utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
use_these_items.append(x)
inject['item'] = ",".join(use_these_items)
items = None
def _safe_template_complex_args(args, inject):
# Ensure the complex args here are a dictionary, but
# first template them if they contain a variable
returned_args = args
if isinstance(args, basestring):
# If the complex_args were evaluated to a dictionary and there are
# more keys in the templated version than the evaled version, some
# param inserted additional keys (the template() call also runs
# safe_eval on the var if it looks like it's a datastructure). If the
# evaled_args are not a dict, it's most likely a whole variable (ie.
# args: {{var}}), in which case there's no way to detect the proper
# count of params in the dictionary.
templated_args = template.template(self.basedir, args, inject, convert_bare=True)
evaled_args = utils.safe_eval(args)
if isinstance(evaled_args, dict) and len(evaled_args) > 0 and len(evaled_args) != len(templated_args):
raise errors.AnsibleError("a variable tried to insert extra parameters into the args for this task")
# set the returned_args to the templated_args
returned_args = templated_args
# and a final check to make sure the complex args are a dict
if returned_args is not None and not isinstance(returned_args, dict):
raise errors.AnsibleError("args must be a dictionary, received %s" % returned_args)
return returned_args
# logic to decide how to run things depends on whether with_items is used
if items is None:
complex_args = _safe_template_complex_args(self.complex_args, inject)
return self._executor_internal_inner(host, self.module_name, self.module_args, inject, port, complex_args=complex_args)
elif len(items) > 0:
# executing using with_items, so make multiple calls
# TODO: refactor
if self.background > 0:
raise errors.AnsibleError("lookup plugins (with_*) cannot be used with async tasks")
all_comm_ok = True
all_changed = False
all_failed = False
results = []
for x in items:
# use a fresh inject for each item
this_inject = inject.copy()
this_inject['item'] = x
complex_args = _safe_template_complex_args(self.complex_args, this_inject)
result = self._executor_internal_inner(
host,
self.module_name,
self.module_args,
this_inject,
port,
complex_args=complex_args
)
if 'stdout' in result.result and 'stdout_lines' not in result.result:
result.result['stdout_lines'] = result.result['stdout'].splitlines()
results.append(result.result)
if result.comm_ok == False:
all_comm_ok = False
all_failed = True
break
for x in results:
if x.get('changed') == True:
all_changed = True
if (x.get('failed') == True) or ('failed_when_result' in x and [x['failed_when_result']] or [('rc' in x) and (x['rc'] != 0)])[0]:
all_failed = True
break
msg = 'All items completed'
if all_failed:
msg = "One or more items failed."
rd_result = dict(failed=all_failed, changed=all_changed, results=results, msg=msg)
if not all_failed:
del rd_result['failed']
return ReturnData(host=host, comm_ok=all_comm_ok, result=rd_result)
else:
self.callbacks.on_skipped(host, None)
return ReturnData(host=host, comm_ok=True, result=dict(changed=False, skipped=True))
# *****************************************************
def _executor_internal_inner(self, host, module_name, module_args, inject, port, is_chained=False, complex_args=None):
''' decides how to invoke a module '''
# late processing of parameterized become_user (with_items,..)
if self.become_user_var is not None:
self.become_user = template.template(self.basedir, self.become_user_var, inject)
# module_name may be dynamic (but cannot contain {{ ansible_ssh_user }})
module_name = template.template(self.basedir, module_name, inject)
if module_name in utils.plugins.action_loader:
if self.background != 0:
raise errors.AnsibleError("async mode is not supported with the %s module" % module_name)
handler = utils.plugins.action_loader.get(module_name, self)
elif self.background == 0:
handler = utils.plugins.action_loader.get('normal', self)
else:
handler = utils.plugins.action_loader.get('async', self)
if type(self.conditional) != list:
self.conditional = [ self.conditional ]
for cond in self.conditional:
if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
result = dict(changed=False, skipped=True)
if self.no_log:
result = utils.censor_unlogged_data(result)
self.callbacks.on_skipped(host, result)
else:
self.callbacks.on_skipped(host, inject.get('item',None))
return ReturnData(host=host, result=utils.jsonify(result))
if getattr(handler, 'setup', None) is not None:
handler.setup(module_name, inject)
conn = None
actual_host = inject.get('ansible_ssh_host', host)
# allow ansible_ssh_host to be templated
actual_host = template.template(self.basedir, actual_host, inject, fail_on_undefined=True)
actual_port = port
actual_user = inject.get('ansible_ssh_user', self.remote_user)
actual_pass = inject.get('ansible_ssh_pass', self.remote_pass)
actual_transport = inject.get('ansible_connection', self.transport)
actual_private_key_file = inject.get('ansible_ssh_private_key_file', self.private_key_file)
actual_private_key_file = template.template(self.basedir, actual_private_key_file, inject, fail_on_undefined=True)
self.become = utils.boolean(inject.get('ansible_become', inject.get('ansible_sudo', inject.get('ansible_su', self.become))))
self.become_user = inject.get('ansible_become_user', inject.get('ansible_sudo_user', inject.get('ansible_su_user',self.become_user)))
self.become_pass = inject.get('ansible_become_pass', inject.get('ansible_sudo_pass', inject.get('ansible_su_pass', self.become_pass)))
self.become_exe = inject.get('ansible_become_exe', inject.get('ansible_sudo_exe', self.become_exe))
self.become_method = inject.get('ansible_become_method', self.become_method)
# select default root user in case self.become requested
# but no user specified; happens e.g. in host vars when
# just ansible_become=True is specified
if self.become and self.become_user is None:
self.become_user = 'root'
if actual_private_key_file is not None:
actual_private_key_file = os.path.expanduser(actual_private_key_file)
if self.accelerate and actual_transport != 'local':
            # pass the inventory name of the host through to the accelerate plugin
if inject.get('ansible_ssh_host', None):
self.accelerate_inventory_host = host
else:
self.accelerate_inventory_host = None
# if we're using accelerated mode, force the
# transport to accelerate
actual_transport = "accelerate"
if not self.accelerate_port:
self.accelerate_port = C.ACCELERATE_PORT
actual_port = inject.get('ansible_ssh_port', port)
# the delegated host may have different SSH port configured, etc
# and we need to transfer those, and only those, variables
self.delegate_to = inject.get('delegate_to', None)
if self.delegate_to:
self.delegate_to = template.template(self.basedir, self.delegate_to, inject)
if self.delegate_to is not None:
delegate = self._compute_delegate(actual_pass, inject)
actual_transport = delegate['transport']
actual_host = delegate['ssh_host']
actual_port = delegate['port']
actual_user = delegate['user']
actual_pass = delegate['pass']
actual_private_key_file = delegate['private_key_file']
self.become_pass = delegate.get('become_pass',delegate.get('sudo_pass'))
inject = delegate['inject']
# set resolved delegate_to into inject so modules can call _remote_checksum
inject['delegate_to'] = self.delegate_to
# user/pass may still contain variables at this stage
actual_user = template.template(self.basedir, actual_user, inject)
try:
actual_pass = template.template(self.basedir, actual_pass, inject)
self.become_pass = template.template(self.basedir, self.become_pass, inject)
except:
            # ignore password template errors, could be triggered by password characters #10468
pass
# make actual_user available as __magic__ ansible_ssh_user variable
inject['ansible_ssh_user'] = actual_user
try:
if actual_transport == 'accelerate':
# for accelerate, we stuff both ports into a single
# variable so that we don't have to mangle other function
# calls just to accommodate this one case
actual_port = [actual_port, self.accelerate_port]
elif actual_port is not None:
actual_port = int(template.template(self.basedir, actual_port, inject))
except ValueError, e:
result = dict(failed=True, msg="FAILED: Configured port \"%s\" is not a valid port, expected integer" % actual_port)
return ReturnData(host=host, comm_ok=False, result=result)
try:
if self.delegate_to or host != actual_host:
delegate_host = host
else:
delegate_host = None
conn = self.connector.connect(actual_host, actual_port, actual_user, actual_pass, actual_transport, actual_private_key_file, delegate_host)
default_shell = getattr(conn, 'default_shell', '')
shell_type = inject.get('ansible_shell_type')
if not shell_type:
if default_shell:
shell_type = default_shell
else:
shell_type = os.path.basename(C.DEFAULT_EXECUTABLE)
shell_plugin = utils.plugins.shell_loader.get(shell_type)
if shell_plugin is None:
shell_plugin = utils.plugins.shell_loader.get('sh')
conn.shell = shell_plugin
except errors.AnsibleConnectionFailed, e:
result = dict(failed=True, msg="FAILED: %s" % str(e))
return ReturnData(host=host, comm_ok=False, result=result)
tmp = ''
# action plugins may DECLARE via TRANSFERS_FILES = True that they need a remote tmp path working dir
if self._early_needs_tmp_path(module_name, handler):
tmp = self._make_tmp_path(conn)
# allow module args to work as a dictionary
# though it is usually a string
if isinstance(module_args, dict):
module_args = utils.serialize_args(module_args)
# render module_args and complex_args templates
try:
# When templating module_args, we need to be careful to ensure
# that no variables inadvertently (or maliciously) add params
# to the list of args. We do this by counting the number of k=v
# pairs before and after templating.
num_args_pre = self._count_module_args(module_args, allow_dupes=True)
module_args = template.template(self.basedir, module_args, inject, fail_on_undefined=self.error_on_undefined_vars)
num_args_post = self._count_module_args(module_args)
if num_args_pre != num_args_post:
raise errors.AnsibleError("A variable inserted a new parameter into the module args. " + \
"Be sure to quote variables if they contain equal signs (for example: \"{{var}}\").")
            # We also make sure nothing added any special flags for things
# like the command/shell module (ie. #USE_SHELL)
if '#USE_SHELL' in module_args:
raise errors.AnsibleError("A variable tried to add #USE_SHELL to the module arguments.")
complex_args = template.template(self.basedir, complex_args, inject, fail_on_undefined=self.error_on_undefined_vars)
except jinja2.exceptions.UndefinedError, e:
raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e))
# filter omitted arguments out from complex_args
if complex_args:
complex_args = dict(filter(lambda x: x[1] != self.omit_token, complex_args.iteritems()))
# Filter omitted arguments out from module_args.
# We do this with split_args instead of parse_kv to ensure
# that things are not unquoted/requoted incorrectly
args = split_args(module_args)
final_args = []
for arg in args:
if '=' in arg:
k,v = arg.split('=', 1)
if unquote(v) != self.omit_token:
final_args.append(arg)
else:
# not a k=v param, append it
final_args.append(arg)
module_args = ' '.join(final_args)
result = handler.run(conn, tmp, module_name, module_args, inject, complex_args)
# Code for do until feature
until = self.module_vars.get('until', None)
if until is not None and result.comm_ok:
inject[self.module_vars.get('register')] = result.result
cond = template.template(self.basedir, until, inject, expand_lists=False)
if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
retries = template.template(self.basedir, self.module_vars.get('retries'), inject, expand_lists=False)
delay = self.module_vars.get('delay')
for x in range(1, int(retries) + 1):
# template the delay, cast to float and sleep
delay = template.template(self.basedir, delay, inject, expand_lists=False)
delay = float(delay)
time.sleep(delay)
tmp = ''
if self._early_needs_tmp_path(module_name, handler):
tmp = self._make_tmp_path(conn)
result = handler.run(conn, tmp, module_name, module_args, inject, complex_args)
result.result['attempts'] = x
vv("Result from run %i is: %s" % (x, result.result))
inject[self.module_vars.get('register')] = result.result
cond = template.template(self.basedir, until, inject, expand_lists=False)
if utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
break
if result.result['attempts'] == retries and not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
result.result['failed'] = True
result.result['msg'] = "Task failed as maximum retries was encountered"
else:
result.result['attempts'] = 0
conn.close()
if not result.comm_ok:
# connection or parsing errors...
self.callbacks.on_unreachable(host, result.result)
else:
data = result.result
# https://github.com/ansible/ansible/issues/4958
if hasattr(sys.stdout, "isatty"):
if "stdout" in data and sys.stdout.isatty():
if not string_functions.isprintable(data['stdout']):
data['stdout'] = ''.join(c for c in data['stdout'] if string_functions.isprintable(c))
if 'item' in inject:
result.result['item'] = inject['item']
result.result['invocation'] = dict(
module_args=module_args,
module_name=module_name
)
changed_when = self.module_vars.get('changed_when')
failed_when = self.module_vars.get('failed_when')
if (changed_when is not None or failed_when is not None) and self.background == 0:
register = self.module_vars.get('register')
if register is not None:
if 'stdout' in data:
data['stdout_lines'] = data['stdout'].splitlines()
inject[register] = data
# only run the final checks if the async_status has finished,
# or if we're not running an async_status check at all
if (module_name == 'async_status' and "finished" in data) or module_name != 'async_status':
if changed_when is not None and 'skipped' not in data:
data['changed'] = utils.check_conditional(changed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars)
if failed_when is not None and 'skipped' not in data:
data['failed_when_result'] = data['failed'] = utils.check_conditional(failed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars)
if is_chained:
# no callbacks
return result
if 'skipped' in data:
self.callbacks.on_skipped(host, inject.get('item',None))
if self.no_log:
data = utils.censor_unlogged_data(data)
if not result.is_successful():
ignore_errors = self.module_vars.get('ignore_errors', False)
self.callbacks.on_failed(host, data, ignore_errors)
else:
if self.diff:
self.callbacks.on_file_diff(conn.host, result.diff)
self.callbacks.on_ok(host, data)
return result
def _early_needs_tmp_path(self, module_name, handler):
''' detect if a tmp path should be created before the handler is called '''
if module_name in utils.plugins.action_loader:
return getattr(handler, 'TRANSFERS_FILES', False)
# other modules never need tmp path at early stage
return False
def _late_needs_tmp_path(self, conn, tmp, module_style):
if "tmp" in tmp:
# tmp has already been created
return False
if not conn.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self.become_method == 'su':
# tmp is necessary to store module source code
return True
if not conn.has_pipelining:
# tmp is necessary to store the module source code
# or we want to keep the files on the target system
return True
if module_style != "new":
# even when conn has pipelining, old style modules need tmp to store arguments
return True
return False
# *****************************************************
def _low_level_exec_command(self, conn, cmd, tmp, sudoable=False,
executable=None, become=False, in_data=None):
''' execute a command string over SSH, return the output '''
        # this can be skipped with powershell modules when there is no Windows analog to a command (like chmod)
if cmd:
if executable is None:
executable = C.DEFAULT_EXECUTABLE
become_user = self.become_user
# compare connection user to (su|sudo)_user and disable if the same
# assume connection type is local if no user attribute
this_user = getattr(conn, 'user', getpass.getuser())
if (not become and this_user == become_user):
sudoable = False
become = False
rc, stdin, stdout, stderr = conn.exec_command(cmd,
tmp,
become_user=become_user,
sudoable=sudoable,
executable=executable,
in_data=in_data)
if type(stdout) not in [ str, unicode ]:
out = ''.join(stdout.readlines())
else:
out = stdout
if type(stderr) not in [ str, unicode ]:
err = ''.join(stderr.readlines())
else:
err = stderr
if rc is not None:
return dict(rc=rc, stdout=out, stderr=err)
else:
return dict(stdout=out, stderr=err)
return dict(rc=None, stdout='', stderr='')
# *****************************************************
def _remote_chmod(self, conn, mode, path, tmp, sudoable=False, become=False):
''' issue a remote chmod command '''
cmd = conn.shell.chmod(mode, path)
return self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, become=become)
# *****************************************************
def _remote_expand_user(self, conn, path, tmp):
''' takes a remote path and performs tilde expansion on the remote host '''
if not path.startswith('~'):
return path
split_path = path.split(os.path.sep, 1)
expand_path = split_path[0]
if expand_path == '~':
if self.become and self.become_user:
expand_path = '~%s' % self.become_user
cmd = conn.shell.expand_user(expand_path)
data = self._low_level_exec_command(conn, cmd, tmp, sudoable=False, become=False)
initial_fragment = utils.last_non_blank_line(data['stdout'])
if not initial_fragment:
# Something went wrong trying to expand the path remotely. Return
# the original string
return path
if len(split_path) > 1:
return conn.shell.join_path(initial_fragment, *split_path[1:])
else:
return initial_fragment
# *****************************************************
def _remote_checksum(self, conn, tmp, path, inject):
''' takes a remote checksum and returns 1 if no file '''
# Lookup the python interp from the host or delegate
# host == inven_host when there is no delegate
host = inject['inventory_hostname']
if 'delegate_to' in inject:
delegate = inject['delegate_to']
if delegate:
# host == None when the delegate is not in inventory
host = None
# delegate set, check whether the delegate has inventory vars
delegate = template.template(self.basedir, delegate, inject)
if delegate in inject['hostvars']:
# host == delegate if we need to lookup the
# python_interpreter from the delegate's inventory vars
host = delegate
if host:
python_interp = inject['hostvars'][host].get('ansible_python_interpreter', 'python')
else:
python_interp = 'python'
cmd = conn.shell.checksum(path, python_interp)
#TODO: remove this horrible hack and find way to get checksum to work with other privilege escalation methods
if self.become_method == 'sudo':
sudoable = True
else:
sudoable = False
data = self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable)
data2 = utils.last_non_blank_line(data['stdout'])
try:
if data2 == '':
# this may happen if the connection to the remote server
# failed, so just return "INVALIDCHECKSUM" to avoid errors
return "INVALIDCHECKSUM"
else:
return data2.split()[0]
except IndexError:
sys.stderr.write("warning: Calculating checksum failed unusually, please report this to the list so it can be fixed\n")
sys.stderr.write("command: %s\n" % cmd)
sys.stderr.write("----\n")
sys.stderr.write("output: %s\n" % data)
sys.stderr.write("----\n")
# this will signal that it changed and allow things to keep going
return "INVALIDCHECKSUM"
# *****************************************************
def _make_tmp_path(self, conn):
''' make and return a temporary path on a remote box '''
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
use_system_tmp = False
if self.become and self.become_user != 'root':
use_system_tmp = True
tmp_mode = None
if self.remote_user != 'root' or (self.become and self.become_user != 'root'):
tmp_mode = 'a+rx'
cmd = conn.shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
result = self._low_level_exec_command(conn, cmd, None, sudoable=False)
# error handling on this seems a little aggressive?
if result['rc'] != 0:
if result['rc'] == 5:
output = 'Authentication failure.'
elif result['rc'] == 255 and self.transport in ['ssh']:
if utils.VERBOSITY > 3:
output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr'])
else:
output = 'SSH encountered an unknown error during the connection. We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue'
elif 'No space left on device' in result['stderr']:
output = result['stderr']
else:
output = 'Authentication or permission failure. In some cases, you may have been able to authenticate and did not have permissions on the remote directory. Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp". Failed command was: %s, exited with result %d' % (cmd, result['rc'])
if 'stdout' in result and result['stdout'] != '':
output = output + ": %s" % result['stdout']
raise errors.AnsibleError(output)
rc = conn.shell.join_path(utils.last_non_blank_line(result['stdout']).strip(), '')
# Catch failure conditions, files should never be
# written to locations in /.
if rc == '/':
            raise errors.AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))
return rc
# *****************************************************
def _remove_tmp_path(self, conn, tmp_path):
''' Remove a tmp_path. '''
if "-tmp-" in tmp_path:
cmd = conn.shell.remove(tmp_path, recurse=True)
self._low_level_exec_command(conn, cmd, None, sudoable=False)
# If we have gotten here we have a working ssh configuration.
# If ssh breaks we could leave tmp directories out on the remote system.
# *****************************************************
def _copy_module(self, conn, tmp, module_name, module_args, inject, complex_args=None):
''' transfer a module over SFTP, does not run it '''
(
module_style,
module_shebang,
module_data
) = self._configure_module(conn, module_name, module_args, inject, complex_args)
module_remote_path = conn.shell.join_path(tmp, module_name)
self._transfer_str(conn, tmp, module_name, module_data)
return (module_remote_path, module_style, module_shebang)
# *****************************************************
def _configure_module(self, conn, module_name, module_args, inject, complex_args=None):
''' find module and configure it '''
# Search module path(s) for named module.
module_suffixes = getattr(conn, 'default_suffixes', None)
module_path = utils.plugins.module_finder.find_plugin(module_name, module_suffixes)
if module_path is None:
module_path2 = utils.plugins.module_finder.find_plugin('ping', module_suffixes)
if module_path2 is not None:
raise errors.AnsibleFileNotFound("module %s not found in configured module paths" % (module_name))
else:
raise errors.AnsibleFileNotFound("module %s not found in configured module paths. Additionally, core modules are missing. If this is a checkout, run 'git submodule update --init --recursive' to correct this problem." % (module_name))
# insert shared code and arguments into the module
(module_data, module_style, module_shebang) = module_replacer.modify_module(
module_path, complex_args, module_args, inject
)
return (module_style, module_shebang, module_data)
# *****************************************************
def _parallel_exec(self, hosts):
        ''' handles multiprocessing when more than 1 fork is required '''
manager = multiprocessing.Manager()
job_queue = manager.Queue()
for host in hosts:
job_queue.put(host)
result_queue = manager.Queue()
try:
fileno = sys.stdin.fileno()
except ValueError:
fileno = None
workers = []
for i in range(self.forks):
new_stdin = None
if fileno is not None:
try:
new_stdin = os.fdopen(os.dup(fileno))
except OSError, e:
# couldn't dupe stdin, most likely because it's
# not a valid file descriptor, so we just rely on
# using the one that was passed in
pass
prc = multiprocessing.Process(target=_executor_hook,
args=(job_queue, result_queue, new_stdin))
prc.start()
workers.append(prc)
try:
for worker in workers:
worker.join()
except KeyboardInterrupt:
for worker in workers:
worker.terminate()
worker.join()
results = []
try:
while not result_queue.empty():
results.append(result_queue.get(block=False))
except socket.error:
raise errors.AnsibleError("<interrupted>")
return results
# *****************************************************
def _partition_results(self, results):
''' separate results by ones we contacted & ones we didn't '''
if results is None:
return None
results2 = dict(contacted={}, dark={})
for result in results:
host = result.host
if host is None:
raise Exception("internal error, host not set")
if result.communicated_ok():
results2["contacted"][host] = result.result
else:
results2["dark"][host] = result.result
# hosts which were contacted but never got a chance to return
for host in self.run_hosts:
if not (host in results2['dark'] or host in results2['contacted']):
results2["dark"][host] = {}
return results2
# *****************************************************
def run(self):
''' xfer & run module on all matched hosts '''
# find hosts that match the pattern
if not self.run_hosts:
self.run_hosts = self.inventory.list_hosts(self.pattern)
hosts = self.run_hosts
if len(hosts) == 0:
self.callbacks.on_no_hosts()
return dict(contacted={}, dark={})
global multiprocessing_runner
multiprocessing_runner = self
results = None
# Check if this is an action plugin. Some of them are designed
        # to be run once per group of hosts. Example module: pause,
        # run once per hostgroup, rather than pausing once for each
# host.
p = utils.plugins.action_loader.get(self.module_name, self)
if self.forks == 0 or self.forks > len(hosts):
self.forks = len(hosts)
if (p and (getattr(p, 'BYPASS_HOST_LOOP', None)) or self.run_once):
# Expose the current hostgroup to the bypassing plugins
self.host_set = hosts
# We aren't iterating over all the hosts in this
# group. So, just choose the "delegate_to" host if that is defined and is
# one of the targeted hosts, otherwise pick the first host in our group to
# construct the conn object with.
if self.delegate_to is not None and self.delegate_to in hosts:
host = self.delegate_to
else:
host = hosts[0]
result_data = self._executor(host, None).result
# Create a ResultData item for each host in this group
# using the returned result. If we didn't do this we would
# get false reports of dark hosts.
results = [ ReturnData(host=h, result=result_data, comm_ok=True) \
for h in hosts ]
del self.host_set
elif self.forks > 1:
try:
results = self._parallel_exec(hosts)
except IOError, ie:
print ie.errno
if ie.errno == 32:
# broken pipe from Ctrl+C
raise errors.AnsibleError("interrupted")
raise
else:
results = [ self._executor(h, None) for h in hosts ]
return self._partition_results(results)
# *****************************************************
def run_async(self, time_limit):
''' Run this module asynchronously and return a poller. '''
self.background = time_limit
results = self.run()
return results, poller.AsyncPoller(results, self)
# *****************************************************
def noop_on_check(self, inject):
        ''' Should the runner run in check mode or not? '''
# initialize self.always_run on first call
if self.always_run is None:
self.always_run = self.module_vars.get('always_run', False)
self.always_run = check_conditional(
self.always_run, self.basedir, inject, fail_on_undefined=True)
return (self.check and not self.always_run)
| gpl-3.0 | 4,361,940,887,251,713,000 | -7,523,003,955,366,520,000 | 44.896506 | 323 | 0.576115 | false |
lesserwhirls/scipy-cwt | scipy/odr/models.py | 57 | 4522 | """ Collection of Model instances for use with the odrpack fitting package.
"""
import numpy as np
from scipy.odr.odrpack import Model
def _lin_fcn(B, x):
a, b = B[0], B[1:]
b.shape = (b.shape[0], 1)
return a + (x*b).sum(axis=0)
def _lin_fjb(B, x):
a = np.ones(x.shape[-1], float)
res = np.concatenate((a, x.ravel()))
res.shape = (B.shape[-1], x.shape[-1])
return res
def _lin_fjd(B, x):
b = B[1:]
b = np.repeat(b, (x.shape[-1],)*b.shape[-1],axis=0)
b.shape = x.shape
return b
def _lin_est(data):
# Eh. The answer is analytical, so just return all ones.
# Don't return zeros since that will interfere with
# ODRPACK's auto-scaling procedures.
if len(data.x.shape) == 2:
m = data.x.shape[0]
else:
m = 1
return np.ones((m + 1,), float)
def _poly_fcn(B, x, powers):
a, b = B[0], B[1:]
b.shape = (b.shape[0], 1)
return a + np.sum(b * np.power(x, powers), axis=0)
def _poly_fjacb(B, x, powers):
res = np.concatenate((np.ones(x.shape[-1], float), np.power(x,
powers).flat))
res.shape = (B.shape[-1], x.shape[-1])
return res
def _poly_fjacd(B, x, powers):
b = B[1:]
b.shape = (b.shape[0], 1)
b = b * powers
return np.sum(b * np.power(x, powers-1),axis=0)
def _exp_fcn(B, x):
return B[0] + np.exp(B[1] * x)
def _exp_fjd(B, x):
return B[1] * np.exp(B[1] * x)
def _exp_fjb(B, x):
res = np.concatenate((np.ones(x.shape[-1], float), x * np.exp(B[1] * x)))
res.shape = (2, x.shape[-1])
return res
def _exp_est(data):
# Eh.
return np.array([1., 1.])
multilinear = Model(_lin_fcn, fjacb=_lin_fjb,
fjacd=_lin_fjd, estimate=_lin_est,
meta={'name': 'Arbitrary-dimensional Linear',
'equ':'y = B_0 + Sum[i=1..m, B_i * x_i]',
'TeXequ':'$y=\\beta_0 + \sum_{i=1}^m \\beta_i x_i$'})
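# Rough usage sketch (illustrative only; assumes 1-D arrays x and y exist):
#
#   from scipy.odr import Data, ODR
#   output = ODR(Data(x, y), multilinear).run()   # beta0 comes from _lin_est
#   print(output.beta)                            # [B_0, B_1]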
def polynomial(order):
""" Factory function for a general polynomial model.
Parameters
----------
order : int or sequence
If an integer, it becomes the order of the polynomial to fit. If
a sequence of numbers, then these are the explicit powers in the
polynomial.
A constant term (power 0) is always included, so don't include 0.
Thus, polynomial(n) is equivalent to polynomial(range(1, n+1)).
Returns
-------
model : Model instance
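    Examples
    --------
    A rough sketch (assumes 1-D data arrays ``x`` and ``y`` are defined)::
        from scipy.odr import Data, ODR
        quad_model = polynomial(2)   # y = B_0 + B_1*x + B_2*x**2
        output = ODR(Data(x, y), quad_model).run()
        print(output.beta)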
"""
powers = np.asarray(order)
if powers.shape == ():
# Scalar.
powers = np.arange(1, powers + 1)
powers.shape = (len(powers), 1)
len_beta = len(powers) + 1
def _poly_est(data, len_beta=len_beta):
# Eh. Ignore data and return all ones.
return np.ones((len_beta,), float)
return Model(_poly_fcn, fjacd=_poly_fjacd, fjacb=_poly_fjacb,
estimate=_poly_est, extra_args=(powers,),
meta={'name': 'Sorta-general Polynomial',
'equ':'y = B_0 + Sum[i=1..%s, B_i * (x**i)]' % (len_beta-1),
'TeXequ':'$y=\\beta_0 + \sum_{i=1}^{%s} \\beta_i x^i$' %\
(len_beta-1)})
exponential = Model(_exp_fcn, fjacd=_exp_fjd, fjacb=_exp_fjb,
estimate=_exp_est, meta={'name':'Exponential',
'equ':'y= B_0 + exp(B_1 * x)',
'TeXequ':'$y=\\beta_0 + e^{\\beta_1 x}$'})
def _unilin(B, x):
return x*B[0] + B[1]
def _unilin_fjd(B, x):
return np.ones(x.shape, float) * B[0]
def _unilin_fjb(B, x):
_ret = np.concatenate((x, np.ones(x.shape, float)))
_ret.shape = (2,) + x.shape
return _ret
def _unilin_est(data):
return (1., 1.)
def _quadratic(B, x):
return x*(x*B[0] + B[1]) + B[2]
def _quad_fjd(B, x):
return 2*x*B[0] + B[1]
def _quad_fjb(B, x):
_ret = np.concatenate((x*x, x, np.ones(x.shape, float)))
_ret.shape = (3,) + x.shape
return _ret
def _quad_est(data):
return (1.,1.,1.)
unilinear = Model(_unilin, fjacd=_unilin_fjd, fjacb=_unilin_fjb,
estimate=_unilin_est, meta={'name': 'Univariate Linear',
'equ': 'y = B_0 * x + B_1',
'TeXequ': '$y = \\beta_0 x + \\beta_1$'})
quadratic = Model(_quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb,
estimate=_quad_est, meta={'name': 'Quadratic',
'equ': 'y = B_0*x**2 + B_1*x + B_2',
'TeXequ': '$y = \\beta_0 x^2 + \\beta_1 x + \\beta_2'})
#### EOF #######################################################################
| bsd-3-clause | 4,210,629,467,124,670,500 | -311,914,228,658,175,000 | 27.2625 | 80 | 0.515922 | false |
resmo/ansible | lib/ansible/modules/network/netvisor/pn_admin_syslog.py | 38 | 6606 | #!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_admin_syslog
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: "2.8"
short_description: CLI command to create/modify/delete admin-syslog
description:
- This module can be used to create the scope and other parameters of syslog event collection.
- This module can be used to modify parameters of syslog event collection.
- This module can be used to delete the scope and other parameters of syslog event collection.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: False
type: str
state:
description:
- State the action to perform. Use C(present) to create admin-syslog and
        C(absent) to delete admin-syslog and C(update) to modify the admin-syslog.
required: True
type: str
choices: ['present', 'absent', 'update']
pn_scope:
description:
- Scope of the system log.
required: False
type: str
choices: ['local', 'fabric']
pn_host:
description:
- Hostname to log system events.
required: False
type: str
pn_port:
description:
- Host port.
required: False
type: str
pn_transport:
description:
- Transport for log events - tcp/tls or udp.
required: False
type: str
choices: ['tcp-tls', 'udp']
default: 'udp'
pn_message_format:
description:
- message-format for log events - structured or legacy.
required: False
choices: ['structured', 'legacy']
type: str
pn_name:
description:
- name of the system log.
required: False
type: str
"""
EXAMPLES = """
- name: admin-syslog functionality
pn_admin_syslog:
pn_cliswitch: "sw01"
state: "absent"
pn_name: "foo"
pn_scope: "local"
- name: admin-syslog functionality
pn_admin_syslog:
pn_cliswitch: "sw01"
state: "present"
pn_name: "foo"
pn_scope: "local"
pn_host: "166.68.224.46"
pn_message_format: "structured"
- name: admin-syslog functionality
pn_admin_syslog:
pn_cliswitch: "sw01"
state: "update"
pn_name: "foo"
pn_host: "166.68.224.10"
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the admin-syslog command.
returned: always
type: list
stderr:
description: set of error responses from the admin-syslog command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
from ansible.module_utils.network.netvisor.netvisor import run_commands
def check_cli(module, cli):
"""
This method checks for idempotency using the admin-syslog-show command.
If a user with given name exists, return as True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
"""
name = module.params['pn_name']
cli += ' admin-syslog-show format name no-show-headers'
out = run_commands(module, cli)[1]
out = out.split()
return True if name in out else False
def main():
""" This section is for arguments parsing """
state_map = dict(
present='admin-syslog-create',
absent='admin-syslog-delete',
update='admin-syslog-modify'
)
module = AnsibleModule(
argument_spec=dict(
pn_cliswitch=dict(required=False, type='str'),
state=dict(required=True, type='str',
choices=state_map.keys()),
pn_scope=dict(required=False, type='str',
choices=['local', 'fabric']),
pn_host=dict(required=False, type='str'),
pn_port=dict(required=False, type='str'),
pn_transport=dict(required=False, type='str',
choices=['tcp-tls', 'udp'], default='udp'),
pn_message_format=dict(required=False, type='str',
choices=['structured', 'legacy']),
pn_name=dict(required=False, type='str'),
),
required_if=(
['state', 'present', ['pn_name', 'pn_host', 'pn_scope']],
['state', 'absent', ['pn_name']],
['state', 'update', ['pn_name']]
),
required_one_of=[['pn_port', 'pn_message_format',
'pn_host', 'pn_transport', 'pn_scope']]
)
# Accessing the arguments
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
scope = module.params['pn_scope']
host = module.params['pn_host']
port = module.params['pn_port']
transport = module.params['pn_transport']
message_format = module.params['pn_message_format']
name = module.params['pn_name']
command = state_map[state]
# Building the CLI command string
cli = pn_cli(module, cliswitch)
SYSLOG_EXISTS = check_cli(module, cli)
cli += ' %s name %s ' % (command, name)
if command == 'admin-syslog-modify':
if SYSLOG_EXISTS is False:
module.fail_json(
failed=True,
msg='admin syslog with name %s does not exist' % name
)
if command == 'admin-syslog-delete':
if SYSLOG_EXISTS is False:
module.exit_json(
skipped=True,
msg='admin syslog with name %s does not exist' % name
)
if command == 'admin-syslog-create':
if SYSLOG_EXISTS is True:
module.exit_json(
skipped=True,
msg='admin syslog user with name %s already exists' % name
)
if command == 'admin-syslog-create':
if scope:
cli += ' scope ' + scope
if command != 'admin-syslog-delete':
if host:
cli += ' host ' + host
if port:
cli += ' port ' + port
if transport:
cli += ' transport ' + transport
if message_format:
cli += ' message-format ' + message_format
run_cli(module, cli, state_map)
if __name__ == '__main__':
main()
| gpl-3.0 | -2,213,916,416,219,937,500 | -4,141,367,556,518,737,000 | 27.721739 | 96 | 0.60218 | false |
koniiiik/django | django/utils/text.py | 7 | 14950 | from __future__ import unicode_literals
import re
import unicodedata
from gzip import GzipFile
from io import BytesIO
from django.utils import six
from django.utils.encoding import force_text
from django.utils.functional import (
SimpleLazyObject, keep_lazy, keep_lazy_text, lazy,
)
from django.utils.safestring import SafeText, mark_safe
from django.utils.six.moves import html_entities
from django.utils.translation import pgettext, ugettext as _, ugettext_lazy
if six.PY2:
# Import force_unicode even though this module doesn't use it, because some
# people rely on it being here.
from django.utils.encoding import force_unicode # NOQA
# Capitalizes the first letter of a string.
def capfirst(x):
return x and force_text(x)[0].upper() + force_text(x)[1:]
capfirst = keep_lazy_text(capfirst)
# Set up regular expressions
re_words = re.compile(r'<.*?>|((?:\w[-\w]*|&.*?;)+)', re.U | re.S)
re_chars = re.compile(r'<.*?>|(.)', re.U | re.S)
re_tag = re.compile(r'<(/)?([^ ]+?)(?:(\s*/)| .*?)?>', re.S)
re_newlines = re.compile(r'\r\n|\r') # Used in normalize_newlines
re_camel_case = re.compile(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
@keep_lazy_text
def wrap(text, width):
"""
A word-wrap function that preserves existing line breaks. Expects that
existing line breaks are posix newlines.
All white space is preserved except added line breaks consume the space on
which they break the line.
Long words are not wrapped, so the output text may have lines longer than
``width``.
"""
text = force_text(text)
def _generator():
for line in text.splitlines(True): # True keeps trailing linebreaks
max_width = min((line.endswith('\n') and width + 1 or width), width)
while len(line) > max_width:
space = line[:max_width + 1].rfind(' ') + 1
if space == 0:
space = line.find(' ') + 1
if space == 0:
yield line
line = ''
break
yield '%s\n' % line[:space - 1]
line = line[space:]
max_width = min((line.endswith('\n') and width + 1 or width), width)
if line:
yield line
return ''.join(_generator())
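# Illustrative example of ``wrap`` (a sketch; the output follows from the
# algorithm above):
#
#     >>> wrap('a short line that needs wrapping', 10)
#     'a short\nline that\nneeds\nwrapping'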
class Truncator(SimpleLazyObject):
"""
An object used to truncate text, either by characters or words.
"""
def __init__(self, text):
super(Truncator, self).__init__(lambda: force_text(text))
def add_truncation_text(self, text, truncate=None):
if truncate is None:
truncate = pgettext(
'String to return when truncating text',
'%(truncated_text)s...')
truncate = force_text(truncate)
if '%(truncated_text)s' in truncate:
return truncate % {'truncated_text': text}
# The truncation text didn't contain the %(truncated_text)s string
# replacement argument so just append it to the text.
if text.endswith(truncate):
# But don't append the truncation text if the current text already
# ends in this.
return text
return '%s%s' % (text, truncate)
def chars(self, num, truncate=None, html=False):
"""
Returns the text truncated to be no longer than the specified number
of characters.
Takes an optional argument of what should be used to notify that the
string has been truncated, defaulting to a translatable string of an
ellipsis (...).
"""
self._setup()
length = int(num)
text = unicodedata.normalize('NFC', self._wrapped)
# Calculate the length to truncate to (max length - end_text length)
truncate_len = length
for char in self.add_truncation_text('', truncate):
if not unicodedata.combining(char):
truncate_len -= 1
if truncate_len == 0:
break
if html:
return self._truncate_html(length, truncate, text, truncate_len, False)
return self._text_chars(length, truncate, text, truncate_len)
def _text_chars(self, length, truncate, text, truncate_len):
"""
Truncates a string after a certain number of chars.
"""
s_len = 0
end_index = None
for i, char in enumerate(text):
if unicodedata.combining(char):
# Don't consider combining characters
# as adding to the string length
continue
s_len += 1
if end_index is None and s_len > truncate_len:
end_index = i
if s_len > length:
# Return the truncated string
return self.add_truncation_text(text[:end_index or 0],
truncate)
# Return the original string since no truncation was necessary
return text
def words(self, num, truncate=None, html=False):
"""
Truncates a string after a certain number of words. Takes an optional
argument of what should be used to notify that the string has been
truncated, defaulting to ellipsis (...).
"""
self._setup()
length = int(num)
if html:
return self._truncate_html(length, truncate, self._wrapped, length, True)
return self._text_words(length, truncate)
def _text_words(self, length, truncate):
"""
Truncates a string after a certain number of words.
Newlines in the string will be stripped.
"""
words = self._wrapped.split()
if len(words) > length:
words = words[:length]
return self.add_truncation_text(' '.join(words), truncate)
return ' '.join(words)
def _truncate_html(self, length, truncate, text, truncate_len, words):
"""
Truncates HTML to a certain number of chars (not counting tags and
comments), or, if words is True, then to a certain number of words.
Closes opened tags if they were correctly closed in the given HTML.
Newlines in the HTML are preserved.
"""
if words and length <= 0:
return ''
html4_singlets = (
'br', 'col', 'link', 'base', 'img',
'param', 'area', 'hr', 'input'
)
# Count non-HTML chars/words and keep note of open tags
pos = 0
end_text_pos = 0
current_len = 0
open_tags = []
regex = re_words if words else re_chars
while current_len <= length:
m = regex.search(text, pos)
if not m:
# Checked through whole string
break
pos = m.end(0)
if m.group(1):
# It's an actual non-HTML word or char
current_len += 1
if current_len == truncate_len:
end_text_pos = pos
continue
# Check for tag
tag = re_tag.match(m.group(0))
if not tag or current_len >= truncate_len:
# Don't worry about non tags or tags after our truncate point
continue
closing_tag, tagname, self_closing = tag.groups()
# Element names are always case-insensitive
tagname = tagname.lower()
if self_closing or tagname in html4_singlets:
pass
elif closing_tag:
# Check for match in open tags list
try:
i = open_tags.index(tagname)
except ValueError:
pass
else:
# SGML: An end tag closes, back to the matching start tag,
# all unclosed intervening start tags with omitted end tags
open_tags = open_tags[i + 1:]
else:
# Add it to the start of the open tags list
open_tags.insert(0, tagname)
if current_len <= length:
return text
out = text[:end_text_pos]
truncate_text = self.add_truncation_text('', truncate)
if truncate_text:
out += truncate_text
# Close any tags still open
for tag in open_tags:
out += '</%s>' % tag
# Return string
return out
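# Illustrative usage sketch for ``Truncator`` (values follow from the methods
# above; the default truncation text is '...'):
#
#     >>> Truncator('The quick brown fox jumped').words(3)
#     'The quick brown...'
#     >>> Truncator('<p>The quick brown fox</p>').words(2, html=True)
#     '<p>The quick...</p>'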
@keep_lazy_text
def get_valid_filename(s):
"""
Returns the given string converted to a string that can be used for a clean
filename. Specifically, leading and trailing spaces are removed; other
spaces are converted to underscores; and anything that is not a unicode
alphanumeric, dash, underscore, or dot, is removed.
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
"""
s = force_text(s).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
@keep_lazy_text
def get_text_list(list_, last_word=ugettext_lazy('or')):
"""
>>> get_text_list(['a', 'b', 'c', 'd'])
'a, b, c or d'
>>> get_text_list(['a', 'b', 'c'], 'and')
'a, b and c'
>>> get_text_list(['a', 'b'], 'and')
'a and b'
>>> get_text_list(['a'])
'a'
>>> get_text_list([])
''
"""
if len(list_) == 0:
return ''
if len(list_) == 1:
return force_text(list_[0])
return '%s %s %s' % (
# Translators: This string is used as a separator between list elements
_(', ').join(force_text(i) for i in list_[:-1]),
force_text(last_word), force_text(list_[-1]))
@keep_lazy_text
def normalize_newlines(text):
"""Normalizes CRLF and CR newlines to just LF."""
text = force_text(text)
return re_newlines.sub('\n', text)
@keep_lazy_text
def phone2numeric(phone):
"""Converts a phone number with letters into its numeric equivalent."""
char2number = {
'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3', 'g': '4',
'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5', 'm': '6', 'n': '6',
'o': '6', 'p': '7', 'q': '7', 'r': '7', 's': '7', 't': '8', 'u': '8',
'v': '8', 'w': '9', 'x': '9', 'y': '9', 'z': '9',
}
return ''.join(char2number.get(c, c) for c in phone.lower())
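# Illustrative example (sketch): non-letter characters pass through unchanged.
#
#     >>> phone2numeric('1-800-FLOWERS')
#     '1-800-3569377'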
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
zbuf = BytesIO()
with GzipFile(mode='wb', compresslevel=6, fileobj=zbuf, mtime=0) as zfile:
zfile.write(s)
return zbuf.getvalue()
class StreamingBuffer(object):
def __init__(self):
self.vals = []
def write(self, val):
self.vals.append(val)
def read(self):
if not self.vals:
return b''
ret = b''.join(self.vals)
self.vals = []
return ret
def flush(self):
return
def close(self):
return
# Like compress_string, but for iterators of strings.
def compress_sequence(sequence):
buf = StreamingBuffer()
with GzipFile(mode='wb', compresslevel=6, fileobj=buf, mtime=0) as zfile:
# Output headers...
yield buf.read()
for item in sequence:
zfile.write(item)
data = buf.read()
if data:
yield data
yield buf.read()
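# Usage sketch (illustrative; joining the yielded chunks gives a complete gzip
# stream that the standard gzip module can decompress):
#
#     >>> import gzip, io
#     >>> raw = b''.join(compress_sequence([b'hello ', b'world']))
#     >>> gzip.GzipFile(fileobj=io.BytesIO(raw)).read()
#     b'hello world'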
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
smart_split_re = re.compile(r"""
((?:
[^\s'"]*
(?:
(?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
[^\s'"]*
)+
) | \S+)
""", re.VERBOSE)
def smart_split(text):
r"""
Generator that splits a string by spaces, leaving quoted phrases together.
Supports both single and double quotes, and supports escaping quotes with
backslashes. In the output, strings will keep their initial and trailing
quote marks and escaped quotes will remain escaped (the results can then
be further processed with unescape_string_literal()).
>>> list(smart_split(r'This is "a person\'s" test.'))
['This', 'is', '"a person\\\'s"', 'test.']
>>> list(smart_split(r"Another 'person\'s' test."))
['Another', "'person\\'s'", 'test.']
>>> list(smart_split(r'A "\"funky\" style" test.'))
['A', '"\\"funky\\" style"', 'test.']
"""
text = force_text(text)
for bit in smart_split_re.finditer(text):
yield bit.group(0)
def _replace_entity(match):
text = match.group(1)
if text[0] == '#':
text = text[1:]
try:
if text[0] in 'xX':
c = int(text[1:], 16)
else:
c = int(text)
return six.unichr(c)
except ValueError:
return match.group(0)
else:
try:
return six.unichr(html_entities.name2codepoint[text])
except (ValueError, KeyError):
return match.group(0)
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
@keep_lazy_text
def unescape_entities(text):
return _entity_re.sub(_replace_entity, force_text(text))
@keep_lazy_text
def unescape_string_literal(s):
r"""
Convert quoted string literals to unquoted strings with escaped quotes and
backslashes unquoted::
>>> unescape_string_literal('"abc"')
'abc'
>>> unescape_string_literal("'abc'")
'abc'
>>> unescape_string_literal('"a \"bc\""')
'a "bc"'
>>> unescape_string_literal("'\'ab\' c'")
"'ab' c"
"""
if s[0] not in "\"'" or s[-1] != s[0]:
raise ValueError("Not a string literal: %r" % s)
quote = s[0]
return s[1:-1].replace(r'\%s' % quote, quote).replace(r'\\', '\\')
@keep_lazy(six.text_type, SafeText)
def slugify(value, allow_unicode=False):
"""
Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
Remove characters that aren't alphanumerics, underscores, or hyphens.
Convert to lowercase. Also strip leading and trailing whitespace.
"""
value = force_text(value)
if allow_unicode:
value = unicodedata.normalize('NFKC', value)
value = re.sub(r'[^\w\s-]', '', value, flags=re.U).strip().lower()
return mark_safe(re.sub(r'[-\s]+', '-', value, flags=re.U))
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value).strip().lower()
return mark_safe(re.sub(r'[-\s]+', '-', value))
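# Illustrative examples (sketch):
#
#     >>> slugify(' Joel is a Slug ')
#     'joel-is-a-slug'
#     >>> slugify('straße für alle', allow_unicode=True)
#     'straße-für-alle'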
def camel_case_to_spaces(value):
"""
Splits CamelCase and converts to lower case. Also strips leading and
trailing whitespace.
"""
return re_camel_case.sub(r' \1', value).strip().lower()
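# Illustrative example (sketch):
#
#     >>> camel_case_to_spaces('getHTTPResponseCode')
#     'get http response code'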
def _format_lazy(format_string, *args, **kwargs):
"""
Apply str.format() on 'format_string' where format_string, args,
and/or kwargs might be lazy.
"""
return format_string.format(*args, **kwargs)
format_lazy = lazy(_format_lazy, six.text_type)
| bsd-3-clause | 6,018,476,127,858,814,000 | -2,294,448,612,792,369,200 | 32.44519 | 90 | 0.557726 | false |
dropbox/changes | changes/listeners/mail.py | 1 | 8772 | from __future__ import absolute_import, print_function
from itertools import imap
import logging
import toronado
from email.utils import parseaddr
from flask import current_app, render_template
from flask_mail import Message, sanitize_address
from jinja2 import Markup
from typing import List # NOQA
from changes.config import db, mail
from changes.constants import Result, Status
from changes.db.utils import try_create
from changes.lib import build_context_lib, build_type
from changes.lib.build_context_lib import CollectionContext # NOQA
from changes.models.event import Event, EventType
from changes.models.build import Build
from changes.models.job import Job
from changes.models.jobplan import JobPlan
from changes.models.project import ProjectOption
def filter_recipients(email_list, domain_whitelist=None):
"""
Returns emails from email_list that have been white-listed by
domain_whitelist.
"""
if domain_whitelist is None:
domain_whitelist = current_app.config['MAIL_DOMAIN_WHITELIST']
if not domain_whitelist:
return email_list
return [
e for e in email_list
if parseaddr(e)[1].split('@', 1)[-1] in domain_whitelist
]
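# Illustrative behaviour sketch (addresses and whitelist are made up):
#
#     >>> filter_recipients(['Jane Doe <[email protected]>', '[email protected]'],
#     ...                   domain_whitelist=['example.com'])
#     ['Jane Doe <[email protected]>']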
class MailNotificationHandler(object):
logger = logging.getLogger('mail')
def send(self, msg, build):
msg.recipients = filter_recipients(msg.recipients)
if not msg.recipients:
self.logger.info(
'Exiting for collection_id={} because its message has no '
'recipients.'.format(build.collection_id))
return
event = try_create(Event, where={
'type': EventType.email,
'item_id': build.collection_id,
'data': {
'triggering_build_id': build.id.hex,
'recipients': msg.recipients,
}
})
# If we were unable to create the Event, we must've done so (and thus sent the mail) already.
if not event:
self.logger.warning('An email has already been sent for collection_id=%s, (build_id=%s).',
build.collection_id, build.id.hex)
return
mail.send(msg)
def get_msg(self, builds):
# type: (List[Build]) -> Message
context = build_context_lib.get_collection_context(builds) # type: CollectionContext
if context.result == Result.passed:
return None
max_shown = current_app.config.get('MAX_SHOWN_ITEMS_PER_BUILD_MAIL', 3)
context_dict = context._asdict()
context_dict.update({
'MAX_SHOWN_ITEMS_PER_BUILD': max_shown,
'showing_failing_tests_count':
sum([min(b['failing_tests_count'], max_shown) for b in context.builds])
})
recipients = self.get_collection_recipients(context)
msg = Message(context.title, recipients=recipients, extra_headers={
'Reply-To': ', '.join(sanitize_address(r) for r in recipients),
})
msg.body = render_template('listeners/mail/notification.txt', **context_dict)
msg.html = Markup(toronado.from_string(
render_template('listeners/mail/notification.html', **context_dict)
))
return msg
def get_collection_recipients(self, collection_context):
# type: (CollectionContext) -> List[unicode]
"""
Returns a list of recipients for a collection context created by
get_collection_context. Only recipients for failing builds will be
returned.
"""
recipient_lists = map(
lambda build_context: self.get_build_recipients(build_context['build']),
collection_context.builds)
return list(set([r for rs in recipient_lists for r in rs]))
def get_build_recipients(self, build):
# type: (Build) -> List[unicode]
"""
Returns a list of recipients for a build.
The build author is included unless the build and all failing jobs
have turned off the mail.notify-author option.
Successful builds will return the empty list.
Recipients are also collected from each failing job's
mail.notify-addresses and mail.notify-addresses-revisions options.
Should there be no failing jobs (is that possible?), recipients are
collected from the build's own mail.notify-addresses and
mail.notify-addresses-revisions options.
"""
if build.result == Result.passed:
return []
recipients = []
options = self.get_build_options(build)
if options['mail.notify-author']:
author = build.author
if author:
recipients.append(u'%s <%s>' % (author.name, author.email))
recipients.extend(options['mail.notify-addresses'])
if build_type.is_initial_commit_build(build):
recipients.extend(options['mail.notify-addresses-revisions'])
return recipients
def get_build_options(self, build):
"""
Returns a build's mail options as a
{
'mail.notify-author': bool,
'mail.notify-addresses': set,
'mail.notify-addresses-revisions': set,
} dict.
The 'mail.notify-author' option is True unless the build and all
failing jobs have turned off the mail.notify-author option.
The mail.notify-addresses and mail.notify-addresses-revisions options
respectively are sets of email addresses constructed by merging the
corresponding options of all failing jobs. Note that the build's
options are used as defaults when constructing the options for
each job, so that the job options override the build options.
Finally, the build's own options are used if there are no failing jobs.
"""
default_options = {
'mail.notify-author': '1',
'mail.notify-addresses': '',
'mail.notify-addresses-revisions': '',
}
build_options = dict(
default_options,
**dict(db.session.query(
ProjectOption.name, ProjectOption.value
).filter(
ProjectOption.project_id == build.project_id,
ProjectOption.name.in_(default_options.keys()),
))
)
# Get options for all failing jobs.
jobs_options = []
for job in list(Job.query.filter(Job.build_id == build.id)):
if job.result != Result.passed:
jobs_options.append(dict(
build_options, **self.get_job_options(job)))
# Merge all options.
# Fallback to build options in case there are no failing jobs.
all_options = jobs_options or [build_options]
merged_options = {
# Notify the author unless all jobs and the build have turned the
# notify-author option off.
'mail.notify-author': any(
imap(
lambda options: options.get('mail.notify-author') == '1',
all_options,
),
),
'mail.notify-addresses': set(),
'mail.notify-addresses-revisions': set(),
}
recipient_keys = ['mail.notify-addresses', 'mail.notify-addresses-revisions']
for options in all_options:
for key in recipient_keys:
# XXX(dcramer): we dont have option validators so lets assume
# people enter slightly incorrect values
merged_options[key] |= set(
[x.strip() for x in options[key].split(',') if x.strip()]
)
return merged_options
def get_job_options(self, job):
jobplan = JobPlan.query.filter(
JobPlan.job_id == job.id,
).first()
options = {}
if jobplan and 'snapshot' in jobplan.data:
options = jobplan.data['snapshot']['options']
return options
def build_finished_handler(build_id, *args, **kwargs):
build = Build.query.get(build_id)
if not build:
return
if not build.collection_id:
# If there isn't a collection_id, assume the build stands alone.
# All builds should probably have collection_id set.
builds = [build]
else:
builds = list(
Build.query.filter(Build.collection_id == build.collection_id))
# Exit if there are no builds for the given build_id, or any build hasn't
# finished.
if not builds or any(map(lambda build: build.status != Status.finished, builds)):
return
notification_handler = MailNotificationHandler()
msg = notification_handler.get_msg(builds)
if msg is not None:
notification_handler.send(msg, build)
| apache-2.0 | 9,162,154,976,084,966,000 | -3,655,443,941,014,662,000 | 35.39834 | 102 | 0.614683 | false |
cecep-edu/edx-platform | lms/djangoapps/instructor/views/coupons.py | 61 | 6574 | """
E-commerce Tab Instructor Dashboard Coupons Operations views
"""
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _
from util.json_request import JsonResponse
from shoppingcart.models import Coupon, CourseRegistrationCode
from opaque_keys.edx.locations import SlashSeparatedCourseKey
import datetime
import pytz
import logging
log = logging.getLogger(__name__)
@require_POST
@login_required
def remove_coupon(request, course_id): # pylint: disable=unused-argument
"""
    Remove the coupon with the given coupon id by
    setting its is_active flag to False.
"""
coupon_id = request.POST.get('id', None)
if not coupon_id:
return JsonResponse({
'message': _('coupon id is None')
}, status=400) # status code 400: Bad Request
try:
coupon = Coupon.objects.get(id=coupon_id)
except ObjectDoesNotExist:
return JsonResponse({
'message': _('coupon with the coupon id ({coupon_id}) DoesNotExist').format(coupon_id=coupon_id)
}, status=400) # status code 400: Bad Request
if not coupon.is_active:
return JsonResponse({
'message': _('coupon with the coupon id ({coupon_id}) is already inactive').format(coupon_id=coupon_id)
}, status=400) # status code 400: Bad Request
coupon.is_active = False
coupon.save()
return JsonResponse({
'message': _('coupon with the coupon id ({coupon_id}) updated successfully').format(coupon_id=coupon_id)
}) # status code 200: OK by default
@require_POST
@login_required
def add_coupon(request, course_id):
"""
add coupon in the Coupons Table
"""
code = request.POST.get('code')
# check if the code is already in the Coupons Table and active
try:
course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)
coupon = Coupon.objects.get(is_active=True, code=code, course_id=course_id)
except Coupon.DoesNotExist:
# check if the coupon code is in the CourseRegistrationCode Table
course_registration_code = CourseRegistrationCode.objects.filter(code=code)
if course_registration_code:
return JsonResponse(
{'message': _("The code ({code}) that you have tried to define is already in use as a registration code").format(code=code)},
status=400) # status code 400: Bad Request
description = request.POST.get('description')
course_id = request.POST.get('course_id')
try:
discount = int(request.POST.get('discount'))
except ValueError:
return JsonResponse({
'message': _("Please Enter the Integer Value for Coupon Discount")
}, status=400) # status code 400: Bad Request
if discount > 100 or discount < 0:
return JsonResponse({
'message': _("Please Enter the Coupon Discount Value Less than or Equal to 100")
}, status=400) # status code 400: Bad Request
expiration_date = None
if request.POST.get('expiration_date'):
expiration_date = request.POST.get('expiration_date')
try:
expiration_date = datetime.datetime.strptime(expiration_date, "%m/%d/%Y").replace(tzinfo=pytz.UTC) + datetime.timedelta(days=1)
except ValueError:
return JsonResponse({
'message': _("Please enter the date in this format i-e month/day/year")
}, status=400) # status code 400: Bad Request
coupon = Coupon(
code=code, description=description,
course_id=course_id,
percentage_discount=discount,
created_by_id=request.user.id,
expiration_date=expiration_date
)
coupon.save()
return JsonResponse(
{'message': _("coupon with the coupon code ({code}) added successfully").format(code=code)}
)
if coupon:
return JsonResponse(
{'message': _("coupon with the coupon code ({code}) already exists for this course").format(code=code)},
status=400) # status code 400: Bad Request
@require_POST
@login_required
def update_coupon(request, course_id): # pylint: disable=unused-argument
"""
update the coupon object in the database
"""
coupon_id = request.POST.get('coupon_id', None)
if not coupon_id:
return JsonResponse({'message': _("coupon id not found")}, status=400) # status code 400: Bad Request
try:
coupon = Coupon.objects.get(pk=coupon_id)
except ObjectDoesNotExist:
return JsonResponse(
{'message': _("coupon with the coupon id ({coupon_id}) DoesNotExist").format(coupon_id=coupon_id)},
status=400) # status code 400: Bad Request
description = request.POST.get('description')
coupon.description = description
coupon.save()
return JsonResponse(
{'message': _("coupon with the coupon id ({coupon_id}) updated Successfully").format(coupon_id=coupon_id)}
)
@require_POST
@login_required
def get_coupon_info(request, course_id): # pylint: disable=unused-argument
"""
get the coupon information to display in the pop up form
"""
coupon_id = request.POST.get('id', None)
if not coupon_id:
return JsonResponse({
'message': _("coupon id not found")
}, status=400) # status code 400: Bad Request
try:
coupon = Coupon.objects.get(id=coupon_id)
except ObjectDoesNotExist:
return JsonResponse({
'message': _("coupon with the coupon id ({coupon_id}) DoesNotExist").format(coupon_id=coupon_id)
}, status=400) # status code 400: Bad Request
if not coupon.is_active:
return JsonResponse({
'message': _("coupon with the coupon id ({coupon_id}) is already inactive").format(coupon_id=coupon_id)
}, status=400) # status code 400: Bad Request
expiry_date = coupon.display_expiry_date
return JsonResponse({
'coupon_code': coupon.code,
'coupon_description': coupon.description,
'coupon_course_id': coupon.course_id.to_deprecated_string(),
'coupon_discount': coupon.percentage_discount,
'expiry_date': expiry_date,
'message': _('coupon with the coupon id ({coupon_id}) updated successfully').format(coupon_id=coupon_id)
}) # status code 200: OK by default
| agpl-3.0 | 5,432,209,679,303,135,000 | 7,392,226,003,318,476,000 | 38.60241 | 143 | 0.642988 | false |
qz267/zerorpc-python | zerorpc/core.py | 53 | 15303 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import traceback
import gevent.pool
import gevent.queue
import gevent.event
import gevent.local
import gevent.lock
import gevent_zmq as zmq
from .exceptions import TimeoutExpired, RemoteError, LostRemote
from .channel import ChannelMultiplexer, BufferedChannel
from .socket import SocketBase
from .heartbeat import HeartBeatOnChannel
from .context import Context
from .decorators import DecoratorBase, rep
import patterns
from logging import getLogger
logger = getLogger(__name__)
class ServerBase(object):
def __init__(self, channel, methods=None, name=None, context=None,
pool_size=None, heartbeat=5):
self._multiplexer = ChannelMultiplexer(channel)
if methods is None:
methods = self
self._context = context or Context.get_instance()
        self._name = name or self._extract_name(methods)
self._task_pool = gevent.pool.Pool(size=pool_size)
self._acceptor_task = None
self._methods = self._filter_methods(ServerBase, self, methods)
self._inject_builtins()
self._heartbeat_freq = heartbeat
for (k, functor) in self._methods.items():
if not isinstance(functor, DecoratorBase):
self._methods[k] = rep(functor)
@staticmethod
def _filter_methods(cls, self, methods):
if hasattr(methods, '__getitem__'):
return methods
server_methods = set(getattr(self, k) for k in dir(cls) if not
k.startswith('_'))
return dict((k, getattr(methods, k))
for k in dir(methods)
if (callable(getattr(methods, k))
and not k.startswith('_')
and getattr(methods, k) not in server_methods
))
@staticmethod
def _extract_name(methods):
return getattr(type(methods), '__name__', None) or repr(methods)
def close(self):
self.stop()
self._multiplexer.close()
def _format_args_spec(self, args_spec, r=None):
if args_spec:
r = [dict(name=name) for name in args_spec[0]]
default_values = args_spec[3]
if default_values is not None:
for arg, def_val in zip(reversed(r), reversed(default_values)):
arg['default'] = def_val
return r
def _zerorpc_inspect(self):
methods = dict((m, f) for m, f in self._methods.items()
if not m.startswith('_'))
detailled_methods = dict((m,
dict(args=self._format_args_spec(f._zerorpc_args()),
doc=f._zerorpc_doc())) for (m, f) in methods.items())
return {'name': self._name,
'methods': detailled_methods}
def _inject_builtins(self):
self._methods['_zerorpc_list'] = lambda: [m for m in self._methods
if not m.startswith('_')]
self._methods['_zerorpc_name'] = lambda: self._name
self._methods['_zerorpc_ping'] = lambda: ['pong', self._name]
self._methods['_zerorpc_help'] = lambda m: \
self._methods[m]._zerorpc_doc()
self._methods['_zerorpc_args'] = \
lambda m: self._methods[m]._zerorpc_args()
self._methods['_zerorpc_inspect'] = self._zerorpc_inspect
def __call__(self, method, *args):
if method not in self._methods:
raise NameError(method)
return self._methods[method](*args)
def _print_traceback(self, protocol_v1, exc_infos):
logger.exception('')
exc_type, exc_value, exc_traceback = exc_infos
if protocol_v1:
return (repr(exc_value),)
human_traceback = traceback.format_exc()
name = exc_type.__name__
human_msg = str(exc_value)
return (name, human_msg, human_traceback)
def _async_task(self, initial_event):
protocol_v1 = initial_event.header.get('v', 1) < 2
channel = self._multiplexer.channel(initial_event)
hbchan = HeartBeatOnChannel(channel, freq=self._heartbeat_freq,
passive=protocol_v1)
bufchan = BufferedChannel(hbchan)
exc_infos = None
event = bufchan.recv()
try:
self._context.hook_load_task_context(event.header)
functor = self._methods.get(event.name, None)
if functor is None:
raise NameError(event.name)
functor.pattern.process_call(self._context, bufchan, event, functor)
except LostRemote:
exc_infos = list(sys.exc_info())
self._print_traceback(protocol_v1, exc_infos)
except Exception:
exc_infos = list(sys.exc_info())
human_exc_infos = self._print_traceback(protocol_v1, exc_infos)
reply_event = bufchan.create_event('ERR', human_exc_infos,
self._context.hook_get_task_context())
self._context.hook_server_inspect_exception(event, reply_event, exc_infos)
bufchan.emit_event(reply_event)
finally:
del exc_infos
bufchan.close()
def _acceptor(self):
while True:
initial_event = self._multiplexer.recv()
self._task_pool.spawn(self._async_task, initial_event)
def run(self):
self._acceptor_task = gevent.spawn(self._acceptor)
try:
self._acceptor_task.get()
finally:
self.stop()
self._task_pool.join(raise_error=True)
def stop(self):
if self._acceptor_task is not None:
self._acceptor_task.kill()
self._acceptor_task = None
class ClientBase(object):
def __init__(self, channel, context=None, timeout=30, heartbeat=5,
passive_heartbeat=False):
self._multiplexer = ChannelMultiplexer(channel,
ignore_broadcast=True)
self._context = context or Context.get_instance()
self._timeout = timeout
self._heartbeat_freq = heartbeat
self._passive_heartbeat = passive_heartbeat
def close(self):
self._multiplexer.close()
def _handle_remote_error(self, event):
exception = self._context.hook_client_handle_remote_error(event)
if not exception:
if event.header.get('v', 1) >= 2:
(name, msg, traceback) = event.args
exception = RemoteError(name, msg, traceback)
else:
(msg,) = event.args
exception = RemoteError('RemoteError', msg, None)
return exception
def _select_pattern(self, event):
for pattern in patterns.patterns_list:
if pattern.accept_answer(event):
return pattern
msg = 'Unable to find a pattern for: {0}'.format(event)
raise RuntimeError(msg)
def _process_response(self, request_event, bufchan, timeout):
try:
reply_event = bufchan.recv(timeout)
pattern = self._select_pattern(reply_event)
return pattern.process_answer(self._context, bufchan, request_event,
reply_event, self._handle_remote_error)
except TimeoutExpired:
bufchan.close()
ex = TimeoutExpired(timeout,
'calling remote method {0}'.format(request_event.name))
self._context.hook_client_after_request(request_event, None, ex)
raise ex
except:
bufchan.close()
raise
def __call__(self, method, *args, **kargs):
timeout = kargs.get('timeout', self._timeout)
channel = self._multiplexer.channel()
hbchan = HeartBeatOnChannel(channel, freq=self._heartbeat_freq,
passive=self._passive_heartbeat)
bufchan = BufferedChannel(hbchan, inqueue_size=kargs.get('slots', 100))
xheader = self._context.hook_get_task_context()
request_event = bufchan.create_event(method, args, xheader)
self._context.hook_client_before_request(request_event)
bufchan.emit_event(request_event)
try:
if kargs.get('async', False) is False:
return self._process_response(request_event, bufchan, timeout)
async_result = gevent.event.AsyncResult()
gevent.spawn(self._process_response, request_event, bufchan,
timeout).link(async_result)
return async_result
except:
# XXX: This is going to be closed twice if async is false and
# _process_response raises an exception. I wonder if the above
# async branch can raise an exception too, if no we can just remove
# this code.
bufchan.close()
raise
def __getattr__(self, method):
return lambda *args, **kargs: self(method, *args, **kargs)
class Server(SocketBase, ServerBase):
def __init__(self, methods=None, name=None, context=None, pool_size=None,
heartbeat=5):
SocketBase.__init__(self, zmq.ROUTER, context)
if methods is None:
methods = self
name = name or ServerBase._extract_name(methods)
methods = ServerBase._filter_methods(Server, self, methods)
ServerBase.__init__(self, self._events, methods, name, context,
pool_size, heartbeat)
def close(self):
ServerBase.close(self)
SocketBase.close(self)
class Client(SocketBase, ClientBase):
def __init__(self, connect_to=None, context=None, timeout=30, heartbeat=5,
passive_heartbeat=False):
SocketBase.__init__(self, zmq.DEALER, context=context)
ClientBase.__init__(self, self._events, context, timeout, heartbeat,
passive_heartbeat)
if connect_to:
self.connect(connect_to)
def close(self):
ClientBase.close(self)
SocketBase.close(self)
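# Typical wiring of the Server/Client classes defined above (a sketch; the
# endpoint strings are placeholders and HelloRPC is an assumed example class):
#
#     class HelloRPC(object):
#         def hello(self, name):
#             return 'Hello, %s' % name
#
#     server = Server(HelloRPC())
#     server.bind('tcp://0.0.0.0:4242')
#     server.run()
#
#     # ...and from another process:
#     client = Client()
#     client.connect('tcp://127.0.0.1:4242')
#     client.hello('RPC')   # returns 'Hello, RPC'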
class Pusher(SocketBase):
def __init__(self, context=None, zmq_socket=zmq.PUSH):
super(Pusher, self).__init__(zmq_socket, context=context)
def __call__(self, method, *args):
self._events.emit(method, args,
self._context.hook_get_task_context())
def __getattr__(self, method):
return lambda *args: self(method, *args)
class Puller(SocketBase):
def __init__(self, methods=None, context=None, zmq_socket=zmq.PULL):
super(Puller, self).__init__(zmq_socket, context=context)
if methods is None:
methods = self
self._methods = ServerBase._filter_methods(Puller, self, methods)
self._receiver_task = None
def close(self):
self.stop()
super(Puller, self).close()
def __call__(self, method, *args):
if method not in self._methods:
raise NameError(method)
return self._methods[method](*args)
def _receiver(self):
while True:
event = self._events.recv()
try:
if event.name not in self._methods:
raise NameError(event.name)
self._context.hook_load_task_context(event.header)
self._context.hook_server_before_exec(event)
self._methods[event.name](*event.args)
                # In Push/Pull there is no reply to send, hence None for the
# reply_event argument
self._context.hook_server_after_exec(event, None)
except Exception:
exc_infos = sys.exc_info()
try:
logger.exception('')
self._context.hook_server_inspect_exception(event, None, exc_infos)
finally:
del exc_infos
def run(self):
self._receiver_task = gevent.spawn(self._receiver)
try:
self._receiver_task.get()
finally:
self._receiver_task = None
def stop(self):
if self._receiver_task is not None:
self._receiver_task.kill(block=False)
class Publisher(Pusher):
def __init__(self, context=None):
super(Publisher, self).__init__(context=context, zmq_socket=zmq.PUB)
class Subscriber(Puller):
def __init__(self, methods=None, context=None):
super(Subscriber, self).__init__(methods=methods, context=context,
zmq_socket=zmq.SUB)
self._events.setsockopt(zmq.SUBSCRIBE, '')
def fork_task_context(functor, context=None):
'''Wrap a functor to transfer context.
Usage example:
gevent.spawn(zerorpc.fork_task_context(myfunction), args...)
The goal is to permit context "inheritance" from a task to another.
Consider the following example:
        zerorpc.Server receives a new event
          - task1 is created to handle this event; this task will be linked
            to the initial event context. zerorpc.Server does that for you.
          - task1 makes use of some zerorpc.Client instances, the initial
            event context is transferred on every call.
          - task1 spawns a new task2.
          - task2 makes use of some zerorpc.Client instances, it's a fresh
            context. Thus there is no link to the initial context that
            spawned task1.
          - task1 spawns a new fork_task_context(task3).
          - task3 makes use of some zerorpc.Client instances, the initial
            event context is transferred on every call.
A real use case is a distributed tracer. Each time a new event is
created, a trace_id is injected in it or copied from the current task
    context. This permits passing the trace_id from a zerorpc.Server to
another via zerorpc.Client.
    The simple rule to know if a task needs to be wrapped is:
- if the new task will make any zerorpc call, it should be wrapped.
'''
context = context or Context.get_instance()
header = context.hook_get_task_context()
def wrapped(*args, **kargs):
context.hook_load_task_context(header)
return functor(*args, **kargs)
return wrapped
| mit | -2,251,642,694,928,973,600 | -974,574,580,680,730,400 | 36.053269 | 87 | 0.606548 | false |
noiselabs/box-linux-sync | src/noiselabs/box/pms/apt.py | 1 | 1248 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of box-linux-sync.
#
# Copyright (C) 2013 Vítor Brandão <[email protected]>
#
# box-linux-sync is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# box-linux-sync is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with box-linux-sync; if not, see
# <http://www.gnu.org/licenses/>.
from noiselabs.box.pms.pms import BasePMS
class APT(BasePMS):
"""The Advanced Packaging Tool used in the Debian family of Linux operating
systems (Ubuntu included)."""
def __str__(self):
return 'APT'
def search(self, pkg):
return "apt-cache search %s" % pkg
def install(self, pkg):
return "apt-get install %s" % pkg
def remove(self, pkg):
return "apt-get remove %s" % pkg
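# Usage sketch: the class above only builds shell command strings, it does not
# execute them (the package name below is illustrative).
#
#     >>> pms = APT()
#     >>> pms.install('dropbox')
#     'apt-get install dropbox'
#     >>> pms.search('dropbox')
#     'apt-cache search dropbox'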
| lgpl-3.0 | 8,450,697,679,023,201,000 | 8,420,276,514,008,444,000 | 31.789474 | 80 | 0.701445 | false |
rhyolight/nupic.research | projects/l2_pooling/convergence_activity.py | 10 | 9793 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file plots activity of single vs multiple columns as they converge.
"""
import random
import os
import pprint
import plotly
import plotly.graph_objs as go
from htmresearch.frameworks.layers.l2_l4_inference import L4L2Experiment
from htmresearch.frameworks.layers.object_machine_factory import (
createObjectMachine
)
plotlyUser = os.environ['PLOTLY_USERNAME']
plotlyAPIKey = os.environ['PLOTLY_API_KEY']
plotly.plotly.sign_in(plotlyUser, plotlyAPIKey)
def plotActivity(l2ActiveCellsMultiColumn):
maxTouches = 15
numTouches = min(maxTouches, len(l2ActiveCellsMultiColumn))
numColumns = len(l2ActiveCellsMultiColumn[0])
fig = plotly.tools.make_subplots(
rows=1, cols=numColumns, shared_yaxes=True,
subplot_titles=('Column 1', 'Column 2', 'Column 3')[0:numColumns]
)
# pprint.pprint(fig)
data = go.Scatter(x=[], y=[])
shapes = []
for t,sdrs in enumerate(l2ActiveCellsMultiColumn):
if t <= numTouches:
for c, activeCells in enumerate(sdrs):
# print t, c, len(activeCells)
for cell in activeCells:
shapes.append(
{
'type': 'rect',
'xref': 'x'+str((c+1)),
'yref': 'y1',
'x0': t,
'x1': t + 0.6,
'y0': cell,
'y1': cell + 1,
'line': {
# 'color': 'rgba(128, 0, 128, 1)',
'width': 2,
},
# 'fillcolor': 'rgba(128, 0, 128, 0.7)',
},
)
# Add red rectangle
if numColumns==1:
shapes.append(
{
'type': 'rect',
'x0': 6,
'x1': 6.6,
'y0': -95,
'y1': 4100,
'line': {
'color': 'rgba(255, 0, 0, 0.5)',
'width': 3,
},
},
)
else:
shapes.append(
{
'type': 'rect',
'x0': 3,
'x1': 3.6,
'y0': -95,
'y1': 4100,
'line': {
'color': 'rgba(255, 0, 0, 0.5)',
'width': 3,
},
},
)
# Legend for x-axis and appropriate title
fig['layout']['annotations'].append({
'font': {'size': 20},
'xanchor': 'center',
'yanchor': 'bottom',
'text': 'Number of touches',
'xref': 'paper',
'yref': 'paper',
'x': 0.5,
'y': -0.15,
'showarrow': False,
})
fig['layout']['annotations'].append({
'font': {'size': 24},
'xanchor': 'center',
'yanchor': 'bottom',
'text': ['','<b>One cortical column</b>','',
'<b>Three cortical columns</b>'][numColumns],
'xref': 'paper',
'yref': 'paper',
'x': 0.5,
'y': 1.1,
'showarrow': False,
})
layout = {
'height': 600,
'font': {'size': 18},
'yaxis': {
'title': "Neuron #",
'range': [-100, 4201],
'showgrid': False,
},
'shapes': shapes,
}
if numColumns == 1: layout.update(width=320)
else: layout.update(width=700)
for c in range(numColumns):
fig.append_trace(data, 1, c+1)
fig['layout']['xaxis'+str(c+1)].update({
'title': "",
'range': [0, numTouches],
'showgrid': False,
'showticklabels': True,
}),
fig['layout'].update(layout)
  # Save plots as HTML and/or PDF
basename='plots/activity_c'+str(numColumns)
plotly.offline.plot(fig, filename=basename+'.html', auto_open=True)
# Can't save image files in offline mode
plotly.plotly.image.save_as(fig, filename=basename+'.pdf', scale=4)
def plotL2ObjectRepresentations(exp1):
shapes = []
numObjects = len(exp1.objectL2Representations)
for obj in range(numObjects):
activeCells = exp1.objectL2Representations[obj][0]
for cell in activeCells:
shapes.append(
{
'type': 'rect',
'x0': obj,
'x1': obj + 0.75,
'y0': cell,
'y1': cell + 2,
'line': {
# 'color': 'rgba(128, 0, 128, 1)',
'width': 2,
},
# 'fillcolor': 'rgba(128, 0, 128, 0.7)',
},
)
# Add red rectangle
shapes.append(
{
'type': 'rect',
'x0': 0,
'x1': 0.9,
'y0': -95,
'y1': 4100,
'line': {
'color': 'rgba(255, 0, 0, 0.5)',
'width': 3,
},
},
)
data = [go.Scatter(x=[], y=[])]
layout = {
'width': 320,
'height': 600,
'font': {'size': 20},
'xaxis': {
'title': "Object #",
'range': [0, 10],
'showgrid': False,
'showticklabels': True,
},
'yaxis': {
'title': "Neuron #",
'range': [-100, 4201],
'showgrid': False,
},
'shapes': shapes,
'annotations': [ {
'xanchor': 'middle',
'yanchor': 'bottom',
'text': 'Target object',
'x': 1,
'y': 4100,
'ax': 10,
'ay': -25,
'arrowcolor': 'rgba(255, 0, 0, 1)',
},
{
'font': {'size': 24},
'xanchor': 'center',
'yanchor': 'bottom',
'text': '<b>Object representations</b>',
'xref': 'paper',
'yref': 'paper',
'x': 0.5,
'y': 1.1,
'showarrow': False,
}
]
}
fig = {
'data': data,
'layout': layout,
}
plotPath = plotly.offline.plot(fig, filename='plots/shapes-rectangle.html',
auto_open=True)
print "url=", plotPath
# Can't save image files in offline mode
plotly.plotly.image.save_as(
fig, filename='plots/target_object_representations.pdf', scale=4)
if __name__ == "__main__":
numColumns = 3
numFeatures = 3
numPoints = 10
numLocations = 10
numObjects = 10
numRptsPerSensation = 2
objectMachine = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=1024,
externalInputSize=1024,
numCorticalColumns=3,
seed=40,
)
objectMachine.createRandomObjects(numObjects, numPoints=numPoints,
numLocations=numLocations,
numFeatures=numFeatures)
objects = objectMachine.provideObjectsToLearn()
  # single out the inputs to column #1
objectsSingleColumn = {}
for i in range(numObjects):
featureLocations = []
for j in range(numLocations):
featureLocations.append({0: objects[i][j][0]})
objectsSingleColumn[i] = featureLocations
# we will run two experiments side by side, with either single column
# or 3 columns
exp3 = L4L2Experiment(
'three_column',
numCorticalColumns=3,
seed=1
)
exp1 = L4L2Experiment(
'single_column',
numCorticalColumns=1,
seed=1
)
print "train single column "
exp1.learnObjects(objectsSingleColumn)
print "train multi-column "
exp3.learnObjects(objects)
# test on the first object
objectId = 0
obj = objectMachine[objectId]
# Create sequence of sensations for this object for all columns
# We need to set the seed to get specific convergence points for the red
# rectangle in the graph.
objectSensations = {}
random.seed(12)
for c in range(numColumns):
objectCopy = [pair for pair in obj]
random.shuffle(objectCopy)
# stay multiple steps on each sensation
sensations = []
for pair in objectCopy:
for _ in xrange(numRptsPerSensation):
sensations.append(pair)
objectSensations[c] = sensations
sensationStepsSingleColumn = []
sensationStepsMultiColumn = []
for step in xrange(len(objectSensations[0])):
pairs = [
objectSensations[col][step] for col in xrange(numColumns)
]
sdrs = objectMachine._getSDRPairs(pairs)
sensationStepsMultiColumn.append(sdrs)
sensationStepsSingleColumn.append({0: sdrs[0]})
print "inference: multi-columns "
exp3.sendReset()
l2ActiveCellsMultiColumn = []
L2ActiveCellNVsTimeMultiColumn = []
for sensation in sensationStepsMultiColumn:
exp3.infer([sensation], objectName=objectId, reset=False)
l2ActiveCellsMultiColumn.append(exp3.getL2Representations())
activeCellNum = 0
for c in range(numColumns):
activeCellNum += len(exp3.getL2Representations()[c])
L2ActiveCellNVsTimeMultiColumn.append(activeCellNum/numColumns)
print "inference: single column "
exp1.sendReset()
l2ActiveCellsSingleColumn = []
L2ActiveCellNVsTimeSingleColumn = []
for sensation in sensationStepsSingleColumn:
exp1.infer([sensation], objectName=objectId, reset=False)
l2ActiveCellsSingleColumn.append(exp1.getL2Representations())
L2ActiveCellNVsTimeSingleColumn.append(len(exp1.getL2Representations()[0]))
# Used to figure out where to put the red rectangle!
print numFeatures
for i,sdrs in enumerate(l2ActiveCellsSingleColumn):
print i,len(l2ActiveCellsSingleColumn[i][0]),len(l2ActiveCellsMultiColumn[i][0])
plotActivity(l2ActiveCellsMultiColumn)
plotActivity(l2ActiveCellsSingleColumn)
plotL2ObjectRepresentations(exp1)
| gpl-3.0 | 5,490,254,391,354,759,000 | 869,391,025,037,136,000 | 25.903846 | 84 | 0.590728 | false |
pwollstadt/trentoolxl | dev/search_GPU/test_neighbour_search_cuda.py | 2 | 28060 | """Provide unit tests for neighbour searches using CUDA GPU-code.
Tests are based on unit tests by Pedro Mediano
https://github.com/pmediano/jidt/tree/master/java/source/infodynamics/
measures/continuous/kraskov/cuda
"""
import pytest
import numpy as np
from idtxl.neighbour_search_cuda import cudaFindKnnSetGPU, knn_search
# TODO pass 'float64' to high-level functions
def test_knn_one_dim():
"""Test kNN search in 1D."""
theiler_t = 0
n_points = 4
n_dims = 1
knn_k = 1
n_chunks = 1
pointset = np.array([-1, -1.2, 1, 1.1]).astype('float32')
gpu_id = 0
# Return arrays.
indexes = np.zeros((knn_k, n_points), dtype=np.int32)
distances = np.zeros((knn_k, n_points), dtype=np.float32)
# Call low-level function.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, n_points, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert indexes[0][0] == 1, 'Index 0 not correct.'
assert indexes[0][1] == 0, 'Index 1 not correct.'
assert indexes[0][2] == 3, 'Index 2 not correct.'
assert indexes[0][3] == 2, 'Index 3 not correct.'
assert np.isclose(distances[0][0], 0.2), 'Distance 0 not correct.'
assert np.isclose(distances[0][1], 0.2), 'Distance 1 not correct.'
assert np.isclose(distances[0][2], 0.1), 'Distance 2 not correct.'
assert np.isclose(distances[0][3], 0.1), 'Distance 3 not correct.'
# Call high-level function.
(indexes2, distances2) = knn_search(np.expand_dims(pointset, axis=1),
np.expand_dims(pointset, axis=1),
knn_k, theiler_t, n_chunks, gpu_id)
assert indexes2[0][0] == 1, 'Index 0 not correct.'
assert indexes2[0][1] == 0, 'Index 1 not correct.'
assert indexes2[0][2] == 3, 'Index 2 not correct.'
assert indexes2[0][3] == 2, 'Index 3 not correct.'
assert np.isclose(distances2[0][0], 0.2), 'Distance 0 not correct.'
assert np.isclose(distances2[0][1], 0.2), 'Distance 1 not correct.'
assert np.isclose(distances2[0][2], 0.1), 'Distance 2 not correct.'
assert np.isclose(distances2[0][3], 0.1), 'Distance 3 not correct.'
def test_knn_two_dim():
"""Test kNN search in 2D."""
theiler_t = 0
n_points = 4
n_dims = 2
knn_k = 1
n_chunks = 1
pointset = np.array([-1, 0.5, 1.1, 2,
-1, 0.5, 1.1, 2]).astype('float32')
gpu_id = 0
# Return arrays.
indexes = np.zeros((knn_k, n_points), dtype=np.int32)
distances = np.zeros((knn_k, n_points), dtype=np.float32)
# Call low-level function.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, n_points, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert indexes[0][0] == 1, 'Index 0 not correct.'
assert indexes[0][1] == 2, 'Index 1 not correct.'
assert indexes[0][2] == 1, 'Index 2 not correct.'
assert indexes[0][3] == 2, 'Index 3 not correct.'
assert np.isclose(distances[0][0], 1.5), 'Distances 0 not correct.'
assert np.isclose(distances[0][1], 0.6), 'Distances 1 not correct.'
assert np.isclose(distances[0][2], 0.6), 'Distances 2 not correct.'
assert np.isclose(distances[0][3], 0.9), 'Distances 3 not correct.'
# Call high-level function.
pointset2 = pointset.reshape((n_points, n_dims))
(indexes2, distances2) = knn_search(pointset2, pointset2, knn_k, theiler_t,
n_chunks, gpu_id)
assert indexes2[0][0] == 1, 'Index 0 not correct.'
assert indexes2[0][1] == 2, 'Index 1 not correct.'
assert indexes2[0][2] == 1, 'Index 2 not correct.'
assert indexes2[0][3] == 2, 'Index 3 not correct.'
assert np.isclose(distances2[0][0], 1.5), 'Distances 0 not correct.'
assert np.isclose(distances2[0][1], 0.6), 'Distances 1 not correct.'
assert np.isclose(distances2[0][2], 0.6), 'Distances 2 not correct.'
assert np.isclose(distances2[0][3], 0.9), 'Distances 3 not correct.'
def test_one_dim_longer_sequence():
"""Test kNN search in 1D."""
theiler_t = 0
n_points = 4
n_dims = 1
knn_k = 1
n_chunks = 1
pointset = np.array([-1, -1.2, 1, 1.1, 10, 11, 10.5, -100, -50, 666]).astype('float32')
gpu_id = 0
# Return arrays.
indexes = np.zeros((knn_k, n_points), dtype=np.int32)
distances = np.zeros((knn_k, n_points), dtype=np.float32)
# Call low-level function.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, n_points, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert indexes[0][0] == 1, 'Index 0 not correct.'
assert indexes[0][1] == 0, 'Index 1 not correct.'
assert indexes[0][2] == 3, 'Index 2 not correct.'
assert indexes[0][3] == 2, 'Index 3 not correct.'
assert np.isclose(distances[0][0], 0.2), 'Distance 0 not correct.'
assert np.isclose(distances[0][1], 0.2), 'Distance 1 not correct.'
assert np.isclose(distances[0][2], 0.1), 'Distance 2 not correct.'
assert np.isclose(distances[0][3], 0.1), 'Distance 3 not correct.'
# Call high-level function.
(indexes2, distances2) = knn_search(np.expand_dims(pointset, axis=1),
np.expand_dims(pointset, axis=1),
knn_k, theiler_t, n_chunks, gpu_id)
    assert indexes2[0][0] == 1, 'Index 0 not correct.'
    assert indexes2[0][1] == 0, 'Index 1 not correct.'
    assert indexes2[0][2] == 3, 'Index 2 not correct.'
    assert indexes2[0][3] == 2, 'Index 3 not correct.'
    assert np.isclose(distances2[0][0], 0.2), 'Distance 0 not correct.'
    assert np.isclose(distances2[0][1], 0.2), 'Distance 1 not correct.'
    assert np.isclose(distances2[0][2], 0.1), 'Distance 2 not correct.'
    assert np.isclose(distances2[0][3], 0.1), 'Distance 3 not correct.'
def test_two_dim_longer_sequence():
"""Test kNN with longer sequences.
Note:
The expected results differ from the C++ unit tests because we use the
maximum norm when searching for neighbours.
"""
theiler_t = 0
n_points = 10
n_dims = 2
knn_k = 1
n_chunks = 1
gpu_id = 0
# This is the same sequence as in the previous test case, padded with a
# bunch of points very far away.
pointset = np.array([-1, 0.5, 1.1, 2, 10, 11, 10.5, -100, -50, 666,
-1, 0.5, 1.1, 2, 98, -9, -200, 45.3, -53, 0.1]).astype('float32')
# Return arrays.
indexes = np.zeros((knn_k, n_points), dtype=np.int32)
distances = np.zeros((knn_k, n_points), dtype=np.float32)
# Call low-level function.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, n_points, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert indexes[0][0] == 1, 'Index 0 not correct.'
assert indexes[0][1] == 2, 'Index 1 not correct.'
assert indexes[0][2] == 1, 'Index 2 not correct.'
assert indexes[0][3] == 2, 'Index 3 not correct.'
assert np.isclose(distances[0][0], 1.5), 'Distances 0 not correct.'
assert np.isclose(distances[0][1], 0.6), 'Distances 1 not correct.'
assert np.isclose(distances[0][2], 0.6), 'Distances 2 not correct.'
assert np.isclose(distances[0][3], 0.9), 'Distances 3 not correct.'
# Call high-level function.
pointset2 = pointset.reshape((n_points, n_dims))
(indexes2, distances2) = knn_search(pointset2, pointset2, knn_k, theiler_t,
n_chunks, gpu_id)
    assert indexes2[0][0] == 1, 'Index 0 not correct.'
    assert indexes2[0][1] == 2, 'Index 1 not correct.'
    assert indexes2[0][2] == 1, 'Index 2 not correct.'
    assert indexes2[0][3] == 2, 'Index 3 not correct.'
    assert np.isclose(distances2[0][0], 1.5), 'Distances 0 not correct.'
    assert np.isclose(distances2[0][1], 0.6), 'Distances 1 not correct.'
    assert np.isclose(distances2[0][2], 0.6), 'Distances 2 not correct.'
    assert np.isclose(distances2[0][3], 0.9), 'Distances 3 not correct.'
def test_random_data():
"""Smoke kNN test with big random dataset"""
theiler_t = 0
n_points = 1000
n_dims = 5
knn_k = 4
n_chunks = 1
gpu_id = 0
# Return arrays.
indexes = np.zeros((knn_k, n_points), dtype=np.int32)
distances = np.zeros((knn_k, n_points), dtype=np.float32)
pointset = np.random.randn(n_points, n_dims).astype('float32')
# Call low-level function.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, n_points, gpu_id)
# Call high-level function.
pointset2 = pointset.reshape((n_points, n_dims))
(indexes2, distances2) = knn_search(pointset2, pointset2, knn_k, theiler_t,
n_chunks, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert np.all(indexes == indexes2), ('High- and low-level calls returned '
'different indices.')
assert np.all(distances == distances2), ('High- and low-level calls '
'returned different distances.')
def test_two_chunks():
"""Run knn search for two chunks."""
theiler_t = 0
n_points = 4
n_dims = 1
knn_k = 1
n_chunks = 2
signal_length = n_points * n_chunks
gpu_id = 0
# Return arrays.
indexes = np.zeros((knn_k, signal_length), dtype=np.int32)
distances = np.zeros((knn_k, signal_length), dtype=np.float32)
pointset = np.array([5, 6, -5, -7,
50, -50, 60, -70]).astype('float32')
# Call low-level function.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, signal_length, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert indexes[0][0] == 1, 'Index 0 not correct.'
assert indexes[0][1] == 0, 'Index 1 not correct.'
assert indexes[0][2] == 3, 'Index 2 not correct.'
assert indexes[0][3] == 2, 'Index 3 not correct.'
assert indexes[0][4] == 2, 'Index 4 not correct.'
assert indexes[0][5] == 3, 'Index 5 not correct.'
assert indexes[0][6] == 0, 'Index 6 not correct.'
assert indexes[0][7] == 1, 'Index 7 not correct.'
assert np.isclose(distances[0][0], 1), 'Distance 0 not correct.'
assert np.isclose(distances[0][1], 1), 'Distance 1 not correct.'
assert np.isclose(distances[0][2], 2), 'Distance 2 not correct.'
assert np.isclose(distances[0][3], 2), 'Distance 3 not correct.'
assert np.isclose(distances[0][4], 10), 'Distance 4 not correct.'
assert np.isclose(distances[0][5], 20), 'Distance 5 not correct.'
assert np.isclose(distances[0][6], 10), 'Distance 6 not correct.'
assert np.isclose(distances[0][7], 20), 'Distance 7 not correct.'
# Call high-level function.
pointset2 = np.expand_dims(pointset, axis=1)
(indexes2, distances2) = knn_search(pointset2, pointset2, knn_k, theiler_t,
n_chunks, gpu_id)
assert indexes2[0][0] == 1, 'Index 0 not correct.'
assert indexes2[0][1] == 0, 'Index 1 not correct.'
assert indexes2[0][2] == 3, 'Index 2 not correct.'
assert indexes2[0][3] == 2, 'Index 3 not correct.'
assert indexes2[0][4] == 2, 'Index 4 not correct.'
assert indexes2[0][5] == 3, 'Index 5 not correct.'
assert indexes2[0][6] == 0, 'Index 6 not correct.'
assert indexes2[0][7] == 1, 'Index 7 not correct.'
assert np.isclose(distances2[0][0], 1), 'Distance 0 not correct.'
assert np.isclose(distances2[0][1], 1), 'Distance 1 not correct.'
assert np.isclose(distances2[0][2], 2), 'Distance 2 not correct.'
assert np.isclose(distances2[0][3], 2), 'Distance 3 not correct.'
assert np.isclose(distances2[0][4], 10), 'Distance 4 not correct.'
assert np.isclose(distances2[0][5], 20), 'Distance 5 not correct.'
assert np.isclose(distances2[0][6], 10), 'Distance 6 not correct.'
assert np.isclose(distances2[0][7], 20), 'Distance 7 not correct.'
def test_three_chunks():
"""Run knn search for three chunks."""
theiler_t = 0
n_points = 4
n_dims = 1
knn_k = 1
n_chunks = 3
signal_length = n_points*n_chunks
gpu_id = 0
# Return arrays.
indexes = np.zeros((knn_k, signal_length), dtype=np.int32)
distances = np.zeros((knn_k, signal_length), dtype=np.float32)
pointset = np.array([5, 6, -5, -7,
50, -50, 60, -70,
500, -500, 600, -700]).astype('float32')
# Call low-level function.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, signal_length, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
    assert indexes[0][0] == 1, 'Index 0 not correct.'
    assert indexes[0][1] == 0, 'Index 1 not correct.'
    assert indexes[0][2] == 3, 'Index 2 not correct.'
    assert indexes[0][3] == 2, 'Index 3 not correct.'
    assert indexes[0][4] == 2, 'Index 4 not correct.'
    assert indexes[0][5] == 3, 'Index 5 not correct.'
    assert indexes[0][6] == 0, 'Index 6 not correct.'
    assert indexes[0][7] == 1, 'Index 7 not correct.'
    assert indexes[0][8] == 2, 'Index 8 not correct.'
    assert indexes[0][9] == 3, 'Index 9 not correct.'
    assert indexes[0][10] == 0, 'Index 10 not correct.'
    assert indexes[0][11] == 1, 'Index 11 not correct.'
assert np.isclose(distances[0][0], 1), 'Distance 0 is not correct.'
assert np.isclose(distances[0][1], 1), 'Distance 1 is not correct.'
assert np.isclose(distances[0][2], 2), 'Distance 2 is not correct.'
assert np.isclose(distances[0][3], 2), 'Distance 3 is not correct.'
assert np.isclose(distances[0][4], 10), 'Distance 4 is not correct.'
assert np.isclose(distances[0][5], 20), 'Distance 5 is not correct.'
assert np.isclose(distances[0][6], 10), 'Distance 6 is not correct.'
assert np.isclose(distances[0][7], 20), 'Distance 7 is not correct.'
assert np.isclose(distances[0][8], 100), 'Distance 8 is not correct.'
assert np.isclose(distances[0][9], 200), 'Distance 9 is not correct.'
assert np.isclose(distances[0][10], 100), 'Distance 10 is not correct.'
assert np.isclose(distances[0][11], 200), 'Distance 11 is not correct.'
# Call high-level function.
pointset2 = np.expand_dims(pointset, axis=1)
(indexes2, distances2) = knn_search(pointset2, pointset2, knn_k, theiler_t,
n_chunks, gpu_id)
    assert indexes2[0][0] == 1, 'Index 0 not correct.'
    assert indexes2[0][1] == 0, 'Index 1 not correct.'
    assert indexes2[0][2] == 3, 'Index 2 not correct.'
    assert indexes2[0][3] == 2, 'Index 3 not correct.'
    assert indexes2[0][4] == 2, 'Index 4 not correct.'
    assert indexes2[0][5] == 3, 'Index 5 not correct.'
    assert indexes2[0][6] == 0, 'Index 6 not correct.'
    assert indexes2[0][7] == 1, 'Index 7 not correct.'
    assert indexes2[0][8] == 2, 'Index 8 not correct.'
    assert indexes2[0][9] == 3, 'Index 9 not correct.'
    assert indexes2[0][10] == 0, 'Index 10 not correct.'
    assert indexes2[0][11] == 1, 'Index 11 not correct.'
assert np.isclose(distances2[0][0], 1), 'Distance 0 is not correct.'
assert np.isclose(distances2[0][1], 1), 'Distance 1 is not correct.'
assert np.isclose(distances2[0][2], 2), 'Distance 2 is not correct.'
assert np.isclose(distances2[0][3], 2), 'Distance 3 is not correct.'
assert np.isclose(distances2[0][4], 10), 'Distance 4 is not correct.'
assert np.isclose(distances2[0][5], 20), 'Distance 5 is not correct.'
assert np.isclose(distances2[0][6], 10), 'Distance 6 is not correct.'
assert np.isclose(distances2[0][7], 20), 'Distance 7 is not correct.'
assert np.isclose(distances2[0][8], 100), 'Distance 8 is not correct.'
assert np.isclose(distances2[0][9], 200), 'Distance 9 is not correct.'
assert np.isclose(distances2[0][10], 100), 'Distance 10 is not correct.'
assert np.isclose(distances2[0][11], 200), 'Distance 11 is not correct.'
def test_two_chunks_two_dim():
"""Test kNN with two chunks of 2D data in the same call."""
theiler_t = 0
n_points = 4
n_dims = 2
knn_k = 1
n_chunks = 2
gpu_id = 0
signal_length = n_points * n_chunks
# Return arrays.
indexes = np.zeros((knn_k, signal_length), dtype=np.int32)
distances = np.zeros((knn_k, signal_length), dtype=np.float32)
# Points: X Y y
# 1 1 | o o
# 1.1 1 |
# -1 -1 ----+----x
# -1.2 -1 |
# o o |
pointset = np.array([1, 1.1, -1, -1.2, 1, 1.1, -1, -1.2,
1, 1, -1, -1, 1, 1, -1, -1]).astype('float32')
# Call low-level function.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, signal_length, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert indexes[0][0] == 1, 'Index 0 not correct.'
assert indexes[0][1] == 0, 'Index 1 not correct.'
assert indexes[0][2] == 3, 'Index 2 not correct.'
assert indexes[0][3] == 2, 'Index 3 not correct.'
assert indexes[0][4] == 1, 'Index 4 not correct.'
assert indexes[0][5] == 0, 'Index 5 not correct.'
assert indexes[0][6] == 3, 'Index 6 not correct.'
assert indexes[0][7] == 2, 'Index 7 not correct.'
assert np.isclose(distances[0][0], 0.1), 'Distance 0 not correct.'
assert np.isclose(distances[0][1], 0.1), 'Distance 1 not correct.'
assert np.isclose(distances[0][2], 0.2), 'Distance 2 not correct.'
assert np.isclose(distances[0][3], 0.2), 'Distance 3 not correct.'
assert np.isclose(distances[0][4], 0.1), 'Distance 4 not correct.'
assert np.isclose(distances[0][5], 0.1), 'Distance 5 not correct.'
assert np.isclose(distances[0][6], 0.2), 'Distance 6 not correct.'
assert np.isclose(distances[0][7], 0.2), 'Distance 7 not correct.'
# Call high-level function.
pointset2 = pointset.reshape((signal_length, n_dims))
(indexes2, distances2) = knn_search(pointset2, pointset2, knn_k, theiler_t,
n_chunks, gpu_id)
assert indexes2[0][0] == 1, 'Index 0 not correct.'
assert indexes2[0][1] == 0, 'Index 1 not correct.'
assert indexes2[0][2] == 3, 'Index 2 not correct.'
assert indexes2[0][3] == 2, 'Index 3 not correct.'
assert indexes2[0][4] == 1, 'Index 4 not correct.'
assert indexes2[0][5] == 0, 'Index 5 not correct.'
assert indexes2[0][6] == 3, 'Index 6 not correct.'
assert indexes2[0][7] == 2, 'Index 7 not correct.'
assert np.isclose(distances2[0][0], 0.1), 'Distance 0 not correct.'
assert np.isclose(distances2[0][1], 0.1), 'Distance 1 not correct.'
assert np.isclose(distances2[0][2], 0.2), 'Distance 2 not correct.'
assert np.isclose(distances2[0][3], 0.2), 'Distance 3 not correct.'
assert np.isclose(distances2[0][4], 0.1), 'Distance 4 not correct.'
assert np.isclose(distances2[0][5], 0.1), 'Distance 5 not correct.'
assert np.isclose(distances2[0][6], 0.2), 'Distance 6 not correct.'
assert np.isclose(distances2[0][7], 0.2), 'Distance 7 not correct.'
def test_two_chunks_odd_dim():
"""Test kNN with two chunks of data with odd dimension."""
theiler_t = 0
n_points = 4
n_dims = 3
knn_k = 1
n_chunks = 2
gpu_id = 0
signal_length = n_points * n_chunks
# Return arrays.
indexes = np.zeros((knn_k, signal_length), dtype=np.int32)
distances = np.zeros((knn_k, signal_length), dtype=np.float32)
# Points: X Y Z y
# 1 1 1.02 | o o
# 1.1 1 1.03 |
# -1 -1 -1.04 ----+----x
# -1.2 -1 -1.05 |
# o o |
pointset = np.array([1, 1.1, -1, -1.2, 1, 1.1, -1, -1.2,
1, 1, -1, -1, 1, 1, -1, -1,
1.02, 1.03, 1.04, 1.05, 1.02, 1.03, 1.04, 1.05]).astype('float32')
# Call low-level function.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, signal_length, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert indexes[0][0] == 1, 'Index 0 is not correct.'
assert indexes[0][1] == 0, 'Index 1 is not correct.'
assert indexes[0][2] == 3, 'Index 2 is not correct.'
assert indexes[0][3] == 2, 'Index 3 is not correct.'
assert indexes[0][4] == 1, 'Index 4 is not correct.'
assert indexes[0][5] == 0, 'Index 5 is not correct.'
assert indexes[0][6] == 3, 'Index 6 is not correct.'
assert indexes[0][7] == 2, 'Index 7 is not correct.'
    assert np.isclose(distances[0][0], 0.1), 'Distance 0 is not correct.'
    assert np.isclose(distances[0][1], 0.1), 'Distance 1 is not correct.'
    assert np.isclose(distances[0][2], 0.2), 'Distance 2 is not correct.'
    assert np.isclose(distances[0][3], 0.2), 'Distance 3 is not correct.'
    assert np.isclose(distances[0][4], 0.1), 'Distance 4 is not correct.'
    assert np.isclose(distances[0][5], 0.1), 'Distance 5 is not correct.'
    assert np.isclose(distances[0][6], 0.2), 'Distance 6 is not correct.'
    assert np.isclose(distances[0][7], 0.2), 'Distance 7 is not correct.'
# Call high-level function.
pointset2 = pointset.reshape((signal_length, n_dims))
(indexes2, distances2) = knn_search(pointset2, pointset2, knn_k, theiler_t,
n_chunks, gpu_id)
assert indexes2[0][0] == 1, 'Index 0 is not correct.'
assert indexes2[0][1] == 0, 'Index 1 is not correct.'
assert indexes2[0][2] == 3, 'Index 2 is not correct.'
assert indexes2[0][3] == 2, 'Index 3 is not correct.'
assert indexes2[0][4] == 1, 'Index 4 is not correct.'
assert indexes2[0][5] == 0, 'Index 5 is not correct.'
assert indexes2[0][6] == 3, 'Index 6 is not correct.'
assert indexes2[0][7] == 2, 'Index 7 is not correct.'
    assert np.isclose(distances2[0][0], 0.1), 'Distance 0 is not correct.'
    assert np.isclose(distances2[0][1], 0.1), 'Distance 1 is not correct.'
    assert np.isclose(distances2[0][2], 0.2), 'Distance 2 is not correct.'
    assert np.isclose(distances2[0][3], 0.2), 'Distance 3 is not correct.'
    assert np.isclose(distances2[0][4], 0.1), 'Distance 4 is not correct.'
    assert np.isclose(distances2[0][5], 0.1), 'Distance 5 is not correct.'
    assert np.isclose(distances2[0][6], 0.2), 'Distance 6 is not correct.'
    assert np.isclose(distances2[0][7], 0.2), 'Distance 7 is not correct.'
def test_one_dim_two_dim_arg():
"""Test kNN with two chunks of data with odd dimension."""
theiler_t = 0
n_points = 4
n_dims = 3
knn_k = 1
n_chunks = 2
gpu_id = 0
signal_length = n_points * n_chunks
# Return arrays.
indexes = np.zeros((knn_k, signal_length), dtype=np.int32)
distances = np.zeros((knn_k, signal_length), dtype=np.float32)
# Points: X Y Z y
# 1 1 1.02 | o o
# 1.1 1 1.03 |
# -1 -1 -1.04 ----+----x
# -1.2 -1 -1.05 |
# o o |
pointset = np.array([1, 1.1, -1, -1.2, 1, 1.1, -1, -1.2,
1, 1, -1, -1, 1, 1, -1, -1,
1.02, 1.03, 1.04, 1.05, 1.02, 1.03, 1.04, 1.05]).astype('float32')
# Call low-level function with 1D numpy array. Numpy arranges data in
# C-order (row major) by default. This is what's expected by CUDA/pyopencl.
err = cudaFindKnnSetGPU(indexes, distances, pointset, pointset, knn_k,
theiler_t, n_chunks, n_dims, signal_length, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert indexes[0][0] == 1, 'Index 0 is not correct.'
assert indexes[0][1] == 0, 'Index 1 is not correct.'
assert indexes[0][2] == 3, 'Index 2 is not correct.'
assert indexes[0][3] == 2, 'Index 3 is not correct.'
assert indexes[0][4] == 1, 'Index 4 is not correct.'
assert indexes[0][5] == 0, 'Index 5 is not correct.'
assert indexes[0][6] == 3, 'Index 6 is not correct.'
assert indexes[0][7] == 2, 'Index 7 is not correct.'
    assert np.isclose(distances[0][0], 0.1), 'Distance 0 is not correct.'
    assert np.isclose(distances[0][1], 0.1), 'Distance 1 is not correct.'
    assert np.isclose(distances[0][2], 0.2), 'Distance 2 is not correct.'
    assert np.isclose(distances[0][3], 0.2), 'Distance 3 is not correct.'
    assert np.isclose(distances[0][4], 0.1), 'Distance 4 is not correct.'
    assert np.isclose(distances[0][5], 0.1), 'Distance 5 is not correct.'
    assert np.isclose(distances[0][6], 0.2), 'Distance 6 is not correct.'
    assert np.isclose(distances[0][7], 0.2), 'Distance 7 is not correct.'
    # Call low-level function with a 2D numpy array. Reshaping in C order does
    # not change the underlying memory layout.
indexes2 = np.zeros((knn_k, signal_length), dtype=np.int32)
distances2 = np.zeros((knn_k, signal_length), dtype=np.float32)
pointset2 = pointset.reshape((signal_length, n_dims)).copy()
err = cudaFindKnnSetGPU(indexes2, distances2, pointset2, pointset2, knn_k,
theiler_t, n_chunks, n_dims, signal_length, gpu_id)
assert err == 1, 'There was an error during the GPU-call.'
assert indexes2[0][0] == 1, 'Index 0 is not correct.'
assert indexes2[0][1] == 0, 'Index 1 is not correct.'
assert indexes2[0][2] == 3, 'Index 2 is not correct.'
assert indexes2[0][3] == 2, 'Index 3 is not correct.'
assert indexes2[0][4] == 1, 'Index 4 is not correct.'
assert indexes2[0][5] == 0, 'Index 5 is not correct.'
assert indexes2[0][6] == 3, 'Index 6 is not correct.'
assert indexes2[0][7] == 2, 'Index 7 is not correct.'
    assert np.isclose(distances2[0][0], 0.1), 'Distance 0 is not correct.'
    assert np.isclose(distances2[0][1], 0.1), 'Distance 1 is not correct.'
    assert np.isclose(distances2[0][2], 0.2), 'Distance 2 is not correct.'
    assert np.isclose(distances2[0][3], 0.2), 'Distance 3 is not correct.'
    assert np.isclose(distances2[0][4], 0.1), 'Distance 4 is not correct.'
    assert np.isclose(distances2[0][5], 0.1), 'Distance 5 is not correct.'
    assert np.isclose(distances2[0][6], 0.2), 'Distance 6 is not correct.'
    assert np.isclose(distances2[0][7], 0.2), 'Distance 7 is not correct.'
# Call low-level function with 2D numpy array in Fortran order.
indexes3 = np.zeros((knn_k, signal_length), dtype=np.int32)
distances3 = np.zeros((knn_k, signal_length), dtype=np.float32)
pointset3 = np.asfortranarray(pointset2)
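    # A Fortran-ordered (column-major) array has a different flat memory
    # layout, so the wrapper is expected to reject it instead of handing the
    # GPU a misinterpreted buffer.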
    assert not pointset3.flags['C_CONTIGUOUS']
with pytest.raises(AssertionError):
cudaFindKnnSetGPU(indexes3, distances3, pointset3, pointset3, knn_k,
theiler_t, n_chunks, n_dims, signal_length, gpu_id)
if __name__ == '__main__':
    test_one_dim_two_dim_arg()
    test_two_chunks_odd_dim()
    test_two_chunks_two_dim()
    test_two_chunks()
    test_three_chunks()
    test_random_data()
    test_one_dim_longer_sequence()
    test_two_dim_longer_sequence()
    test_knn_one_dim()
    test_knn_two_dim()
| gpl-3.0 | 3,920,839,145,382,684,000 | -660,165,205,250,113,900 | 43.469097 | 91 | 0.594939 | false |
pidydx/grr | grr/lib/flows/general/audit.py | 1 | 2003 | #!/usr/bin/env python
"""This implements the auditing system.
How does it work?
Noteworthy events within the GRR system (such as approval granting or flow
execution) generate events that notify the registered listeners.
The audit system consists of a group of event listeners which receive these
events and act upon them.
"""
from grr.lib import aff4
from grr.lib import events
from grr.lib import flow
from grr.lib import queues
from grr.lib import rdfvalue
from grr.lib import sequential_collection
AUDIT_EVENT = "Audit"
class AuditEventCollection(sequential_collection.IndexedSequentialCollection):
RDF_TYPE = events.AuditEvent
def AllAuditLogs(token=None):
# TODO(user): This is not great, we should store this differently.
for log in aff4.FACTORY.Open("aff4:/audit/logs", token=token).ListChildren():
yield AuditEventCollection(log, token=token)
def AuditLogsForTimespan(start_time, end_time, token=None):
# TODO(user): This is not great, we should store this differently.
for log in aff4.FACTORY.Open(
"aff4:/audit/logs", token=token).ListChildren(age=(start_time, end_time)):
yield AuditEventCollection(log, token=token)
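# Usage sketch for the helpers above (hypothetical; assumes a valid ACL token,
# and how the yielded collections are consumed depends on the
# IndexedSequentialCollection API):
#   for collection in AuditLogsForTimespan(start_time, end_time, token=token):
#       handle(collection)  # 'handle' is a placeholder, not a GRR API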
class AuditEventListener(flow.EventListener):
"""Receive the audit events."""
well_known_session_id = rdfvalue.SessionID(
base="aff4:/audit", queue=queues.FLOWS, flow_name="listener")
EVENTS = [AUDIT_EVENT]
created_logs = set()
def EnsureLogIsIndexed(self, log_urn):
if log_urn not in self.created_logs:
# Just write any type to the aff4 space so we can determine
# which audit logs exist easily.
aff4.FACTORY.Create(
log_urn, aff4.AFF4Volume, mode="w", token=self.token).Close()
self.created_logs.add(log_urn)
return log_urn
@flow.EventHandler(auth_required=False)
def ProcessMessage(self, message=None, event=None):
_ = message
log_urn = aff4.CurrentAuditLog()
self.EnsureLogIsIndexed(log_urn)
AuditEventCollection.StaticAdd(log_urn, self.token, event)
| apache-2.0 | -7,233,806,585,083,813,000 | 6,941,292,987,057,124,000 | 30.793651 | 80 | 0.736895 | false |
jmmease/pandas | pandas/tests/tseries/test_timezones.py | 2 | 69288 | # pylint: disable-msg=E1101,W0612
import pytest
import pytz
import dateutil
import numpy as np
from dateutil.parser import parse
from pytz import NonExistentTimeError
from distutils.version import LooseVersion
from dateutil.tz import tzlocal, tzoffset
from datetime import datetime, timedelta, tzinfo, date
import pandas.util.testing as tm
import pandas.tseries.offsets as offsets
from pandas.compat import lrange, zip
from pandas.core.indexes.datetimes import bdate_range, date_range
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas._libs import tslib
from pandas._libs.tslibs import timezones
from pandas import (Index, Series, DataFrame, isna, Timestamp, NaT,
DatetimeIndex, to_datetime)
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
set_timezone)
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
fixed_off = FixedOffset(-420, '-07:00')
fixed_off_no_name = FixedOffset(-330, None)
class TestTimeZoneSupportPytz(object):
def tz(self, tz):
# Construct a timezone object from a string. Overridden in subclass to
# parameterize tests.
return pytz.timezone(tz)
def tzstr(self, tz):
# Construct a timezone string from a string. Overridden in subclass to
# parameterize tests.
return tz
def localize(self, tz, x):
return tz.localize(x)
def cmptz(self, tz1, tz2):
# Compare two timezones. Overridden in subclass to parameterize
# tests.
return tz1.zone == tz2.zone
def test_utc_to_local_no_modify(self):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert(self.tzstr('US/Eastern'))
# Values are unmodified
assert np.array_equal(rng.asi8, rng_eastern.asi8)
assert self.cmptz(rng_eastern.tz, self.tz('US/Eastern'))
def test_utc_to_local_no_modify_explicit(self):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert(self.tz('US/Eastern'))
# Values are unmodified
tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)
assert rng_eastern.tz == self.tz('US/Eastern')
def test_localize_utc_conversion(self):
# Localizing to time zone should:
# 1) check for DST ambiguities
# 2) convert to UTC
rng = date_range('3/10/2012', '3/11/2012', freq='30T')
converted = rng.tz_localize(self.tzstr('US/Eastern'))
expected_naive = rng + offsets.Hour(5)
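        # US/Eastern is still on EST (UTC-5) for this range (DST starts
        # 2012-03-11 02:00), hence the fixed 5-hour shift.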
tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8)
# DST ambiguity, this should fail
rng = date_range('3/11/2012', '3/12/2012', freq='30T')
# Is this really how it should fail??
pytest.raises(NonExistentTimeError, rng.tz_localize,
self.tzstr('US/Eastern'))
def test_localize_utc_conversion_explicit(self):
# Localizing to time zone should:
# 1) check for DST ambiguities
# 2) convert to UTC
rng = date_range('3/10/2012', '3/11/2012', freq='30T')
converted = rng.tz_localize(self.tz('US/Eastern'))
expected_naive = rng + offsets.Hour(5)
assert np.array_equal(converted.asi8, expected_naive.asi8)
# DST ambiguity, this should fail
rng = date_range('3/11/2012', '3/12/2012', freq='30T')
# Is this really how it should fail??
pytest.raises(NonExistentTimeError, rng.tz_localize,
self.tz('US/Eastern'))
def test_timestamp_tz_localize(self):
stamp = Timestamp('3/11/2012 04:00')
result = stamp.tz_localize(self.tzstr('US/Eastern'))
expected = Timestamp('3/11/2012 04:00', tz=self.tzstr('US/Eastern'))
assert result.hour == expected.hour
assert result == expected
def test_timestamp_tz_localize_explicit(self):
stamp = Timestamp('3/11/2012 04:00')
result = stamp.tz_localize(self.tz('US/Eastern'))
expected = Timestamp('3/11/2012 04:00', tz=self.tz('US/Eastern'))
assert result.hour == expected.hour
assert result == expected
def test_timestamp_constructed_by_date_and_tz(self):
# Fix Issue 2993, Timestamp cannot be constructed by datetime.date
# and tz correctly
result = Timestamp(date(2012, 3, 11), tz=self.tzstr('US/Eastern'))
expected = Timestamp('3/11/2012', tz=self.tzstr('US/Eastern'))
assert result.hour == expected.hour
assert result == expected
def test_timestamp_constructed_by_date_and_tz_explicit(self):
# Fix Issue 2993, Timestamp cannot be constructed by datetime.date
# and tz correctly
result = Timestamp(date(2012, 3, 11), tz=self.tz('US/Eastern'))
expected = Timestamp('3/11/2012', tz=self.tz('US/Eastern'))
assert result.hour == expected.hour
assert result == expected
def test_timestamp_constructor_near_dst_boundary(self):
# GH 11481 & 15777
# Naive string timestamps were being localized incorrectly
# with tz_convert_single instead of tz_localize_to_utc
for tz in ['Europe/Brussels', 'Europe/Prague']:
result = Timestamp('2015-10-25 01:00', tz=tz)
expected = Timestamp('2015-10-25 01:00').tz_localize(tz)
assert result == expected
with pytest.raises(pytz.AmbiguousTimeError):
Timestamp('2015-10-25 02:00', tz=tz)
result = Timestamp('2017-03-26 01:00', tz='Europe/Paris')
expected = Timestamp('2017-03-26 01:00').tz_localize('Europe/Paris')
assert result == expected
with pytest.raises(pytz.NonExistentTimeError):
Timestamp('2017-03-26 02:00', tz='Europe/Paris')
# GH 11708
result = to_datetime("2015-11-18 15:30:00+05:30").tz_localize(
'UTC').tz_convert('Asia/Kolkata')
expected = Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')
assert result == expected
# GH 15823
result = Timestamp('2017-03-26 00:00', tz='Europe/Paris')
expected = Timestamp('2017-03-26 00:00:00+0100', tz='Europe/Paris')
assert result == expected
result = Timestamp('2017-03-26 01:00', tz='Europe/Paris')
expected = Timestamp('2017-03-26 01:00:00+0100', tz='Europe/Paris')
assert result == expected
with pytest.raises(pytz.NonExistentTimeError):
Timestamp('2017-03-26 02:00', tz='Europe/Paris')
result = Timestamp('2017-03-26 02:00:00+0100', tz='Europe/Paris')
expected = Timestamp(result.value).tz_localize(
'UTC').tz_convert('Europe/Paris')
assert result == expected
result = Timestamp('2017-03-26 03:00', tz='Europe/Paris')
expected = Timestamp('2017-03-26 03:00:00+0200', tz='Europe/Paris')
assert result == expected
def test_timestamp_to_datetime_tzoffset(self):
tzinfo = tzoffset(None, 7200)
expected = Timestamp('3/11/2012 04:00', tz=tzinfo)
result = Timestamp(expected.to_pydatetime())
assert expected == result
def test_timedelta_push_over_dst_boundary(self):
# #1389
# 4 hours before DST transition
stamp = Timestamp('3/10/2012 22:00', tz=self.tzstr('US/Eastern'))
result = stamp + timedelta(hours=6)
# spring forward, + "7" hours
expected = Timestamp('3/11/2012 05:00', tz=self.tzstr('US/Eastern'))
assert result == expected
def test_timedelta_push_over_dst_boundary_explicit(self):
# #1389
# 4 hours before DST transition
stamp = Timestamp('3/10/2012 22:00', tz=self.tz('US/Eastern'))
result = stamp + timedelta(hours=6)
# spring forward, + "7" hours
expected = Timestamp('3/11/2012 05:00', tz=self.tz('US/Eastern'))
assert result == expected
def test_tz_localize_dti(self):
dti = DatetimeIndex(start='1/1/2005', end='1/1/2005 0:00:30.256',
freq='L')
dti2 = dti.tz_localize(self.tzstr('US/Eastern'))
dti_utc = DatetimeIndex(start='1/1/2005 05:00',
end='1/1/2005 5:00:30.256', freq='L', tz='utc')
tm.assert_numpy_array_equal(dti2.values, dti_utc.values)
dti3 = dti2.tz_convert(self.tzstr('US/Pacific'))
tm.assert_numpy_array_equal(dti3.values, dti_utc.values)
dti = DatetimeIndex(start='11/6/2011 1:59', end='11/6/2011 2:00',
freq='L')
pytest.raises(pytz.AmbiguousTimeError, dti.tz_localize,
self.tzstr('US/Eastern'))
dti = DatetimeIndex(start='3/13/2011 1:59', end='3/13/2011 2:00',
freq='L')
pytest.raises(pytz.NonExistentTimeError, dti.tz_localize,
self.tzstr('US/Eastern'))
def test_tz_localize_empty_series(self):
# #2248
ts = Series()
ts2 = ts.tz_localize('utc')
assert ts2.index.tz == pytz.utc
ts2 = ts.tz_localize(self.tzstr('US/Eastern'))
assert self.cmptz(ts2.index.tz, self.tz('US/Eastern'))
def test_astimezone(self):
utc = Timestamp('3/11/2012 22:00', tz='UTC')
expected = utc.tz_convert(self.tzstr('US/Eastern'))
result = utc.astimezone(self.tzstr('US/Eastern'))
assert expected == result
assert isinstance(result, Timestamp)
def test_create_with_tz(self):
stamp = Timestamp('3/11/2012 05:00', tz=self.tzstr('US/Eastern'))
assert stamp.hour == 5
rng = date_range('3/11/2012 04:00', periods=10, freq='H',
tz=self.tzstr('US/Eastern'))
assert stamp == rng[1]
utc_stamp = Timestamp('3/11/2012 05:00', tz='utc')
assert utc_stamp.tzinfo is pytz.utc
assert utc_stamp.hour == 5
utc_stamp = Timestamp('3/11/2012 05:00').tz_localize('utc')
assert utc_stamp.hour == 5
def test_create_with_fixed_tz(self):
off = FixedOffset(420, '+07:00')
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
assert off == rng.tz
rng2 = date_range(start, periods=len(rng), tz=off)
tm.assert_index_equal(rng, rng2)
rng3 = date_range('3/11/2012 05:00:00+07:00',
'6/11/2012 05:00:00+07:00')
assert (rng.values == rng3.values).all()
def test_create_with_fixedoffset_noname(self):
off = fixed_off_no_name
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
assert off == rng.tz
idx = Index([start, end])
assert off == idx.tz
def test_date_range_localize(self):
rng = date_range('3/11/2012 03:00', periods=15, freq='H',
tz='US/Eastern')
rng2 = DatetimeIndex(['3/11/2012 03:00', '3/11/2012 04:00'],
tz='US/Eastern')
rng3 = date_range('3/11/2012 03:00', periods=15, freq='H')
rng3 = rng3.tz_localize('US/Eastern')
tm.assert_index_equal(rng, rng3)
# DST transition time
val = rng[0]
exp = Timestamp('3/11/2012 03:00', tz='US/Eastern')
assert val.hour == 3
assert exp.hour == 3
assert val == exp # same UTC value
tm.assert_index_equal(rng[:2], rng2)
# Right before the DST transition
rng = date_range('3/11/2012 00:00', periods=2, freq='H',
tz='US/Eastern')
rng2 = DatetimeIndex(['3/11/2012 00:00', '3/11/2012 01:00'],
tz='US/Eastern')
tm.assert_index_equal(rng, rng2)
exp = Timestamp('3/11/2012 00:00', tz='US/Eastern')
assert exp.hour == 0
assert rng[0] == exp
exp = Timestamp('3/11/2012 01:00', tz='US/Eastern')
assert exp.hour == 1
assert rng[1] == exp
rng = date_range('3/11/2012 00:00', periods=10, freq='H',
tz='US/Eastern')
assert rng[2].hour == 3
def test_utc_box_timestamp_and_localize(self):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert(self.tzstr('US/Eastern'))
tz = self.tz('US/Eastern')
expected = rng[-1].astimezone(tz)
stamp = rng_eastern[-1]
assert stamp == expected
assert stamp.tzinfo == expected.tzinfo
# right tzinfo
rng = date_range('3/13/2012', '3/14/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert(self.tzstr('US/Eastern'))
# test not valid for dateutil timezones.
# assert 'EDT' in repr(rng_eastern[0].tzinfo)
assert ('EDT' in repr(rng_eastern[0].tzinfo) or
'tzfile' in repr(rng_eastern[0].tzinfo))
def test_timestamp_tz_convert(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
idx = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern'))
conv = idx[0].tz_convert(self.tzstr('US/Pacific'))
expected = idx.tz_convert(self.tzstr('US/Pacific'))[0]
assert conv == expected
def test_pass_dates_localize_to_utc(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
idx = DatetimeIndex(strdates)
conv = idx.tz_localize(self.tzstr('US/Eastern'))
fromdates = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern'))
assert conv.tz == fromdates.tz
tm.assert_numpy_array_equal(conv.values, fromdates.values)
def test_field_access_localize(self):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
rng = DatetimeIndex(strdates, tz=self.tzstr('US/Eastern'))
assert (rng.hour == 0).all()
# a more unusual time zone, #1946
dr = date_range('2011-10-02 00:00', freq='h', periods=10,
tz=self.tzstr('America/Atikokan'))
expected = Index(np.arange(10, dtype=np.int64))
tm.assert_index_equal(dr.hour, expected)
def test_with_tz(self):
tz = self.tz('US/Central')
# just want it to work
start = datetime(2011, 3, 12, tzinfo=pytz.utc)
dr = bdate_range(start, periods=50, freq=offsets.Hour())
assert dr.tz is pytz.utc
# DateRange with naive datetimes
dr = bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc)
dr = bdate_range('1/1/2005', '1/1/2009', tz=tz)
# normalized
central = dr.tz_convert(tz)
assert central.tz is tz
comp = self.localize(tz, central[0].to_pydatetime().replace(
tzinfo=None)).tzinfo
assert central[0].tz is comp
# compare vs a localized tz
comp = self.localize(tz,
dr[0].to_pydatetime().replace(tzinfo=None)).tzinfo
assert central[0].tz is comp
# datetimes with tzinfo set
dr = bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc),
'1/1/2009', tz=pytz.utc)
pytest.raises(Exception, bdate_range,
datetime(2005, 1, 1, tzinfo=pytz.utc), '1/1/2009',
tz=tz)
def test_tz_localize(self):
dr = bdate_range('1/1/2009', '1/1/2010')
dr_utc = bdate_range('1/1/2009', '1/1/2010', tz=pytz.utc)
localized = dr.tz_localize(pytz.utc)
tm.assert_index_equal(dr_utc, localized)
def test_with_tz_ambiguous_times(self):
tz = self.tz('US/Eastern')
# March 13, 2011, spring forward, skip from 2 AM to 3 AM
dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3,
freq=offsets.Hour())
pytest.raises(pytz.NonExistentTimeError, dr.tz_localize, tz)
# after dst transition, it works
dr = date_range(datetime(2011, 3, 13, 3, 30), periods=3,
freq=offsets.Hour(), tz=tz)
# November 6, 2011, fall back, repeat 2 AM hour
dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3,
freq=offsets.Hour())
pytest.raises(pytz.AmbiguousTimeError, dr.tz_localize, tz)
# UTC is OK
dr = date_range(datetime(2011, 3, 13), periods=48,
freq=offsets.Minute(30), tz=pytz.utc)
def test_ambiguous_infer(self):
# November 6, 2011, fall back, repeat 2 AM hour
# With no repeated hours, we cannot infer the transition
tz = self.tz('US/Eastern')
dr = date_range(datetime(2011, 11, 6, 0), periods=5,
freq=offsets.Hour())
pytest.raises(pytz.AmbiguousTimeError, dr.tz_localize, tz)
# With repeated hours, we can infer the transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5,
freq=offsets.Hour(), tz=tz)
times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00',
'11/06/2011 02:00', '11/06/2011 03:00']
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous='infer')
tm.assert_index_equal(dr, localized)
with tm.assert_produces_warning(FutureWarning):
localized_old = di.tz_localize(tz, infer_dst=True)
tm.assert_index_equal(dr, localized_old)
tm.assert_index_equal(dr, DatetimeIndex(times, tz=tz,
ambiguous='infer'))
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10,
freq=offsets.Hour())
localized = dr.tz_localize(tz)
localized_infer = dr.tz_localize(tz, ambiguous='infer')
tm.assert_index_equal(localized, localized_infer)
with tm.assert_produces_warning(FutureWarning):
localized_infer_old = dr.tz_localize(tz, infer_dst=True)
tm.assert_index_equal(localized, localized_infer_old)
def test_ambiguous_flags(self):
# November 6, 2011, fall back, repeat 2 AM hour
tz = self.tz('US/Eastern')
# Pass in flags to determine right dst transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5,
freq=offsets.Hour(), tz=tz)
times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00',
'11/06/2011 02:00', '11/06/2011 03:00']
# Test tz_localize
di = DatetimeIndex(times)
is_dst = [1, 1, 0, 0, 0]
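        # A truthy entry marks the corresponding value as DST, i.e. the earlier
        # of the two occurrences of the repeated wall-clock hour.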
localized = di.tz_localize(tz, ambiguous=is_dst)
tm.assert_index_equal(dr, localized)
tm.assert_index_equal(dr, DatetimeIndex(times, tz=tz,
ambiguous=is_dst))
localized = di.tz_localize(tz, ambiguous=np.array(is_dst))
tm.assert_index_equal(dr, localized)
localized = di.tz_localize(tz,
ambiguous=np.array(is_dst).astype('bool'))
tm.assert_index_equal(dr, localized)
# Test constructor
localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst)
tm.assert_index_equal(dr, localized)
# Test duplicate times where infer_dst fails
times += times
di = DatetimeIndex(times)
# When the sizes are incompatible, make sure error is raised
pytest.raises(Exception, di.tz_localize, tz, ambiguous=is_dst)
# When sizes are compatible and there are repeats ('infer' won't work)
is_dst = np.hstack((is_dst, is_dst))
localized = di.tz_localize(tz, ambiguous=is_dst)
dr = dr.append(dr)
tm.assert_index_equal(dr, localized)
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10,
freq=offsets.Hour())
is_dst = np.array([1] * 10)
localized = dr.tz_localize(tz)
localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst)
tm.assert_index_equal(localized, localized_is_dst)
# construction with an ambiguous end-point
# GH 11626
tz = self.tzstr("Europe/London")
def f():
date_range("2013-10-26 23:00", "2013-10-27 01:00",
tz="Europe/London", freq="H")
pytest.raises(pytz.AmbiguousTimeError, f)
times = date_range("2013-10-26 23:00", "2013-10-27 01:00", freq="H",
tz=tz, ambiguous='infer')
assert times[0] == Timestamp('2013-10-26 23:00', tz=tz, freq="H")
if str(tz).startswith('dateutil'):
if dateutil.__version__ < LooseVersion('2.6.0'):
# see gh-14621
assert times[-1] == Timestamp('2013-10-27 01:00:00+0000',
tz=tz, freq="H")
elif dateutil.__version__ > LooseVersion('2.6.0'):
# fixed ambiguous behavior
assert times[-1] == Timestamp('2013-10-27 01:00:00+0100',
tz=tz, freq="H")
else:
assert times[-1] == Timestamp('2013-10-27 01:00:00+0000',
tz=tz, freq="H")
def test_ambiguous_nat(self):
tz = self.tz('US/Eastern')
times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00',
'11/06/2011 02:00', '11/06/2011 03:00']
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous='NaT')
times = ['11/06/2011 00:00', np.NaN, np.NaN, '11/06/2011 02:00',
'11/06/2011 03:00']
di_test = DatetimeIndex(times, tz='US/Eastern')
# left dtype is datetime64[ns, US/Eastern]
# right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')]
tm.assert_numpy_array_equal(di_test.values, localized.values)
def test_ambiguous_bool(self):
# make sure that we are correctly accepting bool values as ambiguous
# gh-14402
t = Timestamp('2015-11-01 01:00:03')
expected0 = Timestamp('2015-11-01 01:00:03-0500', tz='US/Central')
expected1 = Timestamp('2015-11-01 01:00:03-0600', tz='US/Central')
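        # ambiguous=True resolves to the DST reading (CDT, UTC-5); False to
        # standard time (CST, UTC-6).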
def f():
t.tz_localize('US/Central')
pytest.raises(pytz.AmbiguousTimeError, f)
result = t.tz_localize('US/Central', ambiguous=True)
assert result == expected0
result = t.tz_localize('US/Central', ambiguous=False)
assert result == expected1
s = Series([t])
expected0 = Series([expected0])
expected1 = Series([expected1])
def f():
s.dt.tz_localize('US/Central')
pytest.raises(pytz.AmbiguousTimeError, f)
result = s.dt.tz_localize('US/Central', ambiguous=True)
assert_series_equal(result, expected0)
result = s.dt.tz_localize('US/Central', ambiguous=[True])
assert_series_equal(result, expected0)
result = s.dt.tz_localize('US/Central', ambiguous=False)
assert_series_equal(result, expected1)
result = s.dt.tz_localize('US/Central', ambiguous=[False])
assert_series_equal(result, expected1)
def test_nonexistent_raise_coerce(self):
# See issue 13057
from pytz.exceptions import NonExistentTimeError
times = ['2015-03-08 01:00', '2015-03-08 02:00', '2015-03-08 03:00']
index = DatetimeIndex(times)
tz = 'US/Eastern'
pytest.raises(NonExistentTimeError,
index.tz_localize, tz=tz)
pytest.raises(NonExistentTimeError,
index.tz_localize, tz=tz, errors='raise')
result = index.tz_localize(tz=tz, errors='coerce')
test_times = ['2015-03-08 01:00-05:00', 'NaT',
'2015-03-08 03:00-04:00']
expected = DatetimeIndex(test_times)\
.tz_localize('UTC').tz_convert('US/Eastern')
tm.assert_index_equal(result, expected)
# test utility methods
def test_infer_tz(self):
eastern = self.tz('US/Eastern')
utc = pytz.utc
_start = datetime(2001, 1, 1)
_end = datetime(2009, 1, 1)
start = self.localize(eastern, _start)
end = self.localize(eastern, _end)
assert (timezones.infer_tzinfo(start, end) is
self.localize(eastern, _start).tzinfo)
assert (timezones.infer_tzinfo(start, None) is
self.localize(eastern, _start).tzinfo)
assert (timezones.infer_tzinfo(None, end) is
self.localize(eastern, _end).tzinfo)
start = utc.localize(_start)
end = utc.localize(_end)
assert (timezones.infer_tzinfo(start, end) is utc)
end = self.localize(eastern, _end)
pytest.raises(Exception, timezones.infer_tzinfo, start, end)
pytest.raises(Exception, timezones.infer_tzinfo, end, start)
def test_tz_string(self):
result = date_range('1/1/2000', periods=10,
tz=self.tzstr('US/Eastern'))
expected = date_range('1/1/2000', periods=10, tz=self.tz('US/Eastern'))
tm.assert_index_equal(result, expected)
def test_take_dont_lose_meta(self):
rng = date_range('1/1/2000', periods=20, tz=self.tzstr('US/Eastern'))
result = rng.take(lrange(5))
assert result.tz == rng.tz
assert result.freq == rng.freq
def test_index_with_timezone_repr(self):
rng = date_range('4/13/2010', '5/6/2010')
rng_eastern = rng.tz_localize(self.tzstr('US/Eastern'))
rng_repr = repr(rng_eastern)
assert '2010-04-13 00:00:00' in rng_repr
def test_index_astype_asobject_tzinfos(self):
# #1345
# dates around a dst transition
rng = date_range('2/13/2010', '5/6/2010', tz=self.tzstr('US/Eastern'))
objs = rng.asobject
for i, x in enumerate(objs):
exval = rng[i]
assert x == exval
assert x.tzinfo == exval.tzinfo
objs = rng.astype(object)
for i, x in enumerate(objs):
exval = rng[i]
assert x == exval
assert x.tzinfo == exval.tzinfo
def test_localized_at_time_between_time(self):
from datetime import time
rng = date_range('4/16/2012', '5/1/2012', freq='H')
ts = Series(np.random.randn(len(rng)), index=rng)
ts_local = ts.tz_localize(self.tzstr('US/Eastern'))
result = ts_local.at_time(time(10, 0))
expected = ts.at_time(time(10, 0)).tz_localize(self.tzstr(
'US/Eastern'))
assert_series_equal(result, expected)
assert self.cmptz(result.index.tz, self.tz('US/Eastern'))
t1, t2 = time(10, 0), time(11, 0)
result = ts_local.between_time(t1, t2)
expected = ts.between_time(t1,
t2).tz_localize(self.tzstr('US/Eastern'))
assert_series_equal(result, expected)
assert self.cmptz(result.index.tz, self.tz('US/Eastern'))
def test_string_index_alias_tz_aware(self):
rng = date_range('1/1/2000', periods=10, tz=self.tzstr('US/Eastern'))
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts['1/3/2000']
tm.assert_almost_equal(result, ts[2])
def test_fixed_offset(self):
dates = [datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off)]
result = to_datetime(dates)
assert result.tz == fixed_off
def test_fixedtz_topydatetime(self):
dates = np.array([datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off)])
result = to_datetime(dates).to_pydatetime()
tm.assert_numpy_array_equal(dates, result)
result = to_datetime(dates)._mpl_repr()
tm.assert_numpy_array_equal(dates, result)
def test_convert_tz_aware_datetime_datetime(self):
# #1581
tz = self.tz('US/Eastern')
dates = [datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)]
dates_aware = [self.localize(tz, x) for x in dates]
result = to_datetime(dates_aware)
assert self.cmptz(result.tz, self.tz('US/Eastern'))
converted = to_datetime(dates_aware, utc=True)
ex_vals = np.array([Timestamp(x).value for x in dates_aware])
tm.assert_numpy_array_equal(converted.asi8, ex_vals)
assert converted.tz is pytz.utc
def test_to_datetime_utc(self):
arr = np.array([parse('2012-06-13T01:39:00Z')], dtype=object)
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
def test_to_datetime_tzlocal(self):
dt = parse('2012-06-13T01:39:00Z')
dt = dt.replace(tzinfo=tzlocal())
arr = np.array([dt], dtype=object)
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
rng = date_range('2012-11-03 03:00', '2012-11-05 03:00', tz=tzlocal())
arr = rng.to_pydatetime()
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
def test_frame_no_datetime64_dtype(self):
# after 7822
# these retain the timezones on dict construction
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
dr_tz = dr.tz_localize(self.tzstr('US/Eastern'))
e = DataFrame({'A': 'foo', 'B': dr_tz}, index=dr)
tz_expected = DatetimeTZDtype('ns', dr_tz.tzinfo)
assert e['B'].dtype == tz_expected
# GH 2810 (with timezones)
datetimes_naive = [ts.to_pydatetime() for ts in dr]
datetimes_with_tz = [ts.to_pydatetime() for ts in dr_tz]
df = DataFrame({'dr': dr,
'dr_tz': dr_tz,
'datetimes_naive': datetimes_naive,
'datetimes_with_tz': datetimes_with_tz})
result = df.get_dtype_counts().sort_index()
expected = Series({'datetime64[ns]': 2,
str(tz_expected): 2}).sort_index()
assert_series_equal(result, expected)
def test_hongkong_tz_convert(self):
# #1673
dr = date_range('2012-01-01', '2012-01-10', freq='D', tz='Hongkong')
# it works!
dr.hour
def test_tz_convert_unsorted(self):
dr = date_range('2012-03-09', freq='H', periods=100, tz='utc')
dr = dr.tz_convert(self.tzstr('US/Eastern'))
result = dr[::-1].hour
exp = dr.hour[::-1]
tm.assert_almost_equal(result, exp)
def test_shift_localized(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
dr_tz = dr.tz_localize(self.tzstr('US/Eastern'))
result = dr_tz.shift(1, '10T')
assert result.tz == dr_tz.tz
def test_tz_aware_asfreq(self):
dr = date_range('2011-12-01', '2012-07-20', freq='D',
tz=self.tzstr('US/Eastern'))
s = Series(np.random.randn(len(dr)), index=dr)
# it works!
s.asfreq('T')
def test_static_tzinfo(self):
# it works!
index = DatetimeIndex([datetime(2012, 1, 1)], tz=self.tzstr('EST'))
index.hour
index[0]
def test_tzaware_datetime_to_index(self):
d = [datetime(2012, 8, 19, tzinfo=self.tz('US/Eastern'))]
index = DatetimeIndex(d)
assert self.cmptz(index.tz, self.tz('US/Eastern'))
def test_date_range_span_dst_transition(self):
# #1778
# Standard -> Daylight Savings Time
dr = date_range('03/06/2012 00:00', periods=200, freq='W-FRI',
tz='US/Eastern')
assert (dr.hour == 0).all()
dr = date_range('2012-11-02', periods=10, tz=self.tzstr('US/Eastern'))
assert (dr.hour == 0).all()
def test_convert_datetime_list(self):
dr = date_range('2012-06-02', periods=10,
tz=self.tzstr('US/Eastern'), name='foo')
dr2 = DatetimeIndex(list(dr), name='foo')
tm.assert_index_equal(dr, dr2)
assert dr.tz == dr2.tz
assert dr2.name == 'foo'
def test_frame_from_records_utc(self):
rec = {'datum': 1.5,
'begin_time': datetime(2006, 4, 27, tzinfo=pytz.utc)}
# it works
DataFrame.from_records([rec], index='begin_time')
def test_frame_reset_index(self):
dr = date_range('2012-06-02', periods=10, tz=self.tzstr('US/Eastern'))
df = DataFrame(np.random.randn(len(dr)), dr)
roundtripped = df.reset_index().set_index('index')
xp = df.index.tz
rs = roundtripped.index.tz
assert xp == rs
def test_dateutil_tzoffset_support(self):
values = [188.5, 328.25]
tzinfo = tzoffset(None, 7200)
index = [datetime(2012, 5, 11, 11, tzinfo=tzinfo),
datetime(2012, 5, 11, 12, tzinfo=tzinfo)]
series = Series(data=values, index=index)
assert series.index.tz == tzinfo
# it works! #2443
repr(series.index[0])
def test_getitem_pydatetime_tz(self):
index = date_range(start='2012-12-24 16:00', end='2012-12-24 18:00',
freq='H', tz=self.tzstr('Europe/Berlin'))
ts = Series(index=index, data=index.hour)
time_pandas = Timestamp('2012-12-24 17:00',
tz=self.tzstr('Europe/Berlin'))
time_datetime = self.localize(
self.tz('Europe/Berlin'), datetime(2012, 12, 24, 17, 0))
assert ts[time_pandas] == ts[time_datetime]
def test_index_drop_dont_lose_tz(self):
# #2621
ind = date_range("2012-12-01", periods=10, tz="utc")
ind = ind.drop(ind[-1])
assert ind.tz is not None
def test_datetimeindex_tz(self):
""" Test different DatetimeIndex constructions with timezone
Follow-up of #4229
"""
arr = ['11/10/2005 08:00:00', '11/10/2005 09:00:00']
idx1 = to_datetime(arr).tz_localize(self.tzstr('US/Eastern'))
idx2 = DatetimeIndex(start="2005-11-10 08:00:00", freq='H', periods=2,
tz=self.tzstr('US/Eastern'))
idx3 = DatetimeIndex(arr, tz=self.tzstr('US/Eastern'))
idx4 = DatetimeIndex(np.array(arr), tz=self.tzstr('US/Eastern'))
for other in [idx2, idx3, idx4]:
tm.assert_index_equal(idx1, other)
def test_datetimeindex_tz_nat(self):
idx = to_datetime([Timestamp("2013-1-1", tz=self.tzstr('US/Eastern')),
NaT])
assert isna(idx[1])
assert idx[0].tzinfo is not None
class TestTimeZoneSupportDateutil(TestTimeZoneSupportPytz):
def tz(self, tz):
"""
Construct a dateutil timezone.
Use tslib.maybe_get_tz so that we get the filename on the tz right
on windows. See #7337.
"""
return timezones.maybe_get_tz('dateutil/' + tz)
def tzstr(self, tz):
""" Construct a timezone string from a string. Overridden in subclass
to parameterize tests. """
return 'dateutil/' + tz
def cmptz(self, tz1, tz2):
""" Compare two timezones. Overridden in subclass to parameterize
tests. """
return tz1 == tz2
def localize(self, tz, x):
return x.replace(tzinfo=tz)
def test_utc_with_system_utc(self):
# Skipped on win32 due to dateutil bug
tm._skip_if_windows()
from pandas._libs.tslibs.timezones import maybe_get_tz
# from system utc to real utc
ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC'))
# check that the time hasn't changed.
assert ts == ts.tz_convert(dateutil.tz.tzutc())
# from system utc to real utc
ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC'))
# check that the time hasn't changed.
assert ts == ts.tz_convert(dateutil.tz.tzutc())
def test_tz_convert_hour_overflow_dst(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
# sorted case US/Eastern -> UTC
ts = ['2008-05-12 09:50:00',
'2008-12-12 09:50:35',
'2009-05-12 09:50:32']
tt = to_datetime(ts).tz_localize('US/Eastern')
ut = tt.tz_convert('UTC')
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = ['2008-05-12 13:50:00',
'2008-12-12 14:50:35',
'2009-05-12 13:50:32']
tt = to_datetime(ts).tz_localize('UTC')
ut = tt.tz_convert('US/Eastern')
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = ['2008-05-12 09:50:00',
'2008-12-12 09:50:35',
'2008-05-12 09:50:32']
tt = to_datetime(ts).tz_localize('US/Eastern')
ut = tt.tz_convert('UTC')
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = ['2008-05-12 13:50:00',
'2008-12-12 14:50:35',
'2008-05-12 13:50:32']
tt = to_datetime(ts).tz_localize('UTC')
ut = tt.tz_convert('US/Eastern')
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
def test_tz_convert_hour_overflow_dst_timestamps(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
tz = self.tzstr('US/Eastern')
# sorted case US/Eastern -> UTC
ts = [Timestamp('2008-05-12 09:50:00', tz=tz),
Timestamp('2008-12-12 09:50:35', tz=tz),
Timestamp('2009-05-12 09:50:32', tz=tz)]
tt = to_datetime(ts)
ut = tt.tz_convert('UTC')
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = [Timestamp('2008-05-12 13:50:00', tz='UTC'),
Timestamp('2008-12-12 14:50:35', tz='UTC'),
Timestamp('2009-05-12 13:50:32', tz='UTC')]
tt = to_datetime(ts)
ut = tt.tz_convert('US/Eastern')
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = [Timestamp('2008-05-12 09:50:00', tz=tz),
Timestamp('2008-12-12 09:50:35', tz=tz),
Timestamp('2008-05-12 09:50:32', tz=tz)]
tt = to_datetime(ts)
ut = tt.tz_convert('UTC')
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = [Timestamp('2008-05-12 13:50:00', tz='UTC'),
Timestamp('2008-12-12 14:50:35', tz='UTC'),
Timestamp('2008-05-12 13:50:32', tz='UTC')]
tt = to_datetime(ts)
ut = tt.tz_convert('US/Eastern')
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
def test_tslib_tz_convert_trans_pos_plus_1__bug(self):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
# See https://github.com/pandas-dev/pandas/issues/4496 for details.
for freq, n in [('H', 1), ('T', 60), ('S', 3600)]:
idx = date_range(datetime(2011, 3, 26, 23),
datetime(2011, 3, 27, 1), freq=freq)
idx = idx.tz_localize('UTC')
idx = idx.tz_convert('Europe/Moscow')
expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
def test_tslib_tz_convert_dst(self):
for freq, n in [('H', 1), ('T', 60), ('S', 3600)]:
# Start DST
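            # At the spring-forward transition the local clock jumps from 01:59
            # to 03:00, so hour 2 is absent from the expected values below.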
idx = date_range('2014-03-08 23:00', '2014-03-09 09:00', freq=freq,
tz='UTC')
idx = idx.tz_convert('US/Eastern')
expected = np.repeat(np.array([18, 19, 20, 21, 22, 23,
0, 1, 3, 4, 5]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range('2014-03-08 18:00', '2014-03-09 05:00', freq=freq,
tz='US/Eastern')
idx = idx.tz_convert('UTC')
expected = np.repeat(np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
# End DST
idx = date_range('2014-11-01 23:00', '2014-11-02 09:00', freq=freq,
tz='UTC')
idx = idx.tz_convert('US/Eastern')
expected = np.repeat(np.array([19, 20, 21, 22, 23,
0, 1, 1, 2, 3, 4]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range('2014-11-01 18:00', '2014-11-02 05:00', freq=freq,
tz='US/Eastern')
idx = idx.tz_convert('UTC')
expected = np.repeat(np.array([22, 23, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10]),
np.array([n, n, n, n, n, n, n, n, n,
n, n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
# daily
# Start DST
idx = date_range('2014-03-08 00:00', '2014-03-09 00:00', freq='D',
tz='UTC')
idx = idx.tz_convert('US/Eastern')
tm.assert_index_equal(idx.hour, Index([19, 19]))
idx = date_range('2014-03-08 00:00', '2014-03-09 00:00', freq='D',
tz='US/Eastern')
idx = idx.tz_convert('UTC')
tm.assert_index_equal(idx.hour, Index([5, 5]))
# End DST
idx = date_range('2014-11-01 00:00', '2014-11-02 00:00', freq='D',
tz='UTC')
idx = idx.tz_convert('US/Eastern')
tm.assert_index_equal(idx.hour, Index([20, 20]))
        idx = date_range('2014-11-01 00:00', '2014-11-02 00:00', freq='D',
tz='US/Eastern')
idx = idx.tz_convert('UTC')
tm.assert_index_equal(idx.hour, Index([4, 4]))
def test_tzlocal(self):
# GH 13583
ts = Timestamp('2011-01-01', tz=dateutil.tz.tzlocal())
assert ts.tz == dateutil.tz.tzlocal()
assert "tz='tzlocal()')" in repr(ts)
tz = timezones.maybe_get_tz('tzlocal()')
assert tz == dateutil.tz.tzlocal()
# get offset using normal datetime for test
offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))
offset = offset.total_seconds() * 1000000000
assert ts.value + offset == Timestamp('2011-01-01').value
def test_tz_localize_tzlocal(self):
# GH 13583
offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))
offset = int(offset.total_seconds() * 1000000000)
dti = date_range(start='2001-01-01', end='2001-03-01')
dti2 = dti.tz_localize(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8)
dti = date_range(start='2001-01-01', end='2001-03-01',
tz=dateutil.tz.tzlocal())
dti2 = dti.tz_localize(None)
tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8)
def test_tz_convert_tzlocal(self):
# GH 13583
# tz_convert doesn't affect to internal
dti = date_range(start='2001-01-01', end='2001-03-01', tz='UTC')
dti2 = dti.tz_convert(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
dti = date_range(start='2001-01-01', end='2001-03-01',
tz=dateutil.tz.tzlocal())
dti2 = dti.tz_convert(None)
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
class TestTimeZoneCacheKey(object):
def test_cache_keys_are_distinct_for_pytz_vs_dateutil(self):
tzs = pytz.common_timezones
for tz_name in tzs:
if tz_name == 'UTC':
# skip utc as it's a special case in dateutil
continue
tz_p = timezones.maybe_get_tz(tz_name)
tz_d = timezones.maybe_get_tz('dateutil/' + tz_name)
if tz_d is None:
# skip timezones that dateutil doesn't know about.
continue
assert (timezones._p_tz_cache_key(tz_p) !=
timezones._p_tz_cache_key(tz_d))
class TestTimeZones(object):
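    # Timezones exercised by the tests below; the last entry is
    # dateutil-backed rather than pytz-backed.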
timezones = ['UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/US/Pacific']
def test_replace(self):
# GH 14621
# GH 7825
# replacing datetime components with and w/o presence of a timezone
dt = Timestamp('2016-01-01 09:00:00')
result = dt.replace(hour=0)
expected = Timestamp('2016-01-01 00:00:00')
assert result == expected
for tz in self.timezones:
dt = Timestamp('2016-01-01 09:00:00', tz=tz)
result = dt.replace(hour=0)
expected = Timestamp('2016-01-01 00:00:00', tz=tz)
assert result == expected
# we preserve nanoseconds
dt = Timestamp('2016-01-01 09:00:00.000000123', tz=tz)
result = dt.replace(hour=0)
expected = Timestamp('2016-01-01 00:00:00.000000123', tz=tz)
assert result == expected
# test all
dt = Timestamp('2016-01-01 09:00:00.000000123', tz=tz)
result = dt.replace(year=2015, month=2, day=2, hour=0, minute=5,
second=5, microsecond=5, nanosecond=5)
expected = Timestamp('2015-02-02 00:05:05.000005005', tz=tz)
assert result == expected
# error
def f():
dt.replace(foo=5)
pytest.raises(TypeError, f)
def f():
dt.replace(hour=0.1)
pytest.raises(ValueError, f)
# assert conversion to naive is the same as replacing tzinfo with None
dt = Timestamp('2013-11-03 01:59:59.999999-0400', tz='US/Eastern')
assert dt.tz_localize(None) == dt.replace(tzinfo=None)
def test_ambiguous_compat(self):
# validate that pytz and dateutil are compat for dst
# when the transition happens
pytz_zone = 'Europe/London'
dateutil_zone = 'dateutil/Europe/London'
result_pytz = (Timestamp('2013-10-27 01:00:00')
.tz_localize(pytz_zone, ambiguous=0))
result_dateutil = (Timestamp('2013-10-27 01:00:00')
.tz_localize(dateutil_zone, ambiguous=0))
assert result_pytz.value == result_dateutil.value
assert result_pytz.value == 1382835600000000000
if dateutil.__version__ < LooseVersion('2.6.0'):
# dateutil 2.6 buggy w.r.t. ambiguous=0
# see gh-14621
# see https://github.com/dateutil/dateutil/issues/321
assert (result_pytz.to_pydatetime().tzname() ==
result_dateutil.to_pydatetime().tzname())
assert str(result_pytz) == str(result_dateutil)
elif dateutil.__version__ > LooseVersion('2.6.0'):
# fixed ambiguous behavior
assert result_pytz.to_pydatetime().tzname() == 'GMT'
assert result_dateutil.to_pydatetime().tzname() == 'BST'
assert str(result_pytz) != str(result_dateutil)
# 1 hour difference
result_pytz = (Timestamp('2013-10-27 01:00:00')
.tz_localize(pytz_zone, ambiguous=1))
result_dateutil = (Timestamp('2013-10-27 01:00:00')
.tz_localize(dateutil_zone, ambiguous=1))
assert result_pytz.value == result_dateutil.value
assert result_pytz.value == 1382832000000000000
# dateutil < 2.6 is buggy w.r.t. ambiguous timezones
if dateutil.__version__ > LooseVersion('2.5.3'):
# see gh-14621
assert str(result_pytz) == str(result_dateutil)
assert (result_pytz.to_pydatetime().tzname() ==
result_dateutil.to_pydatetime().tzname())
def test_replace_tzinfo(self):
# GH 15683
dt = datetime(2016, 3, 27, 1)
tzinfo = pytz.timezone('CET').localize(dt, is_dst=False).tzinfo
result_dt = dt.replace(tzinfo=tzinfo)
result_pd = Timestamp(dt).replace(tzinfo=tzinfo)
if hasattr(result_dt, 'timestamp'): # New method in Py 3.3
assert result_dt.timestamp() == result_pd.timestamp()
assert result_dt == result_pd
assert result_dt == result_pd.to_pydatetime()
result_dt = dt.replace(tzinfo=tzinfo).replace(tzinfo=None)
result_pd = Timestamp(dt).replace(tzinfo=tzinfo).replace(tzinfo=None)
if hasattr(result_dt, 'timestamp'): # New method in Py 3.3
assert result_dt.timestamp() == result_pd.timestamp()
assert result_dt == result_pd
assert result_dt == result_pd.to_pydatetime()
def test_index_equals_with_tz(self):
left = date_range('1/1/2011', periods=100, freq='H', tz='utc')
right = date_range('1/1/2011', periods=100, freq='H', tz='US/Eastern')
assert not left.equals(right)
def test_tz_localize_naive(self):
rng = date_range('1/1/2011', periods=100, freq='H')
conv = rng.tz_localize('US/Pacific')
exp = date_range('1/1/2011', periods=100, freq='H', tz='US/Pacific')
tm.assert_index_equal(conv, exp)
def test_tz_localize_roundtrip(self):
for tz in self.timezones:
idx1 = date_range(start='2014-01-01', end='2014-12-31', freq='M')
idx2 = date_range(start='2014-01-01', end='2014-12-31', freq='D')
idx3 = date_range(start='2014-01-01', end='2014-03-01', freq='H')
idx4 = date_range(start='2014-08-01', end='2014-10-31', freq='T')
for idx in [idx1, idx2, idx3, idx4]:
localized = idx.tz_localize(tz)
expected = date_range(start=idx[0], end=idx[-1], freq=idx.freq,
tz=tz)
tm.assert_index_equal(localized, expected)
with pytest.raises(TypeError):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
tm.assert_index_equal(reset, idx)
assert reset.tzinfo is None
def test_series_frame_tz_localize(self):
rng = date_range('1/1/2011', periods=100, freq='H')
ts = Series(1, index=rng)
result = ts.tz_localize('utc')
assert result.index.tz.zone == 'UTC'
df = DataFrame({'a': 1}, index=rng)
result = df.tz_localize('utc')
expected = DataFrame({'a': 1}, rng.tz_localize('UTC'))
assert result.index.tz.zone == 'UTC'
assert_frame_equal(result, expected)
df = df.T
result = df.tz_localize('utc', axis=1)
assert result.columns.tz.zone == 'UTC'
assert_frame_equal(result, expected.T)
# Can't localize if already tz-aware
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
ts = Series(1, index=rng)
tm.assert_raises_regex(TypeError, 'Already tz-aware',
ts.tz_localize, 'US/Eastern')
def test_series_frame_tz_convert(self):
rng = date_range('1/1/2011', periods=200, freq='D', tz='US/Eastern')
ts = Series(1, index=rng)
result = ts.tz_convert('Europe/Berlin')
assert result.index.tz.zone == 'Europe/Berlin'
df = DataFrame({'a': 1}, index=rng)
result = df.tz_convert('Europe/Berlin')
expected = DataFrame({'a': 1}, rng.tz_convert('Europe/Berlin'))
assert result.index.tz.zone == 'Europe/Berlin'
assert_frame_equal(result, expected)
df = df.T
result = df.tz_convert('Europe/Berlin', axis=1)
assert result.columns.tz.zone == 'Europe/Berlin'
assert_frame_equal(result, expected.T)
# can't convert tz-naive
rng = date_range('1/1/2011', periods=200, freq='D')
ts = Series(1, index=rng)
tm.assert_raises_regex(TypeError, "Cannot convert tz-naive",
ts.tz_convert, 'US/Eastern')
def test_tz_convert_roundtrip(self):
for tz in self.timezones:
idx1 = date_range(start='2014-01-01', end='2014-12-31', freq='M',
tz='UTC')
exp1 = date_range(start='2014-01-01', end='2014-12-31', freq='M')
idx2 = date_range(start='2014-01-01', end='2014-12-31', freq='D',
tz='UTC')
exp2 = date_range(start='2014-01-01', end='2014-12-31', freq='D')
idx3 = date_range(start='2014-01-01', end='2014-03-01', freq='H',
tz='UTC')
exp3 = date_range(start='2014-01-01', end='2014-03-01', freq='H')
idx4 = date_range(start='2014-08-01', end='2014-10-31', freq='T',
tz='UTC')
exp4 = date_range(start='2014-08-01', end='2014-10-31', freq='T')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3),
(idx4, exp4)]:
converted = idx.tz_convert(tz)
reset = converted.tz_convert(None)
tm.assert_index_equal(reset, expected)
assert reset.tzinfo is None
tm.assert_index_equal(reset, converted.tz_convert(
'UTC').tz_localize(None))
def test_join_utc_convert(self):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
left = rng.tz_convert('US/Eastern')
right = rng.tz_convert('Europe/Berlin')
for how in ['inner', 'outer', 'left', 'right']:
result = left.join(left[:-5], how=how)
assert isinstance(result, DatetimeIndex)
assert result.tz == left.tz
result = left.join(right[:-5], how=how)
assert isinstance(result, DatetimeIndex)
assert result.tz.zone == 'UTC'
def test_join_aware(self):
rng = date_range('1/1/2011', periods=10, freq='H')
ts = Series(np.random.randn(len(rng)), index=rng)
ts_utc = ts.tz_localize('utc')
pytest.raises(Exception, ts.__add__, ts_utc)
pytest.raises(Exception, ts_utc.__add__, ts)
test1 = DataFrame(np.zeros((6, 3)),
index=date_range("2012-11-15 00:00:00", periods=6,
freq="100L", tz="US/Central"))
test2 = DataFrame(np.zeros((3, 3)),
index=date_range("2012-11-15 00:00:00", periods=3,
freq="250L", tz="US/Central"),
columns=lrange(3, 6))
result = test1.join(test2, how='outer')
ex_index = test1.index.union(test2.index)
tm.assert_index_equal(result.index, ex_index)
assert result.index.tz.zone == 'US/Central'
# non-overlapping
rng = date_range("2012-11-15 00:00:00", periods=6, freq="H",
tz="US/Central")
rng2 = date_range("2012-11-15 12:00:00", periods=6, freq="H",
tz="US/Eastern")
result = rng.union(rng2)
assert result.tz.zone == 'UTC'
def test_align_aware(self):
idx1 = date_range('2001', periods=5, freq='H', tz='US/Eastern')
idx2 = date_range('2001', periods=5, freq='2H', tz='US/Eastern')
df1 = DataFrame(np.random.randn(len(idx1), 3), idx1)
df2 = DataFrame(np.random.randn(len(idx2), 3), idx2)
new1, new2 = df1.align(df2)
assert df1.index.tz == new1.index.tz
assert df2.index.tz == new2.index.tz
        # different timezones convert to UTC
# frame
df1_central = df1.tz_convert('US/Central')
new1, new2 = df1.align(df1_central)
assert new1.index.tz == pytz.UTC
assert new2.index.tz == pytz.UTC
# series
new1, new2 = df1[0].align(df1_central[0])
assert new1.index.tz == pytz.UTC
assert new2.index.tz == pytz.UTC
# combination
new1, new2 = df1.align(df1_central[0], axis=0)
assert new1.index.tz == pytz.UTC
assert new2.index.tz == pytz.UTC
        new1, new2 = df1[0].align(df1_central, axis=0)
assert new1.index.tz == pytz.UTC
assert new2.index.tz == pytz.UTC
def test_append_aware(self):
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H',
tz='US/Eastern')
rng2 = date_range('1/1/2011 02:00', periods=1, freq='H',
tz='US/Eastern')
ts1 = Series([1], index=rng1)
ts2 = Series([2], index=rng2)
ts_result = ts1.append(ts2)
exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'],
tz='US/Eastern')
exp = Series([1, 2], index=exp_index)
assert_series_equal(ts_result, exp)
assert ts_result.index.tz == rng1.tz
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H', tz='UTC')
rng2 = date_range('1/1/2011 02:00', periods=1, freq='H', tz='UTC')
ts1 = Series([1], index=rng1)
ts2 = Series([2], index=rng2)
ts_result = ts1.append(ts2)
exp_index = DatetimeIndex(['2011-01-01 01:00', '2011-01-01 02:00'],
tz='UTC')
exp = Series([1, 2], index=exp_index)
assert_series_equal(ts_result, exp)
utc = rng1.tz
assert utc == ts_result.index.tz
# GH 7795
# different tz coerces to object dtype, not UTC
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H',
tz='US/Eastern')
rng2 = date_range('1/1/2011 02:00', periods=1, freq='H',
tz='US/Central')
ts1 = Series([1], index=rng1)
ts2 = Series([2], index=rng2)
ts_result = ts1.append(ts2)
exp_index = Index([Timestamp('1/1/2011 01:00', tz='US/Eastern'),
Timestamp('1/1/2011 02:00', tz='US/Central')])
exp = Series([1, 2], index=exp_index)
assert_series_equal(ts_result, exp)
def test_append_dst(self):
rng1 = date_range('1/1/2016 01:00', periods=3, freq='H',
tz='US/Eastern')
rng2 = date_range('8/1/2016 01:00', periods=3, freq='H',
tz='US/Eastern')
ts1 = Series([1, 2, 3], index=rng1)
ts2 = Series([10, 11, 12], index=rng2)
ts_result = ts1.append(ts2)
exp_index = DatetimeIndex(['2016-01-01 01:00', '2016-01-01 02:00',
'2016-01-01 03:00', '2016-08-01 01:00',
'2016-08-01 02:00', '2016-08-01 03:00'],
tz='US/Eastern')
exp = Series([1, 2, 3, 10, 11, 12], index=exp_index)
assert_series_equal(ts_result, exp)
assert ts_result.index.tz == rng1.tz
def test_append_aware_naive(self):
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H')
rng2 = date_range('1/1/2011 02:00', periods=1, freq='H',
tz='US/Eastern')
ts1 = Series(np.random.randn(len(rng1)), index=rng1)
ts2 = Series(np.random.randn(len(rng2)), index=rng2)
ts_result = ts1.append(ts2)
assert ts_result.index.equals(ts1.index.asobject.append(
ts2.index.asobject))
# mixed
rng1 = date_range('1/1/2011 01:00', periods=1, freq='H')
rng2 = lrange(100)
ts1 = Series(np.random.randn(len(rng1)), index=rng1)
ts2 = Series(np.random.randn(len(rng2)), index=rng2)
ts_result = ts1.append(ts2)
assert ts_result.index.equals(ts1.index.asobject.append(
ts2.index))
def test_equal_join_ensure_utc(self):
rng = date_range('1/1/2011', periods=10, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), index=rng)
ts_moscow = ts.tz_convert('Europe/Moscow')
result = ts + ts_moscow
assert result.index.tz is pytz.utc
result = ts_moscow + ts
assert result.index.tz is pytz.utc
df = DataFrame({'a': ts})
df_moscow = df.tz_convert('Europe/Moscow')
result = df + df_moscow
assert result.index.tz is pytz.utc
result = df_moscow + df
assert result.index.tz is pytz.utc
def test_arith_utc_convert(self):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
perm = np.random.permutation(100)[:90]
ts1 = Series(np.random.randn(90),
index=rng.take(perm).tz_convert('US/Eastern'))
perm = np.random.permutation(100)[:90]
ts2 = Series(np.random.randn(90),
index=rng.take(perm).tz_convert('Europe/Berlin'))
result = ts1 + ts2
uts1 = ts1.tz_convert('utc')
uts2 = ts2.tz_convert('utc')
expected = uts1 + uts2
assert result.index.tz == pytz.UTC
assert_series_equal(result, expected)
def test_intersection(self):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
left = rng[10:90][::-1]
right = rng[20:80][::-1]
assert left.tz == rng.tz
result = left.intersection(right)
assert result.tz == left.tz
def test_timestamp_equality_different_timezones(self):
utc_range = date_range('1/1/2000', periods=20, tz='UTC')
eastern_range = utc_range.tz_convert('US/Eastern')
berlin_range = utc_range.tz_convert('Europe/Berlin')
for a, b, c in zip(utc_range, eastern_range, berlin_range):
assert a == b
assert b == c
assert a == c
assert (utc_range == eastern_range).all()
assert (utc_range == berlin_range).all()
assert (berlin_range == eastern_range).all()
def test_datetimeindex_tz(self):
rng = date_range('03/12/2012 00:00', periods=10, freq='W-FRI',
tz='US/Eastern')
rng2 = DatetimeIndex(data=rng, tz='US/Eastern')
tm.assert_index_equal(rng, rng2)
def test_normalize_tz(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D',
tz='US/Eastern')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D',
tz='US/Eastern')
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz='UTC')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D', tz='UTC')
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz=tzlocal())
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D', tz=tzlocal())
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
def test_normalize_tz_local(self):
# see gh-13459
timezones = ['US/Pacific', 'US/Eastern', 'UTC', 'Asia/Kolkata',
'Asia/Shanghai', 'Australia/Canberra']
for timezone in timezones:
with set_timezone(timezone):
rng = date_range('1/1/2000 9:30', periods=10, freq='D',
tz=tzlocal())
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D',
tz=tzlocal())
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
def test_tzaware_offset(self):
dates = date_range('2012-11-01', periods=3, tz='US/Pacific')
offset = dates + offsets.Hour(5)
assert dates[0] + offsets.Hour(5) == offset[0]
# GH 6818
for tz in ['UTC', 'US/Pacific', 'Asia/Tokyo']:
dates = date_range('2010-11-01 00:00', periods=3, tz=tz, freq='H')
expected = DatetimeIndex(['2010-11-01 05:00', '2010-11-01 06:00',
'2010-11-01 07:00'], freq='H', tz=tz)
offset = dates + offsets.Hour(5)
tm.assert_index_equal(offset, expected)
offset = dates + np.timedelta64(5, 'h')
tm.assert_index_equal(offset, expected)
offset = dates + timedelta(hours=5)
tm.assert_index_equal(offset, expected)
def test_nat(self):
# GH 5546
dates = [NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize('US/Pacific')
tm.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Pacific'))
idx = idx.tz_convert('US/Eastern')
tm.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Eastern'))
idx = idx.tz_convert('UTC')
tm.assert_index_equal(idx, DatetimeIndex(dates, tz='UTC'))
dates = ['2010-12-01 00:00', '2010-12-02 00:00', NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize('US/Pacific')
tm.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Pacific'))
idx = idx.tz_convert('US/Eastern')
expected = ['2010-12-01 03:00', '2010-12-02 03:00', NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern'))
idx = idx + offsets.Hour(5)
expected = ['2010-12-01 08:00', '2010-12-02 08:00', NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern'))
idx = idx.tz_convert('US/Pacific')
expected = ['2010-12-01 05:00', '2010-12-02 05:00', NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Pacific'))
idx = idx + np.timedelta64(3, 'h')
expected = ['2010-12-01 08:00', '2010-12-02 08:00', NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Pacific'))
idx = idx.tz_convert('US/Eastern')
expected = ['2010-12-01 11:00', '2010-12-02 11:00', NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern'))
class TestTslib(object):
def test_tslib_tz_convert(self):
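        # Helpers: the vectorized tslib.tz_convert should agree with applying
        # tslib.tz_convert_single element-wise.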
def compare_utc_to_local(tz_didx, utc_didx):
f = lambda x: tslib.tz_convert_single(x, 'UTC', tz_didx.tz)
result = tslib.tz_convert(tz_didx.asi8, 'UTC', tz_didx.tz)
result_single = np.vectorize(f)(tz_didx.asi8)
tm.assert_numpy_array_equal(result, result_single)
def compare_local_to_utc(tz_didx, utc_didx):
f = lambda x: tslib.tz_convert_single(x, tz_didx.tz, 'UTC')
result = tslib.tz_convert(utc_didx.asi8, tz_didx.tz, 'UTC')
result_single = np.vectorize(f)(utc_didx.asi8)
tm.assert_numpy_array_equal(result, result_single)
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern', 'Europe/Moscow']:
# US: 2014-03-09 - 2014-11-11
# MOSCOW: 2014-10-26 / 2014-12-31
tz_didx = date_range('2014-03-01', '2015-01-10', freq='H', tz=tz)
utc_didx = date_range('2014-03-01', '2015-01-10', freq='H')
compare_utc_to_local(tz_didx, utc_didx)
            # local tz to UTC can differ in hourly (or higher) freqs because
# of DST
compare_local_to_utc(tz_didx, utc_didx)
tz_didx = date_range('2000-01-01', '2020-01-01', freq='D', tz=tz)
utc_didx = date_range('2000-01-01', '2020-01-01', freq='D')
compare_utc_to_local(tz_didx, utc_didx)
compare_local_to_utc(tz_didx, utc_didx)
tz_didx = date_range('2000-01-01', '2100-01-01', freq='A', tz=tz)
utc_didx = date_range('2000-01-01', '2100-01-01', freq='A')
compare_utc_to_local(tz_didx, utc_didx)
compare_local_to_utc(tz_didx, utc_didx)
# Check empty array
result = tslib.tz_convert(np.array([], dtype=np.int64),
timezones.maybe_get_tz('US/Eastern'),
timezones.maybe_get_tz('Asia/Tokyo'))
tm.assert_numpy_array_equal(result, np.array([], dtype=np.int64))
# Check all-NaT array
result = tslib.tz_convert(np.array([tslib.iNaT], dtype=np.int64),
timezones.maybe_get_tz('US/Eastern'),
timezones.maybe_get_tz('Asia/Tokyo'))
tm.assert_numpy_array_equal(result, np.array(
[tslib.iNaT], dtype=np.int64))
| bsd-3-clause | 2,853,378,353,781,257,000 | -3,521,341,595,173,812,000 | 37.991559 | 79 | 0.55874 | false |
AtomLinter/linter-pylama | bin/deps/astroid/rebuilder.py | 2 | 39135 | # Copyright (c) 2009-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2013-2016 Claudiu Popa <[email protected]>
# Copyright (c) 2013-2014 Google, Inc.
# Copyright (c) 2015-2016 Cara Vinson <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""this module contains utilities for rebuilding a _ast tree in
order to get a single Astroid representation
"""
import sys
import _ast
import astroid
from astroid import astpeephole
from astroid import nodes
_BIN_OP_CLASSES = {_ast.Add: '+',
_ast.BitAnd: '&',
_ast.BitOr: '|',
_ast.BitXor: '^',
_ast.Div: '/',
_ast.FloorDiv: '//',
_ast.Mod: '%',
_ast.Mult: '*',
_ast.Pow: '**',
_ast.Sub: '-',
_ast.LShift: '<<',
_ast.RShift: '>>',
}
if sys.version_info >= (3, 5):
_BIN_OP_CLASSES[_ast.MatMult] = '@'
_BOOL_OP_CLASSES = {_ast.And: 'and',
_ast.Or: 'or',
}
_UNARY_OP_CLASSES = {_ast.UAdd: '+',
_ast.USub: '-',
_ast.Not: 'not',
_ast.Invert: '~',
}
_CMP_OP_CLASSES = {_ast.Eq: '==',
_ast.Gt: '>',
_ast.GtE: '>=',
_ast.In: 'in',
_ast.Is: 'is',
_ast.IsNot: 'is not',
_ast.Lt: '<',
_ast.LtE: '<=',
_ast.NotEq: '!=',
_ast.NotIn: 'not in',
}
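# In Python 2, ``None``/``True``/``False`` are parsed as Name nodes; map them
# to their constant values when rebuilding.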
CONST_NAME_TRANSFORMS = {'None': None,
'True': True,
'False': False,
}
REDIRECT = {'arguments': 'Arguments',
'comprehension': 'Comprehension',
"ListCompFor": 'Comprehension',
"GenExprFor": 'Comprehension',
'excepthandler': 'ExceptHandler',
'keyword': 'Keyword',
}
PY3 = sys.version_info >= (3, 0)
PY34 = sys.version_info >= (3, 4)
CONTEXTS = {_ast.Load: astroid.Load,
_ast.Store: astroid.Store,
_ast.Del: astroid.Del,
_ast.Param: astroid.Store}
def _get_doc(node):
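    """Return the node and its docstring; if the first statement of the body
    is a string literal it is removed from the body and returned as doc."""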
try:
if isinstance(node.body[0], _ast.Expr) and isinstance(node.body[0].value, _ast.Str):
doc = node.body[0].value.s
node.body = node.body[1:]
return node, doc
except IndexError:
pass # ast built from scratch
return node, None
def _visit_or_none(node, attr, visitor, parent, visit='visit',
**kws):
"""If the given node has an attribute, visits the attribute, and
otherwise returns None.
"""
value = getattr(node, attr, None)
if value:
return getattr(visitor, visit)(value, parent, **kws)
return None
def _get_context(node):
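    """Map the node's ast context to the astroid one, defaulting to Load."""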
return CONTEXTS.get(type(node.ctx), astroid.Load)
class TreeRebuilder(object):
"""Rebuilds the _ast tree to become an Astroid tree"""
def __init__(self, manager):
self._manager = manager
self._global_names = []
self._import_from_nodes = []
self._delayed_assattr = []
self._visit_meths = {}
self._peepholer = astpeephole.ASTPeepholeOptimizer()
def visit_module(self, node, modname, modpath, package):
"""visit a Module node by returning a fresh instance of it"""
node, doc = _get_doc(node)
newnode = nodes.Module(name=modname, doc=doc, file=modpath, path=modpath,
package=package, parent=None)
newnode.postinit([self.visit(child, newnode) for child in node.body])
return newnode
def visit(self, node, parent):
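        """Dispatch to the visit_* method for the node's class, caching the
        bound method for later calls."""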
cls = node.__class__
if cls in self._visit_meths:
visit_method = self._visit_meths[cls]
else:
cls_name = cls.__name__
visit_name = 'visit_' + REDIRECT.get(cls_name, cls_name).lower()
visit_method = getattr(self, visit_name)
self._visit_meths[cls] = visit_method
return visit_method(node, parent)
def _save_assignment(self, node, name=None):
"""save assignement situation since node.parent is not available yet"""
if self._global_names and node.name in self._global_names[-1]:
node.root().set_local(node.name, node)
else:
node.parent.set_local(node.name, node)
def visit_arguments(self, node, parent):
"""visit a Arguments node by returning a fresh instance of it"""
vararg, kwarg = node.vararg, node.kwarg
if PY34:
newnode = nodes.Arguments(vararg.arg if vararg else None,
kwarg.arg if kwarg else None,
parent)
else:
newnode = nodes.Arguments(vararg, kwarg, parent)
args = [self.visit(child, newnode) for child in node.args]
defaults = [self.visit(child, newnode)
for child in node.defaults]
varargannotation = None
kwargannotation = None
# change added in 82732 (7c5c678e4164), vararg and kwarg
# are instances of `_ast.arg`, not strings
if vararg:
if PY34:
if node.vararg.annotation:
varargannotation = self.visit(node.vararg.annotation,
newnode)
vararg = vararg.arg
if kwarg:
if PY34:
if node.kwarg.annotation:
kwargannotation = self.visit(node.kwarg.annotation,
newnode)
kwarg = kwarg.arg
if PY3:
kwonlyargs = [self.visit(child, newnode) for child
in node.kwonlyargs]
kw_defaults = [self.visit(child, newnode) if child else
None for child in node.kw_defaults]
annotations = [self.visit(arg.annotation, newnode) if
arg.annotation else None for arg in node.args]
kwonlyargs_annotations = [
self.visit(arg.annotation, newnode) if arg.annotation else None
for arg in node.kwonlyargs
]
else:
kwonlyargs = []
kw_defaults = []
annotations = []
kwonlyargs_annotations = []
newnode.postinit(
args=args,
defaults=defaults,
kwonlyargs=kwonlyargs,
kw_defaults=kw_defaults,
annotations=annotations,
kwonlyargs_annotations=kwonlyargs_annotations,
varargannotation=varargannotation,
kwargannotation=kwargannotation
)
# save argument names in locals:
if vararg:
newnode.parent.set_local(vararg, newnode)
if kwarg:
newnode.parent.set_local(kwarg, newnode)
return newnode
def visit_assert(self, node, parent):
"""visit a Assert node by returning a fresh instance of it"""
newnode = nodes.Assert(node.lineno, node.col_offset, parent)
if node.msg:
msg = self.visit(node.msg, newnode)
else:
msg = None
newnode.postinit(self.visit(node.test, newnode), msg)
return newnode
def visit_assign(self, node, parent):
"""visit a Assign node by returning a fresh instance of it"""
newnode = nodes.Assign(node.lineno, node.col_offset, parent)
newnode.postinit([self.visit(child, newnode)
for child in node.targets],
self.visit(node.value, newnode))
return newnode
def visit_assignname(self, node, parent, node_name=None):
        '''visit a node and return an AssignName node'''
newnode = nodes.AssignName(node_name, getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
self._save_assignment(newnode)
return newnode
def visit_augassign(self, node, parent):
"""visit a AugAssign node by returning a fresh instance of it"""
newnode = nodes.AugAssign(_BIN_OP_CLASSES[type(node.op)] + "=",
node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.target, newnode),
self.visit(node.value, newnode))
return newnode
def visit_repr(self, node, parent):
"""visit a Backquote node by returning a fresh instance of it"""
newnode = nodes.Repr(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.value, newnode))
return newnode
def visit_binop(self, node, parent):
"""visit a BinOp node by returning a fresh instance of it"""
if isinstance(node.left, _ast.BinOp) and self._manager.optimize_ast:
# Optimize BinOp operations in order to remove
# redundant recursion. For instance, if the
# following code is parsed in order to obtain
# its ast, then the rebuilder will fail with an
# infinite recursion, the same will happen with the
# inference engine as well. There's no need to hold
# so many objects for the BinOp if they can be reduced
# to something else (also, the optimization
# might handle only Const binops, which isn't a big
# problem for the correctness of the program).
#
# ("a" + "b" + # one thousand more + "c")
optimized = self._peepholer.optimize_binop(node, parent)
if optimized:
return optimized
newnode = nodes.BinOp(_BIN_OP_CLASSES[type(node.op)],
node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.left, newnode),
self.visit(node.right, newnode))
return newnode
def visit_boolop(self, node, parent):
"""visit a BoolOp node by returning a fresh instance of it"""
newnode = nodes.BoolOp(_BOOL_OP_CLASSES[type(node.op)],
node.lineno, node.col_offset, parent)
newnode.postinit([self.visit(child, newnode)
for child in node.values])
return newnode
def visit_break(self, node, parent):
"""visit a Break node by returning a fresh instance of it"""
return nodes.Break(getattr(node, 'lineno', None),
getattr(node, 'col_offset', None),
parent)
def visit_call(self, node, parent):
"""visit a CallFunc node by returning a fresh instance of it"""
newnode = nodes.Call(node.lineno, node.col_offset, parent)
starargs = _visit_or_none(node, 'starargs', self, newnode)
kwargs = _visit_or_none(node, 'kwargs', self, newnode)
args = [self.visit(child, newnode)
for child in node.args]
if node.keywords:
keywords = [self.visit(child, newnode)
for child in node.keywords]
else:
keywords = None
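        # On Python < 3.5 the Call node carries *args/**kwargs as separate
        # starargs/kwargs attributes; fold them into args/keywords here.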
if starargs:
new_starargs = nodes.Starred(col_offset=starargs.col_offset,
lineno=starargs.lineno,
parent=starargs.parent)
new_starargs.postinit(value=starargs)
args.append(new_starargs)
if kwargs:
new_kwargs = nodes.Keyword(arg=None, col_offset=kwargs.col_offset,
lineno=kwargs.lineno,
parent=kwargs.parent)
new_kwargs.postinit(value=kwargs)
if keywords:
keywords.append(new_kwargs)
else:
keywords = [new_kwargs]
newnode.postinit(self.visit(node.func, newnode),
args, keywords)
return newnode
def visit_classdef(self, node, parent, newstyle=None):
"""visit a ClassDef node to become astroid"""
node, doc = _get_doc(node)
newnode = nodes.ClassDef(node.name, doc, node.lineno,
node.col_offset, parent)
metaclass = None
if PY3:
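            # in Python 3 the metaclass is passed as a keyword argument:
            # ``class A(B, metaclass=M)``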
for keyword in node.keywords:
if keyword.arg == 'metaclass':
metaclass = self.visit(keyword, newnode).value
break
if node.decorator_list:
decorators = self.visit_decorators(node, newnode)
else:
decorators = None
newnode.postinit([self.visit(child, newnode)
for child in node.bases],
[self.visit(child, newnode)
for child in node.body],
decorators, newstyle, metaclass,
[self.visit(kwd, newnode) for kwd in node.keywords
if kwd.arg != 'metaclass'] if PY3 else [])
return newnode
def visit_const(self, node, parent):
"""visit a Const node by returning a fresh instance of it"""
return nodes.Const(node.value,
getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
def visit_continue(self, node, parent):
"""visit a Continue node by returning a fresh instance of it"""
return nodes.Continue(getattr(node, 'lineno', None),
getattr(node, 'col_offset', None),
parent)
def visit_compare(self, node, parent):
"""visit a Compare node by returning a fresh instance of it"""
newnode = nodes.Compare(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.left, newnode),
[(_CMP_OP_CLASSES[op.__class__],
self.visit(expr, newnode))
for (op, expr) in zip(node.ops, node.comparators)])
return newnode
def visit_comprehension(self, node, parent):
"""visit a Comprehension node by returning a fresh instance of it"""
newnode = nodes.Comprehension(parent)
newnode.postinit(self.visit(node.target, newnode),
self.visit(node.iter, newnode),
[self.visit(child, newnode)
for child in node.ifs],
getattr(node, 'is_async', None))
return newnode
def visit_decorators(self, node, parent):
"""visit a Decorators node by returning a fresh instance of it"""
# /!\ node is actually a _ast.FunctionDef node while
# parent is a astroid.nodes.FunctionDef node
newnode = nodes.Decorators(node.lineno, node.col_offset, parent)
newnode.postinit([self.visit(child, newnode)
for child in node.decorator_list])
return newnode
def visit_delete(self, node, parent):
"""visit a Delete node by returning a fresh instance of it"""
newnode = nodes.Delete(node.lineno, node.col_offset, parent)
newnode.postinit([self.visit(child, newnode)
for child in node.targets])
return newnode
def _visit_dict_items(self, node, parent, newnode):
for key, value in zip(node.keys, node.values):
rebuilt_value = self.visit(value, newnode)
if not key:
# Python 3.5 and extended unpacking
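                # e.g. ``{**base, 'x': 1}`` yields a None key for **base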
rebuilt_key = nodes.DictUnpack(rebuilt_value.lineno,
rebuilt_value.col_offset,
parent)
else:
rebuilt_key = self.visit(key, newnode)
yield rebuilt_key, rebuilt_value
def visit_dict(self, node, parent):
"""visit a Dict node by returning a fresh instance of it"""
newnode = nodes.Dict(node.lineno, node.col_offset, parent)
items = list(self._visit_dict_items(node, parent, newnode))
newnode.postinit(items)
return newnode
def visit_dictcomp(self, node, parent):
"""visit a DictComp node by returning a fresh instance of it"""
newnode = nodes.DictComp(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.key, newnode),
self.visit(node.value, newnode),
[self.visit(child, newnode)
for child in node.generators])
return newnode
def visit_expr(self, node, parent):
"""visit a Expr node by returning a fresh instance of it"""
newnode = nodes.Expr(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.value, newnode))
return newnode
def visit_ellipsis(self, node, parent):
"""visit an Ellipsis node by returning a fresh instance of it"""
return nodes.Ellipsis(getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
def visit_emptynode(self, node, parent):
"""visit an EmptyNode node by returning a fresh instance of it"""
return nodes.EmptyNode(getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
def visit_excepthandler(self, node, parent):
"""visit an ExceptHandler node by returning a fresh instance of it"""
newnode = nodes.ExceptHandler(node.lineno, node.col_offset, parent)
# /!\ node.name can be a tuple
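        # (in Python 2, e.g. ``except ValueError, (a, b):``)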
newnode.postinit(_visit_or_none(node, 'type', self, newnode),
_visit_or_none(node, 'name', self, newnode),
[self.visit(child, newnode)
for child in node.body])
return newnode
def visit_exec(self, node, parent):
"""visit an Exec node by returning a fresh instance of it"""
newnode = nodes.Exec(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.body, newnode),
_visit_or_none(node, 'globals', self, newnode),
_visit_or_none(node, 'locals', self, newnode))
return newnode
def visit_extslice(self, node, parent):
"""visit an ExtSlice node by returning a fresh instance of it"""
newnode = nodes.ExtSlice(parent=parent)
newnode.postinit([self.visit(dim, newnode)
for dim in node.dims])
return newnode
def _visit_for(self, cls, node, parent):
"""visit a For node by returning a fresh instance of it"""
newnode = cls(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.target, newnode),
self.visit(node.iter, newnode),
[self.visit(child, newnode)
for child in node.body],
[self.visit(child, newnode)
for child in node.orelse])
return newnode
def visit_for(self, node, parent):
return self._visit_for(nodes.For, node, parent)
def visit_importfrom(self, node, parent):
"""visit an ImportFrom node by returning a fresh instance of it"""
names = [(alias.name, alias.asname) for alias in node.names]
newnode = nodes.ImportFrom(node.module or '', names, node.level or None,
getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
# store From names to add them to locals after building
self._import_from_nodes.append(newnode)
return newnode
def _visit_functiondef(self, cls, node, parent):
"""visit an FunctionDef node to become astroid"""
self._global_names.append({})
node, doc = _get_doc(node)
newnode = cls(node.name, doc, node.lineno,
node.col_offset, parent)
if node.decorator_list:
decorators = self.visit_decorators(node, newnode)
else:
decorators = None
if PY3 and node.returns:
returns = self.visit(node.returns, newnode)
else:
returns = None
newnode.postinit(self.visit(node.args, newnode),
[self.visit(child, newnode)
for child in node.body],
decorators, returns)
self._global_names.pop()
return newnode
def visit_functiondef(self, node, parent):
return self._visit_functiondef(nodes.FunctionDef, node, parent)
def visit_generatorexp(self, node, parent):
"""visit a GeneratorExp node by returning a fresh instance of it"""
newnode = nodes.GeneratorExp(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.elt, newnode),
[self.visit(child, newnode)
for child in node.generators])
return newnode
def visit_attribute(self, node, parent):
"""visit an Attribute node by returning a fresh instance of it"""
context = _get_context(node)
if context == astroid.Del:
            # FIXME: maybe we should reintroduce a visit_delattr ?
# for instance, deactivating assign_ctx
newnode = nodes.DelAttr(node.attr, node.lineno, node.col_offset,
parent)
elif context == astroid.Store:
newnode = nodes.AssignAttr(node.attr, node.lineno, node.col_offset,
parent)
# Prohibit a local save if we are in an ExceptHandler.
if not isinstance(parent, astroid.ExceptHandler):
self._delayed_assattr.append(newnode)
else:
newnode = nodes.Attribute(node.attr, node.lineno, node.col_offset,
parent)
newnode.postinit(self.visit(node.value, newnode))
return newnode
def visit_global(self, node, parent):
"""visit a Global node to become astroid"""
newnode = nodes.Global(node.names, getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
if self._global_names: # global at the module level, no effect
for name in node.names:
self._global_names[-1].setdefault(name, []).append(newnode)
return newnode
def visit_if(self, node, parent):
"""visit an If node by returning a fresh instance of it"""
newnode = nodes.If(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.test, newnode),
[self.visit(child, newnode)
for child in node.body],
[self.visit(child, newnode)
for child in node.orelse])
return newnode
def visit_ifexp(self, node, parent):
"""visit a IfExp node by returning a fresh instance of it"""
newnode = nodes.IfExp(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.test, newnode),
self.visit(node.body, newnode),
self.visit(node.orelse, newnode))
return newnode
def visit_import(self, node, parent):
"""visit a Import node by returning a fresh instance of it"""
names = [(alias.name, alias.asname) for alias in node.names]
newnode = nodes.Import(names, getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
# save import names in parent's locals:
for (name, asname) in newnode.names:
name = asname or name
parent.set_local(name.split('.')[0], newnode)
return newnode
def visit_index(self, node, parent):
"""visit a Index node by returning a fresh instance of it"""
newnode = nodes.Index(parent=parent)
newnode.postinit(self.visit(node.value, newnode))
return newnode
def visit_keyword(self, node, parent):
"""visit a Keyword node by returning a fresh instance of it"""
newnode = nodes.Keyword(node.arg, parent=parent)
newnode.postinit(self.visit(node.value, newnode))
return newnode
def visit_lambda(self, node, parent):
"""visit a Lambda node by returning a fresh instance of it"""
newnode = nodes.Lambda(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.args, newnode),
self.visit(node.body, newnode))
return newnode
def visit_list(self, node, parent):
"""visit a List node by returning a fresh instance of it"""
context = _get_context(node)
newnode = nodes.List(ctx=context,
lineno=node.lineno,
col_offset=node.col_offset,
parent=parent)
newnode.postinit([self.visit(child, newnode)
for child in node.elts])
return newnode
def visit_listcomp(self, node, parent):
"""visit a ListComp node by returning a fresh instance of it"""
newnode = nodes.ListComp(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.elt, newnode),
[self.visit(child, newnode)
for child in node.generators])
return newnode
def visit_name(self, node, parent):
"""visit a Name node by returning a fresh instance of it"""
context = _get_context(node)
# True and False can be assigned to something in py2x, so we have to
        # check the context first.
if context == astroid.Del:
newnode = nodes.DelName(node.id, node.lineno, node.col_offset,
parent)
elif context == astroid.Store:
newnode = nodes.AssignName(node.id, node.lineno, node.col_offset,
parent)
elif node.id in CONST_NAME_TRANSFORMS:
newnode = nodes.Const(CONST_NAME_TRANSFORMS[node.id],
getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
return newnode
else:
newnode = nodes.Name(node.id, node.lineno, node.col_offset, parent)
# XXX REMOVE me :
if context in (astroid.Del, astroid.Store): # 'Aug' ??
self._save_assignment(newnode)
return newnode
def visit_str(self, node, parent):
"""visit a String/Bytes node by returning a fresh instance of Const"""
return nodes.Const(node.s, getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
visit_bytes = visit_str
def visit_num(self, node, parent):
"""visit a Num node by returning a fresh instance of Const"""
return nodes.Const(node.n, getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
def visit_pass(self, node, parent):
"""visit a Pass node by returning a fresh instance of it"""
return nodes.Pass(node.lineno, node.col_offset, parent)
def visit_print(self, node, parent):
"""visit a Print node by returning a fresh instance of it"""
newnode = nodes.Print(node.nl, node.lineno, node.col_offset, parent)
newnode.postinit(_visit_or_none(node, 'dest', self, newnode),
[self.visit(child, newnode)
for child in node.values])
return newnode
def visit_raise(self, node, parent):
"""visit a Raise node by returning a fresh instance of it"""
newnode = nodes.Raise(node.lineno, node.col_offset, parent)
newnode.postinit(_visit_or_none(node, 'type', self, newnode),
_visit_or_none(node, 'inst', self, newnode),
_visit_or_none(node, 'tback', self, newnode))
return newnode
def visit_return(self, node, parent):
"""visit a Return node by returning a fresh instance of it"""
newnode = nodes.Return(node.lineno, node.col_offset, parent)
if node.value is not None:
newnode.postinit(self.visit(node.value, newnode))
return newnode
def visit_set(self, node, parent):
"""visit a Set node by returning a fresh instance of it"""
newnode = nodes.Set(node.lineno, node.col_offset, parent)
newnode.postinit([self.visit(child, newnode)
for child in node.elts])
return newnode
def visit_setcomp(self, node, parent):
"""visit a SetComp node by returning a fresh instance of it"""
newnode = nodes.SetComp(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.elt, newnode),
[self.visit(child, newnode)
for child in node.generators])
return newnode
def visit_slice(self, node, parent):
"""visit a Slice node by returning a fresh instance of it"""
newnode = nodes.Slice(parent=parent)
newnode.postinit(_visit_or_none(node, 'lower', self, newnode),
_visit_or_none(node, 'upper', self, newnode),
_visit_or_none(node, 'step', self, newnode))
return newnode
def visit_subscript(self, node, parent):
"""visit a Subscript node by returning a fresh instance of it"""
context = _get_context(node)
newnode = nodes.Subscript(ctx=context,
lineno=node.lineno,
col_offset=node.col_offset,
parent=parent)
newnode.postinit(self.visit(node.value, newnode),
self.visit(node.slice, newnode))
return newnode
def visit_tryexcept(self, node, parent):
"""visit a TryExcept node by returning a fresh instance of it"""
newnode = nodes.TryExcept(node.lineno, node.col_offset, parent)
newnode.postinit([self.visit(child, newnode)
for child in node.body],
[self.visit(child, newnode)
for child in node.handlers],
[self.visit(child, newnode)
for child in node.orelse])
return newnode
def visit_tryfinally(self, node, parent):
"""visit a TryFinally node by returning a fresh instance of it"""
newnode = nodes.TryFinally(node.lineno, node.col_offset, parent)
newnode.postinit([self.visit(child, newnode)
for child in node.body],
[self.visit(n, newnode)
for n in node.finalbody])
return newnode
def visit_tuple(self, node, parent):
"""visit a Tuple node by returning a fresh instance of it"""
context = _get_context(node)
newnode = nodes.Tuple(ctx=context,
lineno=node.lineno,
col_offset=node.col_offset,
parent=parent)
newnode.postinit([self.visit(child, newnode)
for child in node.elts])
return newnode
def visit_unaryop(self, node, parent):
"""visit a UnaryOp node by returning a fresh instance of it"""
newnode = nodes.UnaryOp(_UNARY_OP_CLASSES[node.op.__class__],
node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.operand, newnode))
return newnode
def visit_while(self, node, parent):
"""visit a While node by returning a fresh instance of it"""
newnode = nodes.While(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.test, newnode),
[self.visit(child, newnode)
for child in node.body],
[self.visit(child, newnode)
for child in node.orelse])
return newnode
def visit_with(self, node, parent):
newnode = nodes.With(node.lineno, node.col_offset, parent)
expr = self.visit(node.context_expr, newnode)
if node.optional_vars is not None:
optional_vars = self.visit(node.optional_vars, newnode)
else:
optional_vars = None
newnode.postinit([(expr, optional_vars)],
[self.visit(child, newnode)
for child in node.body])
return newnode
def visit_yield(self, node, parent):
"""visit a Yield node by returning a fresh instance of it"""
newnode = nodes.Yield(node.lineno, node.col_offset, parent)
if node.value is not None:
newnode.postinit(self.visit(node.value, newnode))
return newnode
class TreeRebuilder3(TreeRebuilder):
"""extend and overwrite TreeRebuilder for python3k"""
def visit_arg(self, node, parent):
"""visit a arg node by returning a fresh AssName instance"""
# TODO(cpopa): introduce an Arg node instead of using AssignName.
return self.visit_assignname(node, parent, node.arg)
def visit_nameconstant(self, node, parent):
# in Python 3.4 we have NameConstant for True / False / None
return nodes.Const(node.value, getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
def visit_excepthandler(self, node, parent):
"""visit an ExceptHandler node by returning a fresh instance of it"""
newnode = nodes.ExceptHandler(node.lineno, node.col_offset, parent)
if node.name:
name = self.visit_assignname(node, newnode, node.name)
else:
name = None
newnode.postinit(_visit_or_none(node, 'type', self, newnode),
name,
[self.visit(child, newnode)
for child in node.body])
return newnode
def visit_nonlocal(self, node, parent):
"""visit a Nonlocal node and return a new instance of it"""
return nodes.Nonlocal(node.names, getattr(node, 'lineno', None),
getattr(node, 'col_offset', None), parent)
def visit_raise(self, node, parent):
"""visit a Raise node by returning a fresh instance of it"""
newnode = nodes.Raise(node.lineno, node.col_offset, parent)
# no traceback; anyway it is not used in Pylint
newnode.postinit(_visit_or_none(node, 'exc', self, newnode),
_visit_or_none(node, 'cause', self, newnode))
return newnode
def visit_starred(self, node, parent):
"""visit a Starred node and return a new instance of it"""
context = _get_context(node)
newnode = nodes.Starred(ctx=context, lineno=node.lineno,
col_offset=node.col_offset,
parent=parent)
newnode.postinit(self.visit(node.value, newnode))
return newnode
def visit_try(self, node, parent):
# python 3.3 introduce a new Try node replacing
# TryFinally/TryExcept nodes
if node.finalbody:
newnode = nodes.TryFinally(node.lineno, node.col_offset, parent)
if node.handlers:
body = [self.visit_tryexcept(node, newnode)]
else:
body = [self.visit(child, newnode)
for child in node.body]
newnode.postinit(body,
[self.visit(n, newnode)
for n in node.finalbody])
return newnode
elif node.handlers:
return self.visit_tryexcept(node, parent)
return None
def visit_annassign(self, node, parent):
"""visit an AnnAssign node by returning a fresh instance of it"""
newnode = nodes.AnnAssign(node.lineno, node.col_offset, parent)
annotation = _visit_or_none(node, 'annotation', self, newnode)
newnode.postinit(target=self.visit(node.target, newnode),
annotation=annotation,
simple=node.simple,
value=_visit_or_none(node, 'value', self, newnode))
return newnode
def _visit_with(self, cls, node, parent):
if 'items' not in node._fields:
# python < 3.3
return super(TreeRebuilder3, self).visit_with(node, parent)
newnode = cls(node.lineno, node.col_offset, parent)
def visit_child(child):
expr = self.visit(child.context_expr, newnode)
var = _visit_or_none(child, 'optional_vars', self, newnode)
return expr, var
newnode.postinit([visit_child(child) for child in node.items],
[self.visit(child, newnode)
for child in node.body])
return newnode
def visit_with(self, node, parent):
return self._visit_with(nodes.With, node, parent)
def visit_yieldfrom(self, node, parent):
newnode = nodes.YieldFrom(node.lineno, node.col_offset, parent)
if node.value is not None:
newnode.postinit(self.visit(node.value, newnode))
return newnode
def visit_classdef(self, node, parent, newstyle=True):
return super(TreeRebuilder3, self).visit_classdef(node, parent,
newstyle=newstyle)
# Async structs added in Python 3.5
def visit_asyncfunctiondef(self, node, parent):
return self._visit_functiondef(nodes.AsyncFunctionDef, node, parent)
def visit_asyncfor(self, node, parent):
return self._visit_for(nodes.AsyncFor, node, parent)
def visit_await(self, node, parent):
newnode = nodes.Await(node.lineno, node.col_offset, parent)
newnode.postinit(value=self.visit(node.value, newnode))
return newnode
def visit_asyncwith(self, node, parent):
return self._visit_with(nodes.AsyncWith, node, parent)
def visit_joinedstr(self, node, parent):
newnode = nodes.JoinedStr(node.lineno, node.col_offset, parent)
newnode.postinit([self.visit(child, newnode)
for child in node.values])
return newnode
def visit_formattedvalue(self, node, parent):
newnode = nodes.FormattedValue(node.lineno, node.col_offset, parent)
newnode.postinit(self.visit(node.value, newnode),
node.conversion,
_visit_or_none(node, 'format_spec', self, newnode))
return newnode
if sys.version_info >= (3, 0):
TreeRebuilder = TreeRebuilder3
| mit | 5,970,214,544,650,732,000 | -4,569,736,567,885,908,500 | 42.10022 | 92 | 0.55886 | false |
eamosov/thrift | test/py/TestRenderedDoubleConstants.py | 17 | 10576 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from DoubleConstantsTest import constants
#
# In order to run the test under Windows, we need to create a symbolic link
# named 'thrift' to the '../src' folder by using:
#
# mklink /D thrift ..\src
#
class TestRenderedDoubleConstants(unittest.TestCase):
ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST = \
"failed to verify a double constant generated by Thrift (expected = %f, got = %f)"
ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_LIST_TEST =\
"failed to verify a list item by Thrift (expected = %f, got = %f)"
ASSERTION_MESSAGE_FOR_TYPE_CHECKS = "the rendered variable with name %s is not of double type"
# to make sure the variables inside Thrift files are generated correctly
def test_rendered_double_constants(self):
EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT = 1.0
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT = -100.0
EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT = 9223372036854775807.0
EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT = -9223372036854775807.0
EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS = 3.14159265359
EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE = 1000000.1
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE = -1000000.1
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE = 1.7e+308
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE = 9223372036854775816.43
EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE = -1.7e+308
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE = -9223372036854775816.43
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_INT_CONSTANT, constants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT,
places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT,
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT,
places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT,
constants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT,
places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT,
constants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST,
EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS,
constants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE,
places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST,
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST,
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST, EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_SMALL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST))
self.assertAlmostEqual(
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST,
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE, places=7,
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_RENDERED_DOUBLE_CONSTANTS_TEST % (
EXPECTED_DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE,
constants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST))
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_INT_CONSTANT_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_NEGATIVE_INT_CONSTANT_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_LARGEST_INT_CONSTANT_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_SMALLEST_INT_CONSTANT_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_DOUBLE_WITH_MANY_DECIMALS_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_FRACTIONAL_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_NEGATIVE_FRACTIONAL_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_LARGE_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_LARGE_FRACTIONAL_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_SMALL_DOUBLE_TEST")
self.assertTrue(
isinstance(constants.DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST, float),
msg=TestRenderedDoubleConstants.ASSERTION_MESSAGE_FOR_TYPE_CHECKS %
"DOUBLE_ASSIGNED_TO_NEGATIVE_BUT_LARGE_FRACTIONAL_DOUBLE_TEST")
    # make sure list<double> constants defined in Thrift files are rendered correctly
def test_rendered_double_list(self):
EXPECTED_DOUBLE_LIST = [1.0, -100.0, 100.0, 9223372036854775807.0, -9223372036854775807.0, 3.14159265359,
1000000.1, -1000000.1, 1.7e+308, -1.7e+308, 9223372036854775816.43,
-9223372036854775816.43]
self.assertEqual(len(constants.DOUBLE_LIST_TEST), len(EXPECTED_DOUBLE_LIST))
for i, expectedValue in enumerate(EXPECTED_DOUBLE_LIST):
self.assertAlmostEqual(constants.DOUBLE_LIST_TEST[i], expectedValue, places=7)
def suite():
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(TestRenderedDoubleConstants))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite", testRunner=unittest.TextTestRunner(verbosity=2))
| apache-2.0 | 2,863,334,379,811,724,000 | -4,025,156,356,676,366,000 | 58.751412 | 119 | 0.702156 | false |
gatagat/lapjv | bench/overview_sparse.py | 1 | 2203 | from pytest import mark
from joblib import Memory
import numpy as np
from lap import lapjv, lapmod
from lap.lapmod import get_cost
try:
from lap_old import lapjv as lapjv_old
except ImportError:
print(
'''If you get here, you do not have the old lapjv to compare to.
        git clone git@github.com:gatagat/lapjv.git lapjv-old
cd lapjv-old
git checkout old
python setup.py build_ext -i
mv lapjv lapjv_old
And run the benchmark:
LAPJV_OLD=lapjv-old bench.sh
''')
lapjv_old = None
from centrosome.lapjv import lapjv as lapjv_centrosome
from lap.tests.test_utils import (
sparse_from_masked,
sparse_from_masked_CS,
get_sparse_int,
get_platform_maxint
)
cachedir = '/tmp/lapjv-cache'
memory = Memory(cachedir=cachedir, verbose=1)
@memory.cache
def get_data(seed):
cost, mask = get_sparse_int(5000, 1000, 0.01, hard=False, seed=seed)
cost_ = cost.copy()
cost_[~mask] = get_platform_maxint()
opt = lapjv(cost_)[0]
return cost, mask, opt
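# Note on the setup above: get_data() caches a random sparse assignment problem
# together with its optimal cost.  The dense solvers (lapjv, lapjv_old) receive the
# forbidden entries as the platform maxint, while lapmod and the centrosome solver
# consume the compressed-sparse views built by sparse_from_masked()/
# sparse_from_masked_CS(); each benchmark below asserts that the returned
# assignment reproduces the cached optimum `opt`.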
seeds = [1299821, 15485867, 32452867, 49979693]
def _get_cost_CS(cost, x):
return cost[np.arange(cost.shape[0]), x].sum()
@mark.parametrize('seed', seeds)
def test_CSCY(benchmark, seed):
cost, mask, opt = get_data(seed)
i, j, cc = sparse_from_masked_CS(cost, mask)
ret = benchmark(lapjv_centrosome, i, j, cc)
assert _get_cost_CS(cost, ret[0]) == opt
if lapjv_old is not None:
@mark.parametrize('seed', seeds)
def test_JV_old(benchmark, seed):
cost, mask, opt = get_data(seed)
cost[~mask] = get_platform_maxint()
ret = benchmark(lapjv_old, cost)
assert ret[0] == opt
@mark.parametrize('seed', seeds)
def test_JV(benchmark, seed):
cost, mask, opt = get_data(seed)
cost[~mask] = get_platform_maxint()
ret = benchmark(lapjv, cost)
assert ret[0] == opt
@mark.parametrize('seed', seeds)
def test_MOD_c(benchmark, seed):
cost, mask, opt = get_data(seed)
n, cc, ii, kk = sparse_from_masked(cost, mask)
ret = benchmark(lapmod, n, cc, ii, kk, fast=True, return_cost=False)
assert get_cost(n, cc, ii, kk, ret[0]) == opt
| bsd-2-clause | -1,136,599,336,251,793,000 | 5,808,073,282,927,701,000 | 26.886076 | 74 | 0.634135 | false |
ryfeus/lambda-packs | Opencv_pil/source36/numpy/polynomial/tests/test_hermite.py | 6 | 18758 | """Tests for hermite module.
"""
from __future__ import division, absolute_import, print_function
from functools import reduce
import numpy as np
import numpy.polynomial.hermite as herm
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
)
H0 = np.array([1])
H1 = np.array([0, 2])
H2 = np.array([-2, 0, 4])
H3 = np.array([0, -12, 0, 8])
H4 = np.array([12, 0, -48, 0, 16])
H5 = np.array([0, 120, 0, -160, 0, 32])
H6 = np.array([-120, 0, 720, 0, -480, 0, 64])
H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128])
H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256])
H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512])
Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9]
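# The rows of Hlist follow the physicists' Hermite recurrence
# H_{n+1}(x) = 2*x*H_n(x) - 2*n*H_{n-1}(x).  The helper below is only an
# illustrative cross-check of that table and is not invoked by any test.
def _hermite_coeffs_via_recurrence(nmax):
    from numpy.polynomial import polynomial as P
    coeffs = [np.array([1.]), np.array([0., 2.])]
    for n in range(1, nmax):
        coeffs.append(P.polysub(2*P.polymulx(coeffs[-1]), 2*n*coeffs[-2]))
    return coeffs[:nmax + 1]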
def trim(x):
return herm.hermtrim(x, tol=1e-6)
class TestConstants(object):
def test_hermdomain(self):
assert_equal(herm.hermdomain, [-1, 1])
def test_hermzero(self):
assert_equal(herm.hermzero, [0])
def test_hermone(self):
assert_equal(herm.hermone, [1])
def test_hermx(self):
assert_equal(herm.hermx, [0, .5])
class TestArithmetic(object):
x = np.linspace(-3, 3, 100)
def test_hermadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = herm.hermadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermsub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = herm.hermsub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermmulx(self):
assert_equal(herm.hermmulx([0]), [0])
assert_equal(herm.hermmulx([1]), [0, .5])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [i, 0, .5]
assert_equal(herm.hermmulx(ser), tgt)
def test_hermmul(self):
# check values of result
for i in range(5):
pol1 = [0]*i + [1]
val1 = herm.hermval(self.x, pol1)
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
pol2 = [0]*j + [1]
val2 = herm.hermval(self.x, pol2)
pol3 = herm.hermmul(pol1, pol2)
val3 = herm.hermval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1*val2, err_msg=msg)
def test_hermdiv(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = herm.hermadd(ci, cj)
quo, rem = herm.hermdiv(tgt, ci)
res = herm.hermadd(herm.hermmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermpow(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
c = np.arange(i + 1)
tgt = reduce(herm.hermmul, [c]*j, np.array([1]))
res = herm.hermpow(c, j)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2.5, 1., .75])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_hermval(self):
#check empty input
assert_equal(herm.hermval([], [1]).size, 0)
        #check normal input
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Hlist]
for i in range(10):
msg = "At i=%d" % i
tgt = y[i]
res = herm.hermval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3):
dims = [2]*i
x = np.zeros(dims)
assert_equal(herm.hermval(x, [1]).shape, dims)
assert_equal(herm.hermval(x, [1, 0]).shape, dims)
assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims)
def test_hermval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = herm.hermval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herm.hermval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_hermval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = herm.hermval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herm.hermval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_hermgrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = herm.hermgrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herm.hermgrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_hermgrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = herm.hermgrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herm.hermgrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
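# Note: the hermgrid2d/hermgrid3d tests use np.einsum outer products as targets
# because the *grid* variants evaluate the series on the Cartesian product of the
# sample points, whereas hermval2d/hermval3d evaluate pointwise on arrays of
# matching shape.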
class TestIntegral(object):
def test_hermint(self):
# check exceptions
assert_raises(ValueError, herm.hermint, [0], .5)
assert_raises(ValueError, herm.hermint, [0], -1)
assert_raises(ValueError, herm.hermint, [0], 1, [0, 0])
assert_raises(ValueError, herm.hermint, [0], lbnd=[0])
assert_raises(ValueError, herm.hermint, [0], scl=[0])
assert_raises(ValueError, herm.hermint, [0], axis=.5)
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = herm.hermint([0], m=i, k=k)
assert_almost_equal(res, [0, .5])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
hermpol = herm.poly2herm(pol)
hermint = herm.hermint(hermpol, m=1, k=[i])
res = herm.herm2poly(hermint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
hermpol = herm.poly2herm(pol)
hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1)
assert_almost_equal(herm.hermval(-1, hermint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
hermpol = herm.poly2herm(pol)
hermint = herm.hermint(hermpol, m=1, k=[i], scl=2)
res = herm.herm2poly(hermint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herm.hermint(tgt, m=1)
res = herm.hermint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herm.hermint(tgt, m=1, k=[k])
res = herm.hermint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1)
res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herm.hermint(tgt, m=1, k=[k], scl=2)
res = herm.hermint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_hermint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T
res = herm.hermint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herm.hermint(c) for c in c2d])
res = herm.hermint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([herm.hermint(c, k=3) for c in c2d])
res = herm.hermint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(object):
def test_hermder(self):
# check exceptions
assert_raises(ValueError, herm.hermder, [0], .5)
assert_raises(ValueError, herm.hermder, [0], -1)
# check that zeroth derivative does nothing
for i in range(5):
tgt = [0]*i + [1]
res = herm.hermder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = herm.hermder(herm.hermint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_hermder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T
res = herm.hermder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herm.hermder(c) for c in c2d])
res = herm.hermder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(object):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_hermvander(self):
# check for 1d x
x = np.arange(3)
v = herm.hermvander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], herm.hermval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = herm.hermvander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], herm.hermval(x, coef))
def test_hermvander2d(self):
# also tests hermval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = herm.hermvander2d(x1, x2, [1, 2])
tgt = herm.hermval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = herm.hermvander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_hermvander3d(self):
# also tests hermval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = herm.hermvander3d(x1, x2, x3, [1, 2, 3])
tgt = herm.hermval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestFitting(object):
def test_hermfit(self):
def f(x):
return x*(x - 1)*(x - 2)
def f2(x):
return x**4 + x**2 + 1
# Test exceptions
assert_raises(ValueError, herm.hermfit, [1], [1], -1)
assert_raises(TypeError, herm.hermfit, [[1]], [1], 0)
assert_raises(TypeError, herm.hermfit, [], [1], 0)
assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0)
assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0)
assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0)
assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1])
assert_raises(ValueError, herm.hermfit, [1], [1], [-1,])
assert_raises(ValueError, herm.hermfit, [1], [1], [2, -1, 6])
assert_raises(TypeError, herm.hermfit, [1], [1], [])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = herm.hermfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(herm.hermval(x, coef3), y)
coef3 = herm.hermfit(x, y, [0, 1, 2, 3])
assert_equal(len(coef3), 4)
assert_almost_equal(herm.hermval(x, coef3), y)
#
coef4 = herm.hermfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(herm.hermval(x, coef4), y)
coef4 = herm.hermfit(x, y, [0, 1, 2, 3, 4])
assert_equal(len(coef4), 5)
assert_almost_equal(herm.hermval(x, coef4), y)
# check things still work if deg is not in strict increasing
coef4 = herm.hermfit(x, y, [2, 3, 4, 1, 0])
assert_equal(len(coef4), 5)
assert_almost_equal(herm.hermval(x, coef4), y)
#
coef2d = herm.hermfit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
coef2d = herm.hermfit(x, np.array([y, y]).T, [0, 1, 2, 3])
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
y[0::2] = 0
wcoef3 = herm.hermfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
wcoef3 = herm.hermfit(x, yw, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        # test scaling with complex-valued x points whose squares
        # sum to zero.
x = [1, 1j, -1, -1j]
assert_almost_equal(herm.hermfit(x, x, 1), [0, .5])
assert_almost_equal(herm.hermfit(x, x, [0, 1]), [0, .5])
        # test fitting only even Hermite polynomials
x = np.linspace(-1, 1)
y = f2(x)
coef1 = herm.hermfit(x, y, 4)
assert_almost_equal(herm.hermval(x, coef1), y)
coef2 = herm.hermfit(x, y, [0, 2, 4])
assert_almost_equal(herm.hermval(x, coef2), y)
assert_almost_equal(coef1, coef2)
class TestCompanion(object):
def test_raises(self):
assert_raises(ValueError, herm.hermcompanion, [])
assert_raises(ValueError, herm.hermcompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0]*i + [1]
assert_(herm.hermcompanion(coef).shape == (i, i))
def test_linear_root(self):
assert_(herm.hermcompanion([1, 2])[0, 0] == -.25)
class TestGauss(object):
def test_100(self):
x, w = herm.hermgauss(100)
        # test orthogonality. Note that the results need to be normalized,
        # otherwise the huge values that arise from the fast-growing Hermite
        # polynomials can be very confusing.
v = herm.hermvander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1/np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = np.sqrt(np.pi)
assert_almost_equal(w.sum(), tgt)
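# Note: hermgauss() returns nodes and weights for the weight function exp(-x**2),
# so the weights must sum to integral_{-inf}^{inf} exp(-x**2) dx = sqrt(pi), which
# is exactly the `tgt` checked at the end of test_100.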
class TestMisc(object):
def test_hermfromroots(self):
res = herm.hermfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
pol = herm.hermfromroots(roots)
res = herm.hermval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(herm.herm2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_hermroots(self):
assert_almost_equal(herm.hermroots([1]), [])
assert_almost_equal(herm.hermroots([1, 1]), [-.5])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = herm.hermroots(herm.hermfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_hermtrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, herm.hermtrim, coef, -1)
# Test results
assert_equal(herm.hermtrim(coef), coef[:-1])
assert_equal(herm.hermtrim(coef, 1), coef[:-3])
assert_equal(herm.hermtrim(coef, 2), [0])
def test_hermline(self):
assert_equal(herm.hermline(3, 4), [3, 2])
def test_herm2poly(self):
for i in range(10):
assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i])
def test_poly2herm(self):
for i in range(10):
assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(-5, 5, 11)
tgt = np.exp(-x**2)
res = herm.hermweight(x)
assert_almost_equal(res, tgt)
| mit | 1,516,033,884,435,407,600 | 3,190,546,164,860,395,000 | 32.67684 | 78 | 0.512741 | false |
nvoron23/statsmodels | statsmodels/sandbox/regression/tests/results_gmm_griliches_iter.py | 34 | 7976 | import numpy as np
est = dict(
rank = 13,
N = 758,
Q = .0150568875809373,
J = 11.41312078635046,
J_df = 2,
k_1 = 13,
converged = 1,
has_xtinst = 0,
type = 1,
n_eq = 1,
k = 13,
n_moments = 15,
k_aux = 13,
k_eq_model = 0,
ic = 6,
k_eq = 13,
cmdline = "gmm (lw - {xb:s iq expr tenure rns smsa dyear*} - {b0}) , instruments(expr tenure rns smsa dyear* med kww age mrt) igmm",
cmd = "gmm",
estat_cmd = "gmm_estat",
predict = "gmm_p",
marginsnotok = "_ALL",
eqnames = "1",
technique = "gn",
winit = "Unadjusted",
estimator = "igmm",
wmatrix = "robust",
vce = "robust",
vcetype = "Robust",
params = "xb_s xb_iq xb_expr xb_tenure xb_rns xb_smsa xb_dyear_67 xb_dyear_68 xb_dyear_69 xb_dyear_70 xb_dyear_71 xb_dyear_73 b0",
inst_1 = "expr tenure rns smsa dyear_67 dyear_68 dyear_69 dyear_70 dyear_71 dyear_73 med kww age mrt _cons",
params_1 = "xb_s xb_iq xb_expr xb_tenure xb_rns xb_smsa xb_dyear_67 xb_dyear_68 xb_dyear_69 xb_dyear_70 xb_dyear_71 xb_dyear_73 b0",
sexp_1 = "lw - ({xb_s} *s + {xb_iq} *iq + {xb_expr} *expr + {xb_tenure} *tenure + {xb_rns} *rns + {xb_smsa} *smsa + {xb_dyear_67} *dyear_67 + {xb_dyear_68} *dyear_68 + {xb_dyear_69} *dyear_69 + {xb_dyear_70} *dyear_70 + {xb_dyear_71} *dyear_71 + {xb_dyear_73} *dyear_73) - {b0}",
properties = "b V",
)
params_table = np.array([
.17587739850768, .02085563162829, 8.4330890400415, 3.366583555e-17,
.1350011116414, .21675368537396, np.nan, 1.9599639845401,
0, -.00928586712743, .00491894287617, -1.88777697997,
.05905589683705, -.01892681800673, .00035508375188, np.nan,
1.9599639845401, 0, .05031651549731, .00810558790493,
6.2076330659127, 5.378855978e-10, .03442985513012, .0662031758645,
np.nan, 1.9599639845401, 0, .04246235782951,
.00956418082077, 4.4397276280375, 9.007280073e-06, .02371690787918,
.06120780777985, np.nan, 1.9599639845401, 0,
-.1039476753865, .03373281188749, -3.0815004611293, .00205960157647,
-.17006277178325, -.03783257898975, np.nan, 1.9599639845401,
0, .12477256813508, .03099244898605, 4.0259021864082,
.0000567572801, .06402848432973, .18551665194043, np.nan,
1.9599639845401, 0, -.05297127223127, .0517946935923,
-1.0227162003936, .30644204936546, -.15448700626247, .04854446179993,
np.nan, 1.9599639845401, 0, .04564516152971,
.05001865637643, .91256272831865, .36147256434055, -.05238960352318,
.1436799265826, np.nan, 1.9599639845401, 0,
.15574543741982, .04802004585645, 3.2433421218593, .00118136262363,
.06162787700523, .24986299783442, np.nan, 1.9599639845401,
0, .16681173496168, .06134387289984, 2.7192892635594,
.00654223677971, .0465799534058, .28704351651757, np.nan,
1.9599639845401, 0, .08417610675323, .05582688740597,
1.507805838092, .13160422753823, -.02524258193145, .19359479543791,
np.nan, 1.9599639845401, 0, .09964580476612,
.06124947866865, 1.6268841291727, .10376170930541, -.02040096749628,
.21969257702853, np.nan, 1.9599639845401, 0,
4.0027753075622, .33649589464938, 11.895465505554, 1.249543428e-32,
3.3432554731038, 4.6622951420205, np.nan, 1.9599639845401,
0]).reshape(13,9)
params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()
params_table_rownames = '_cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons'.split()
cov = np.array([
.00043495737061, -.00007938790704, .00002809207919, .00001486824321,
-.00017806650894, -6.696078938e-06, -.00011595347261, -.00018816769626,
-.00012205118386, -.00008281236274, -.00031504876539, -.00063574245306,
.00264272738846, -.00007938790704, .00002419599902, 4.932871670e-06,
-.00001114848619, .00006618803917, -.00002202930782, 4.808220835e-07,
.00003206765662, -.00002261059773, -.00006024105579, -.00001412126593,
.00001474591556, -.00144330101198, .00002809207919, 4.932871670e-06,
.00006570055528, -.0000203894891, .00005213529923, -.00003297805448,
.00003595284891, .00008758906787, .00003058926358, .00001696423798,
-.00008568569767, -.00013140753648, -.00094326672008, .00001486824321,
-.00001114848619, -.0000203894891, .00009147355477, -.00003774547245,
7.828122784e-06, .00008484461309, .00006729820252, .00011236802193,
.00010082715772, .00011217081931, .00009440153548, .00075659901252,
-.00017806650894, .00006618803917, .00005213529923, -.00003774547245,
.00113790259784, .00013005865302, .00018021354375, .00018779266096,
-9.435310865e-06, .0000165483542, -.00005323328914, .00008265052168,
-.00499436873124, -6.696078938e-06, -.00002202930782, -.00003297805448,
7.828122784e-06, .00013005865302, .00096053189415, .00005704546746,
.00011160225767, .00025285680201, .00010656723202, .00030213005331,
.00030792696913, .00157128168902, -.00011595347261, 4.808220835e-07,
.00003595284891, .00008484461309, .00018021354375, .00005704546746,
.00268269028432, .00085942321667, .00091151417222, .00096327250114,
.00090372304081, .00102768195348, .00034563629591, -.00018816769626,
.00003206765662, .00008758906787, .00006729820252, .00018779266096,
.00011160225767, .00085942321667, .0025018659857, .00092591134763,
.00088266305412, .0008241186538, .00095084381197, -.00206285154639,
-.00012205118386, -.00002261059773, .00003058926358, .00011236802193,
-9.435310865e-06, .00025285680201, .00091151417222, .00092591134763,
.00230592480406, .00118265696692, .0011106470199, .00129290662149,
.00256049741814, -.00008281236274, -.00006024105579, .00001696423798,
.00010082715772, .0000165483542, .00010656723202, .00096327250114,
.00088266305412, .00118265696692, .00376307074235, .00124584145426,
.00155915431219, .00599086304364, -.00031504876539, -.00001412126593,
-.00008568569767, .00011217081931, -.00005323328914, .00030213005331,
.00090372304081, .0008241186538, .0011106470199, .00124584145426,
.00311664135744, .0018437604357, .00431259131307, -.00063574245306,
.00001474591556, -.00013140753648, .00009440153548, .00008265052168,
.00030792696913, .00102768195348, .00095084381197, .00129290662149,
.00155915431219, .0018437604357, .00375149863718, .00538769349865,
.00264272738846, -.00144330101198, -.00094326672008, .00075659901252,
-.00499436873124, .00157128168902, .00034563629591, -.00206285154639,
.00256049741814, .00599086304364, .00431259131307, .00538769349865,
.11322948711589]).reshape(13,13)
cov_colnames = '_cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons'.split()
cov_rownames = '_cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons _cons'.split()
class Bunch(dict):
def __init__(self, **kw):
dict.__init__(self, kw)
self.__dict__ = self
for i,att in enumerate(['params', 'bse', 'tvalues', 'pvalues']):
self[att] = self.params_table[:,i]
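# Usage sketch (not part of the generated results module): params_table columns
# follow the Stata order 'b se z pvalue ...', so the Bunch above exposes the first
# four columns as params/bse/tvalues/pvalues.  A GMM regression test would
# typically compare a fitted model against them roughly like this, where `res`
# stands for a hypothetical statsmodels GMM results instance:
#
#     from numpy.testing import assert_allclose
#     assert_allclose(res.params, results.params, rtol=5e-6)
#     assert_allclose(res.bse, results.bse, rtol=5e-6)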
results = Bunch(
params_table=params_table,
params_table_colnames=params_table_colnames,
params_table_rownames=params_table_rownames,
cov=cov,
cov_colnames=cov_colnames,
cov_rownames=cov_rownames,
**est
)
| bsd-3-clause | 3,502,440,829,241,030,000 | 3,256,714,863,935,488,500 | 55.567376 | 290 | 0.635657 | false |
khughitt/ete | ete_dev/tools/ete_ncbi_update.py | 2 | 4129 | import sys
import os
from string import strip
import tarfile
from common import Tree
from utils import ask, ask_filename
def load_ncbi_tree_from_dump(tar):
# Download: ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz
parent2child = {}
name2node = {}
node2taxname = {}
synonyms = set()
name2rank = {}
print "Loading node names..."
for line in tar.extractfile("names.dmp"):
fields = map(strip, line.split("|"))
nodename = fields[0]
name_type = fields[3].lower()
taxname = fields[1]
if name_type == "scientific name":
node2taxname[nodename] = taxname
elif name_type in set(["synonym", "equivalent name", "genbank equivalent name",
"anamorph", "genbank synonym", "genbank anamorph", "teleomorph"]):
synonyms.add( (nodename, taxname) )
print len(node2taxname), "names loaded."
print len(synonyms), "synonyms loaded."
print "Loading nodes..."
for line in tar.extractfile("nodes.dmp"):
fields = line.split("|")
nodename = fields[0].strip()
parentname = fields[1].strip()
n = Tree()
n.name = nodename
n.taxname = node2taxname[nodename]
n.rank = fields[2].strip()
parent2child[nodename] = parentname
name2node[nodename] = n
print len(name2node), "nodes loaded."
print "Linking nodes..."
for node in name2node:
if node == "1":
t = name2node[node]
else:
parent = parent2child[node]
parent_node = name2node[parent]
parent_node.add_child(name2node[node])
print "Tree is loaded."
return t, synonyms
def generate_table(t):
OUT = open("taxa.tab", "w")
for j, n in enumerate(t.traverse()):
if j%1000 == 0:
print "\r",j,"nodes inserted into the DB.",
temp_node = n
track = []
while temp_node:
track.append(temp_node.name)
temp_node = temp_node.up
if n.up:
print >>OUT, '\t'.join([n.name, n.up.name, n.taxname, n.rank, ','.join(track)])
else:
print >>OUT, '\t'.join([n.name, "", n.taxname, n.rank, ','.join(track)])
OUT.close()
def update(targz_file):
tar = tarfile.open(targz_file, 'r')
t, synonyms = load_ncbi_tree_from_dump(tar)
print "Updating database [ ~/.etetoolkit/taxa.sqlite ] ..."
generate_table(t)
open("syn.tab", "w").write('\n'.join(["%s\t%s" %(v[0],v[1]) for v in synonyms]))
open("merged.tab", "w").write('\n'.join(['\t'.join(map(strip, line.split('|')[:2])) for line in tar.extractfile("merged.dmp")]))
CMD = open("commands.tmp", "w")
cmd = """
DROP TABLE IF EXISTS species;
DROP TABLE IF EXISTS synonym;
DROP TABLE IF EXISTS merged;
CREATE TABLE species (taxid INT PRIMARY KEY, parent INT, spname VARCHAR(50) COLLATE NOCASE, rank VARCHAR(50), track TEXT);
CREATE TABLE synonym (taxid INT,spname VARCHAR(50) COLLATE NOCASE, PRIMARY KEY (spname, taxid));
CREATE TABLE merged (taxid_old INT, taxid_new INT);
CREATE INDEX spname1 ON species (spname COLLATE NOCASE);
CREATE INDEX spname2 ON synonym (spname COLLATE NOCASE);
.separator '\t'
.import taxa.tab species
.import syn.tab synonym
.import merged.tab merged
"""
CMD.write(cmd)
CMD.close()
os.system("mkdir -p ~/.etetoolkit/")
os.system("sqlite3 ~/.etetoolkit/taxa.sqlite < commands.tmp")
os.system("rm syn.tab merged.tab taxa.tab commands.tmp")
print "Creating extended newick file with the whole NCBI tree [ncbi.nw]"
t.write(outfile="./ncbi.nw", features=["name", "taxname"])
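# Usage sketch (not part of the original tool): once update() has built
# ~/.etetoolkit/taxa.sqlite, the schema created above can be queried directly.
# The helper below only illustrates a lookup and is never called by this script.
def _example_lookup(spname="Homo sapiens"):
    import sqlite3
    db = sqlite3.connect(os.path.expanduser("~/.etetoolkit/taxa.sqlite"))
    row = db.execute("SELECT taxid, rank, track FROM species WHERE spname = ?",
                     (spname,)).fetchone()
    db.close()
    return row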
def main(args):
if not args:
if ask('Download latest ncbi taxonomy dump file?', ['y', 'n']) == 'y':
status = os.system('wget ftp://ftp.ncbi.nih.gov/pub/taxonomy/taxdump.tar.gz')
if status == 0:
update('taxdump.tar.gz')
else:
fname = ask_filename('path to tar.gz file containing ncbi taxonomy dump:')
update(fname)
else:
update(args[0])
if __name__ == '__main__':
main(sys.argv[1:])
| gpl-3.0 | -993,658,415,799,305,100 | -4,004,103,124,259,330,600 | 32.844262 | 132 | 0.598208 | false |
montyly/manticore | tests/ethereum/EVM/test_EVMEXP.py | 1 | 79538 | import struct
import unittest
import json
from manticore.platforms import evm
from manticore.core import state
from manticore.core.smtlib import Operators, ConstraintSet
import os
class EVMTest_EXP(unittest.TestCase):
_multiprocess_can_split_ = True
maxDiff = None
def _execute(self, new_vm):
last_returned = None
last_exception = None
try:
new_vm.execute()
except evm.Stop as e:
last_exception = "STOP"
except evm.NotEnoughGas:
last_exception = "OOG"
except evm.StackUnderflow:
last_exception = "INSUFFICIENT STACK"
except evm.InvalidOpcode:
last_exception = "INVALID"
except evm.SelfDestruct:
last_exception = "SUICIDED"
except evm.Return as e:
last_exception = "RETURN"
last_returned = e.data
except evm.Revert:
last_exception = "REVERT"
return last_exception, last_returned
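    # Note (illustrative only, not used by the tests): the EVM EXP opcode computes
    # base ** exponent modulo 2**256, so each expected stack value asserted below
    # can be reproduced with Python's three-argument pow().
    def _reference_exp(self, base, exponent):
        return pow(base, exponent, 2 ** 256)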
def test_EXP_1(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[115792089237316195423570985008687907853269984665640564039457584007913129639935],
)
def test_EXP_2(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_3(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_4(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_5(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[104454113832828984621679659393253883542637298667129925477260695573804969029359],
)
def test_EXP_6(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_7(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_8(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_9(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_10(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_11(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_12(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_13(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_14(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_15(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_16(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_17(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_18(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(0)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_19(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[115792089237316195423570985008687907853269984665640564039457584007913129639935],
)
def test_EXP_20(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_21(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_22(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[57896044618658097711785492504343953926634992332820282019728792003956564819952],
)
def test_EXP_23(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[3618502788666131106986593281521497120414687020801267626233049500247285301263],
)
def test_EXP_24(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [16])
def test_EXP_25(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [32])
def test_EXP_26(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [48])
def test_EXP_27(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(1)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [6089590155545428825848686802984512581899718912])
def test_EXP_28(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_29(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_30(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_31(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_32(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[97153515582439856940218076430383148080316642374323115531717460774015781538049],
)
def test_EXP_33(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_34(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_35(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_36(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_37(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[115792089237316195423570985008687907853269984665640564039457584007913129639935],
)
def test_EXP_38(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_39(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_40(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_41(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[112173586448650064316584391727166410732855297644839296413224972401556225198063],
)
def test_EXP_42(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_43(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_44(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_45(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_46(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_47(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_48(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_49(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [18446744073709551616])
def test_EXP_50(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[57896044618658097711785492504343953926634992332820282019735360412312277710593],
)
def test_EXP_51(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [18446744073709551616])
def test_EXP_52(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1208925819614629174706176])
def test_EXP_53(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [794071845499378503449051136])
def test_EXP_54(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(16)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[8303694420805298775410959586403913600201715917447438497573206049841934761984],
)
def test_EXP_55(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_56(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_57(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_58(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [340282366920938463463374607431768211456])
def test_EXP_59(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [43143988327398919500410556793212890625])
def test_EXP_60(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [340282366920938463463374607431768211456])
def test_EXP_61(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1461501637330902918203684832716283019655932542976])
def test_EXP_62(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [630550095814788844423632687832745817333905738742890496])
def test_EXP_63(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(32)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_64(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_65(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_66(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_67(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [6277101735386680763835789423207666416102355444464034512896])
def test_EXP_68(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[57896044618658097712068879837772420409703173580337995947392654709187277710593],
)
def test_EXP_69(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [6277101735386680763835789423207666416102355444464034512896])
def test_EXP_70(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[1766847064778384329583297500742918515827483896875618958121606201292619776],
)
def test_EXP_71(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[17084401304090163016086072004374689170541683170424114643147834605304589320192],
)
def test_EXP_72(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(48)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_73(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(115792089237316195423570985008687907853269984665640564039457584007913129639935)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_74(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(0)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_75(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(1)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [1])
def test_EXP_76(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(57896044618658097711785492504343953926634992332820282019728792003956564819952)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_77(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(3618502788666131106986593281521497120414687020801267626233049500247285301263)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(
new_vm.stack,
[42192513242301740010671492996252704544191162524312342410321251717326910681089],
)
def test_EXP_78(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(16)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_79(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(32)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_80(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(48)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
def test_EXP_81(self):
# Make the constraint store
constraints = ConstraintSet()
# make the ethereum world state
world = evm.EVMWorld(constraints)
address = 0x222222222222222222222222222222222222200
caller = origin = 0x111111111111111111111111111111111111100
price = 0
value = 10000
bytecode = b"\n"
data = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
header = {"coinbase": 0, "timestamp": 0, "number": 0, "difficulty": 0, "gaslimit": 0}
gas = 1000000
new_vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=gas, world=world)
new_vm._push(6089590155545428825848686802984512581899718912)
new_vm._push(6089590155545428825848686802984512581899718912)
last_exception, last_returned = self._execute(new_vm)
self.assertEqual(last_exception, None)
self.assertEqual(new_vm.pc, 1)
self.assertEqual(new_vm.stack, [0])
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 1,036,241,249,047,440,100 | -138,581,098,780,915,260 | 39.914609 | 100 | 0.646584 | false |
thaumos/ansible-modules-extras | monitoring/boundary_meter.py | 35 | 8359 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to add boundary meters.
(c) 2013, curtis <[email protected]>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
# Let snippet from module_utils/basic.py return a proper error in this case
pass
import datetime
import base64
import os
DOCUMENTATION = '''
module: boundary_meter
short_description: Manage boundary meters
description:
- This module manages boundary meters
version_added: "1.3"
author: "curtis (@ccollicutt)"
requirements:
- Boundary API access
- bprobe is required to send data, but not to register a meter
options:
name:
description:
- meter name
required: true
state:
description:
- Whether to create or remove the client from boundary
required: true
choices: ["present", "absent"]
apiid:
description:
- Organizations boundary API ID
required: true
apikey:
description:
- Organizations boundary API KEY
required: true
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
notes:
- This module does not yet support boundary tags.
'''
EXAMPLES='''
- name: Create meter
boundary_meter: apiid=AAAAAA apikey=BBBBBB state=present name="{{ inventory_hostname }}"
- name: Delete meter
boundary_meter: apiid=AAAAAA apikey=BBBBBB state=absent name="{{ inventory_hostname }}"
'''
api_host = "api.boundary.com"
config_directory = "/etc/bprobe"
# "resource" like thing or apikey?
def auth_encode(apikey):
auth = base64.standard_b64encode(apikey)
auth = auth.replace("\n", "")
return auth
def build_url(name, apiid, action, meter_id=None, cert_type=None):
if action == "create":
return 'https://%s/%s/meters' % (api_host, apiid)
elif action == "search":
return "https://%s/%s/meters?name=%s" % (api_host, apiid, name)
elif action == "certificates":
return "https://%s/%s/meters/%s/%s.pem" % (api_host, apiid, meter_id, cert_type)
elif action == "tags":
return "https://%s/%s/meters/%s/tags" % (api_host, apiid, meter_id)
elif action == "delete":
return "https://%s/%s/meters/%s" % (api_host, apiid, meter_id)
def http_request(module, name, apiid, apikey, action, data=None, meter_id=None, cert_type=None):
if meter_id is None:
url = build_url(name, apiid, action)
else:
if cert_type is None:
url = build_url(name, apiid, action, meter_id)
else:
url = build_url(name, apiid, action, meter_id, cert_type)
headers = dict()
headers["Authorization"] = "Basic %s" % auth_encode(apikey)
headers["Content-Type"] = "application/json"
return fetch_url(module, url, data=data, headers=headers)
def create_meter(module, name, apiid, apikey):
meters = search_meter(module, name, apiid, apikey)
if len(meters) > 0:
# If the meter already exists, do nothing
module.exit_json(status="Meter " + name + " already exists",changed=False)
else:
# If it doesn't exist, create it
body = '{"name":"' + name + '"}'
response, info = http_request(module, name, apiid, apikey, data=body, action="create")
if info['status'] != 200:
module.fail_json(msg="Failed to connect to api host to create meter")
# If the config directory doesn't exist, create it
if not os.path.exists(config_directory):
try:
os.makedirs(config_directory)
except:
module.fail_json("Could not create " + config_directory)
# Download both cert files from the api host
types = ['key', 'cert']
for cert_type in types:
try:
# If we can't open the file it's not there, so we should download it
cert_file = open('%s/%s.pem' % (config_directory,cert_type))
except IOError:
# Now download the file...
rc = download_request(module, name, apiid, apikey, cert_type)
if rc == False:
module.fail_json("Download request for " + cert_type + ".pem failed")
return 0, "Meter " + name + " created"
def search_meter(module, name, apiid, apikey):
response, info = http_request(module, name, apiid, apikey, action="search")
if info['status'] != 200:
module.fail_json("Failed to connect to api host to search for meter")
# Return meters
return json.loads(response.read())
def get_meter_id(module, name, apiid, apikey):
# In order to delete the meter we need its id
meters = search_meter(module, name, apiid, apikey)
if len(meters) > 0:
return meters[0]['id']
else:
return None
def delete_meter(module, name, apiid, apikey):
meter_id = get_meter_id(module, name, apiid, apikey)
if meter_id is None:
return 1, "Meter does not exist, so can't delete it"
else:
response, info = http_request(module, name, apiid, apikey, action="delete", meter_id=meter_id)
if info['status'] != 200:
module.fail_json(msg="Failed to delete meter")
# Each new meter gets a new key.pem and ca.pem file, so they should be deleted
types = ['cert', 'key']
for cert_type in types:
try:
cert_file = '%s/%s.pem' % (config_directory,cert_type)
os.remove(cert_file)
except OSError, e:
module.fail_json("Failed to remove " + cert_type + ".pem file")
return 0, "Meter " + name + " deleted"
def download_request(module, name, apiid, apikey, cert_type):
meter_id = get_meter_id(module, name, apiid, apikey)
if meter_id is not None:
action = "certificates"
response, info = http_request(module, name, apiid, apikey, action=action, meter_id=meter_id, cert_type=cert_type)
if info['status'] != 200:
module.fail_json(msg="Failed to connect to api host to download certificate")
if response:
try:
cert_file_path = '%s/%s.pem' % (config_directory,cert_type)
body = response.read()
cert_file = open(cert_file_path, 'w')
cert_file.write(body)
cert_file.close()
os.chmod(cert_file_path, int('0600', 8))
except:
module.fail_json("Could not write to certificate file")
return True
else:
module.fail_json("Could not get meter id")
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True),
apikey=dict(required=True),
apiid=dict(required=True),
validate_certs = dict(default='yes', type='bool'),
)
)
state = module.params['state']
name = module.params['name']
apikey = module.params['apikey']
apiid = module.params['apiid']
if state == "present":
(rc, result) = create_meter(module, name, apiid, apikey)
if state == "absent":
(rc, result) = delete_meter(module, name, apiid, apikey)
if rc != 0:
module.fail_json(msg=result)
module.exit_json(status=result,changed=True)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 | -3,174,384,776,452,537,000 | 4,954,599,556,317,317,000 | 30.662879 | 96 | 0.612155 | false |
amitaekbote/dcos | gen/tests/utils.py | 3 | 1714 | """
Utilities for tests for ``gen``.
"""
import copy
import json
import pkg_resources
import gen
true_false_msg = "Must be one of 'true', 'false'. Got 'foo'."
def make_arguments(new_arguments):
"""
Fields with default values should not be added in here so that the
default values are also tested.
"""
arguments = copy.deepcopy({
'ip_detect_filename': pkg_resources.resource_filename('gen', 'ip-detect/aws.sh'),
'ip6_detect_filename': pkg_resources.resource_filename('gen', 'ip-detect/aws6.sh'),
'bootstrap_id': '123',
'package_ids': json.dumps(['package--version']),
'exhibitor_zk_path': '/dcos',
'master_discovery': 'static',
'platform': 'aws',
'provider': 'onprem',
'exhibitor_zk_hosts': '52.37.205.237:2181',
'resolvers': '["8.8.8.8", "8.8.4.4"]',
'master_list': '["52.37.192.49", "52.37.181.230", "52.37.163.105"]',
'exhibitor_storage_backend': 'zookeeper',
'bootstrap_url': 'file:///opt/dcos_install_tmp',
'cluster_name': 'Mesosphere: The Data Center Operating System',
'bootstrap_variant': '',
'oauth_available': 'true',
'oauth_enabled': 'true',
'enable_docker_gc': 'false'})
arguments.update(new_arguments)
return arguments
def validate_error(new_arguments, key, message, unset=None):
assert gen.validate(arguments=make_arguments(new_arguments)) == {
'status': 'errors',
'errors': {key: {'message': message}},
'unset': set() if unset is None else unset,
}
def validate_success(new_arguments):
assert gen.validate(arguments=make_arguments(new_arguments)) == {
'status': 'ok',
}
| apache-2.0 | -4,162,501,112,637,844,500 | 95,209,594,550,195,800 | 30.740741 | 91 | 0.603267 | false |
tiagofrepereira2012/tensorflow | tensorflow/python/kernel_tests/matrix_inverse_op_test.py | 20 | 4503 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matrix_inverse."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class InverseOpTest(test.TestCase):
def _verifyInverse(self, x, np_type):
for adjoint in False, True:
y = x.astype(np_type)
with self.test_session(use_gpu=True):
# Verify that x^{-1} * x == Identity matrix.
inv = linalg_ops.matrix_inverse(y, adjoint=adjoint)
tf_ans = math_ops.matmul(inv, y, adjoint_b=adjoint)
np_ans = np.identity(y.shape[-1])
if x.ndim > 2:
tiling = list(y.shape)
tiling[-2:] = [1, 1]
np_ans = np.tile(np_ans, tiling)
out = tf_ans.eval()
self.assertAllClose(np_ans, out)
self.assertShapeEqual(y, tf_ans)
def _verifyInverseReal(self, x):
for np_type in [np.float32, np.float64]:
self._verifyInverse(x, np_type)
def _verifyInverseComplex(self, x):
for np_type in [np.complex64, np.complex128]:
self._verifyInverse(x, np_type)
def _makeBatch(self, matrix1, matrix2):
matrix_batch = np.concatenate(
[np.expand_dims(matrix1, 0), np.expand_dims(matrix2, 0)])
matrix_batch = np.tile(matrix_batch, [2, 3, 1, 1])
return matrix_batch
def testNonsymmetric(self):
# 2x2 matrices
matrix1 = np.array([[1., 2.], [3., 4.]])
matrix2 = np.array([[1., 3.], [3., 5.]])
self._verifyInverseReal(matrix1)
self._verifyInverseReal(matrix2)
# A multidimensional batch of 2x2 matrices
self._verifyInverseReal(self._makeBatch(matrix1, matrix2))
# Complex
matrix1 = matrix1.astype(np.complex64)
matrix1 += 1j * matrix1
matrix2 = matrix2.astype(np.complex64)
matrix2 += 1j * matrix2
self._verifyInverseComplex(matrix1)
self._verifyInverseComplex(matrix2)
# Complex batch
self._verifyInverseComplex(self._makeBatch(matrix1, matrix2))
def testSymmetricPositiveDefinite(self):
# 2x2 matrices
matrix1 = np.array([[2., 1.], [1., 2.]])
matrix2 = np.array([[3., -1.], [-1., 3.]])
self._verifyInverseReal(matrix1)
self._verifyInverseReal(matrix2)
# A multidimensional batch of 2x2 matrices
self._verifyInverseReal(self._makeBatch(matrix1, matrix2))
# Complex
matrix1 = matrix1.astype(np.complex64)
matrix1 += 1j * matrix1
matrix2 = matrix2.astype(np.complex64)
matrix2 += 1j * matrix2
self._verifyInverseComplex(matrix1)
self._verifyInverseComplex(matrix2)
# Complex batch
self._verifyInverseComplex(self._makeBatch(matrix1, matrix2))
def testNonSquareMatrix(self):
# When the inverse of a non-square matrix is attempted we should return
# an error
with self.assertRaises(ValueError):
linalg_ops.matrix_inverse(np.array([[1., 2., 3.], [3., 4., 5.]]))
def testWrongDimensions(self):
# The input to the inverse should be at least a 2-dimensional tensor.
tensor3 = constant_op.constant([1., 2.])
with self.assertRaises(ValueError):
linalg_ops.matrix_inverse(tensor3)
def testNotInvertible(self):
# The input should be invertible.
with self.test_session():
with self.assertRaisesOpError("Input is not invertible."):
# All rows of the matrix below add to zero.
tensor3 = constant_op.constant([[1., 0., -1.], [-1., 1., 0.],
[0., -1., 1.]])
linalg_ops.matrix_inverse(tensor3).eval()
def testEmpty(self):
self._verifyInverseReal(np.empty([0, 2, 2]))
self._verifyInverseReal(np.empty([2, 0, 0]))
if __name__ == "__main__":
test.main()
| apache-2.0 | -4,506,752,268,370,341,400 | 6,080,034,024,952,153,000 | 35.314516 | 80 | 0.655119 | false |
kevin-intel/scikit-learn | examples/multioutput/plot_classifier_chain_yeast.py | 23 | 4637 | """
============================
Classifier Chain
============================
Example of using classifier chain on a multilabel dataset.
For this example we will use the `yeast
<https://www.openml.org/d/40597>`_ dataset which contains
2417 datapoints each with 103 features and 14 possible labels. Each
data point has at least one label. As a baseline we first train a logistic
regression classifier for each of the 14 labels. To evaluate the performance of
these classifiers we predict on a held-out test set and calculate the
:ref:`jaccard score <jaccard_similarity_score>` for each sample.
Next we create 10 classifier chains. Each classifier chain contains a
logistic regression model for each of the 14 labels. The models in each
chain are ordered randomly. In addition to the 103 features in the dataset,
each model gets the predictions of the preceding models in the chain as
features (note that by default at training time each model gets the true
labels as features). These additional features allow each chain to exploit
correlations among the classes. The Jaccard similarity score for each chain
tends to be greater than that of the set independent logistic models.
Because the models in each chain are arranged randomly there is significant
variation in performance among the chains. Presumably there is an optimal
ordering of the classes in a chain that will yield the best performance.
However we do not know that ordering a priori. Instead we can construct an
voting ensemble of classifier chains by averaging the binary predictions of
the chains and apply a threshold of 0.5. The Jaccard similarity score of the
ensemble is greater than that of the independent models and tends to exceed
the score of each chain in the ensemble (although this is not guaranteed
with randomly ordered chains).
"""
# Author: Adam Kleczewski
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_openml
from sklearn.multioutput import ClassifierChain
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import jaccard_score
from sklearn.linear_model import LogisticRegression
print(__doc__)
# Load a multi-label dataset from https://www.openml.org/d/40597
X, Y = fetch_openml('yeast', version=4, return_X_y=True)
Y = Y == 'TRUE'
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.2,
random_state=0)
# Fit an independent logistic regression model for each class using the
# OneVsRestClassifier wrapper.
base_lr = LogisticRegression()
ovr = OneVsRestClassifier(base_lr)
ovr.fit(X_train, Y_train)
Y_pred_ovr = ovr.predict(X_test)
ovr_jaccard_score = jaccard_score(Y_test, Y_pred_ovr, average='samples')
# Fit an ensemble of logistic regression classifier chains and take the
# average prediction of all the chains.
chains = [ClassifierChain(base_lr, order='random', random_state=i)
for i in range(10)]
for chain in chains:
chain.fit(X_train, Y_train)
Y_pred_chains = np.array([chain.predict(X_test) for chain in
chains])
chain_jaccard_scores = [jaccard_score(Y_test, Y_pred_chain >= .5,
average='samples')
for Y_pred_chain in Y_pred_chains]
Y_pred_ensemble = Y_pred_chains.mean(axis=0)
ensemble_jaccard_score = jaccard_score(Y_test,
Y_pred_ensemble >= .5,
average='samples')
model_scores = [ovr_jaccard_score] + chain_jaccard_scores
model_scores.append(ensemble_jaccard_score)
model_names = ('Independent',
'Chain 1',
'Chain 2',
'Chain 3',
'Chain 4',
'Chain 5',
'Chain 6',
'Chain 7',
'Chain 8',
'Chain 9',
'Chain 10',
'Ensemble')
x_pos = np.arange(len(model_names))
# Plot the Jaccard similarity scores for the independent model, each of the
# chains, and the ensemble (note that the vertical axis on this plot does
# not begin at 0).
fig, ax = plt.subplots(figsize=(7, 4))
ax.grid(True)
ax.set_title('Classifier Chain Ensemble Performance Comparison')
ax.set_xticks(x_pos)
ax.set_xticklabels(model_names, rotation='vertical')
ax.set_ylabel('Jaccard Similarity Score')
ax.set_ylim([min(model_scores) * .9, max(model_scores) * 1.1])
colors = ['r'] + ['b'] * len(chain_jaccard_scores) + ['g']
ax.bar(x_pos, model_scores, alpha=0.5, color=colors)
plt.tight_layout()
plt.show()
| bsd-3-clause | 2,264,663,470,688,559,600 | 1,764,316,301,538,384,400 | 40.035398 | 79 | 0.692042 | false |
joebowen/movement_validation_cloud | djangodev/lib/python2.7/site-packages/boto/rds/regioninfo.py | 167 | 1513 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo
class RDSRegionInfo(RegionInfo):
def __init__(self, connection=None, name=None, endpoint=None,
connection_cls=None):
from boto.rds import RDSConnection
super(RDSRegionInfo, self).__init__(connection, name, endpoint,
RDSConnection)
| mit | -8,019,016,159,509,263,000 | -2,290,470,775,293,971,200 | 44.848485 | 74 | 0.738268 | false |
tartavull/google-cloud-python | speech/google/cloud/speech/alternative.py | 2 | 2466 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Representation of Speech Alternative for the Google Speech API."""
class Alternative(object):
"""Representation of Speech Alternative.
:type transcript: str
:param transcript: String of transcribed data.
:type confidence: float
:param confidence: The confidence estimate between 0.0 and 1.0.
"""
def __init__(self, transcript, confidence):
self._transcript = transcript
self._confidence = confidence
@classmethod
def from_api_repr(cls, alternative):
"""Factory: construct ``Alternative`` from JSON response.
:type alternative: dict
:param alternative: Dictionary response from the REST API.
:rtype: :class:`Alternative`
:returns: Instance of ``Alternative``.
"""
return cls(alternative['transcript'], alternative.get('confidence'))
@classmethod
def from_pb(cls, alternative):
"""Factory: construct ``Alternative`` from protobuf response.
:type alternative:
:class:`google.cloud.speech.v1.SpeechRecognitionAlternative`
:param alternative: Instance of ``SpeechRecognitionAlternative``
from protobuf.
:rtype: :class:`Alternative`
:returns: Instance of ``Alternative``.
"""
confidence = alternative.confidence
        if confidence == 0.0:  # In the protobuf 0.0 means unset.
confidence = None
return cls(alternative.transcript, confidence)
@property
def transcript(self):
"""Transcript text from audio.
:rtype: str
:returns: Text detected in audio.
"""
return self._transcript
@property
def confidence(self):
"""Confidence score for recognized speech.
:rtype: float
:returns: Confidence score of recognized speech [0-1].
"""
return self._confidence
| apache-2.0 | -5,212,435,623,967,077,000 | -3,435,978,923,299,573,000 | 31.447368 | 76 | 0.657745 | false |
glennrub/micropython | examples/rp2/pio_pinchange.py | 7 | 1199 | # Example using PIO to wait for a pin change and raise an IRQ.
#
# Demonstrates:
# - PIO wrapping
# - PIO wait instruction, waiting on an input pin
# - PIO irq instruction, in blocking mode with relative IRQ number
# - setting the in_base pin for a StateMachine
# - setting an irq handler for a StateMachine
# - instantiating 2x StateMachine's with the same program and different pins
import time
from machine import Pin
import rp2
@rp2.asm_pio()
def wait_pin_low():
wrap_target()
wait(0, pin, 0)
irq(block, rel(0))
wait(1, pin, 0)
wrap()
def handler(sm):
# Print a (wrapping) timestamp, and the state machine object.
print(time.ticks_ms(), sm)
# Instantiate StateMachine(0) with wait_pin_low program on Pin(16).
pin16 = Pin(16, Pin.IN, Pin.PULL_UP)
sm0 = rp2.StateMachine(0, wait_pin_low, in_base=pin16)
sm0.irq(handler)
# Instantiate StateMachine(1) with wait_pin_low program on Pin(17).
pin17 = Pin(17, Pin.IN, Pin.PULL_UP)
sm1 = rp2.StateMachine(1, wait_pin_low, in_base=pin17)
sm1.irq(handler)
# Start the StateMachine's running.
sm0.active(1)
sm1.active(1)
# Now, when Pin(16) or Pin(17) is pulled low a message will be printed to the REPL.
| mit | -6,311,078,408,536,382,000 | -6,365,437,467,788,709,000 | 25.065217 | 83 | 0.69975 | false |
sauliusl/scipy | scipy/stats/tests/test_multivariate.py | 35 | 30527 | """
Test functions for multivariate normal distributions.
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_array_almost_equal, assert_equal,
assert_raises, run_module_suite, TestCase)
from test_continuous_basic import check_distribution_rvs
import numpy
import numpy as np
import scipy.linalg
from scipy.stats._multivariate import _PSD, _lnB
from scipy.stats import multivariate_normal
from scipy.stats import dirichlet, beta
from scipy.stats import wishart, invwishart, chi2, invgamma
from scipy.stats import norm
from scipy.integrate import romb
from common_tests import check_random_state_property
class TestMultivariateNormal(TestCase):
def test_input_shape(self):
mu = np.arange(3)
cov = np.identity(2)
assert_raises(ValueError, multivariate_normal.pdf, (0, 1), mu, cov)
assert_raises(ValueError, multivariate_normal.pdf, (0, 1, 2), mu, cov)
def test_scalar_values(self):
np.random.seed(1234)
# When evaluated on scalar data, the pdf should return a scalar
x, mean, cov = 1.5, 1.7, 2.5
pdf = multivariate_normal.pdf(x, mean, cov)
assert_equal(pdf.ndim, 0)
# When evaluated on a single vector, the pdf should return a scalar
x = np.random.randn(5)
mean = np.random.randn(5)
cov = np.abs(np.random.randn(5)) # Diagonal values for cov. matrix
pdf = multivariate_normal.pdf(x, mean, cov)
assert_equal(pdf.ndim, 0)
def test_logpdf(self):
# Check that the log of the pdf is in fact the logpdf
np.random.seed(1234)
x = np.random.randn(5)
mean = np.random.randn(5)
cov = np.abs(np.random.randn(5))
d1 = multivariate_normal.logpdf(x, mean, cov)
d2 = multivariate_normal.pdf(x, mean, cov)
assert_allclose(d1, np.log(d2))
def test_rank(self):
# Check that the rank is detected correctly.
np.random.seed(1234)
n = 4
mean = np.random.randn(n)
for expected_rank in range(1, n + 1):
s = np.random.randn(n, expected_rank)
cov = np.dot(s, s.T)
distn = multivariate_normal(mean, cov, allow_singular=True)
assert_equal(distn.cov_info.rank, expected_rank)
def test_degenerate_distributions(self):
def _sample_orthonormal_matrix(n):
M = np.random.randn(n, n)
u, s, v = scipy.linalg.svd(M)
return u
for n in range(1, 5):
x = np.random.randn(n)
for k in range(1, n + 1):
# Sample a small covariance matrix.
s = np.random.randn(k, k)
cov_kk = np.dot(s, s.T)
# Embed the small covariance matrix into a larger low rank matrix.
cov_nn = np.zeros((n, n))
cov_nn[:k, :k] = cov_kk
# Define a rotation of the larger low rank matrix.
u = _sample_orthonormal_matrix(n)
cov_rr = np.dot(u, np.dot(cov_nn, u.T))
y = np.dot(u, x)
# Check some identities.
distn_kk = multivariate_normal(np.zeros(k), cov_kk,
allow_singular=True)
distn_nn = multivariate_normal(np.zeros(n), cov_nn,
allow_singular=True)
distn_rr = multivariate_normal(np.zeros(n), cov_rr,
allow_singular=True)
assert_equal(distn_kk.cov_info.rank, k)
assert_equal(distn_nn.cov_info.rank, k)
assert_equal(distn_rr.cov_info.rank, k)
pdf_kk = distn_kk.pdf(x[:k])
pdf_nn = distn_nn.pdf(x)
pdf_rr = distn_rr.pdf(y)
assert_allclose(pdf_kk, pdf_nn)
assert_allclose(pdf_kk, pdf_rr)
logpdf_kk = distn_kk.logpdf(x[:k])
logpdf_nn = distn_nn.logpdf(x)
logpdf_rr = distn_rr.logpdf(y)
assert_allclose(logpdf_kk, logpdf_nn)
assert_allclose(logpdf_kk, logpdf_rr)
def test_large_pseudo_determinant(self):
# Check that large pseudo-determinants are handled appropriately.
# Construct a singular diagonal covariance matrix
# whose pseudo determinant overflows double precision.
large_total_log = 1000.0
npos = 100
nzero = 2
large_entry = np.exp(large_total_log / npos)
n = npos + nzero
cov = np.zeros((n, n), dtype=float)
np.fill_diagonal(cov, large_entry)
cov[-nzero:, -nzero:] = 0
# Check some determinants.
assert_equal(scipy.linalg.det(cov), 0)
assert_equal(scipy.linalg.det(cov[:npos, :npos]), np.inf)
assert_allclose(np.linalg.slogdet(cov[:npos, :npos]),
(1, large_total_log))
# Check the pseudo-determinant.
psd = _PSD(cov)
assert_allclose(psd.log_pdet, large_total_log)
def test_broadcasting(self):
np.random.seed(1234)
n = 4
# Construct a random covariance matrix.
data = np.random.randn(n, n)
cov = np.dot(data, data.T)
mean = np.random.randn(n)
# Construct an ndarray which can be interpreted as
# a 2x3 array whose elements are random data vectors.
X = np.random.randn(2, 3, n)
# Check that multiple data points can be evaluated at once.
for i in range(2):
for j in range(3):
actual = multivariate_normal.pdf(X[i, j], mean, cov)
desired = multivariate_normal.pdf(X, mean, cov)[i, j]
assert_allclose(actual, desired)
def test_normal_1D(self):
# The probability density function for a 1D normal variable should
# agree with the standard normal distribution in scipy.stats.distributions
x = np.linspace(0, 2, 10)
mean, cov = 1.2, 0.9
scale = cov**0.5
d1 = norm.pdf(x, mean, scale)
d2 = multivariate_normal.pdf(x, mean, cov)
assert_allclose(d1, d2)
def test_marginalization(self):
# Integrating out one of the variables of a 2D Gaussian should
# yield a 1D Gaussian
mean = np.array([2.5, 3.5])
cov = np.array([[.5, 0.2], [0.2, .6]])
n = 2 ** 8 + 1 # Number of samples
delta = 6 / (n - 1) # Grid spacing
v = np.linspace(0, 6, n)
xv, yv = np.meshgrid(v, v)
pos = np.empty((n, n, 2))
pos[:, :, 0] = xv
pos[:, :, 1] = yv
pdf = multivariate_normal.pdf(pos, mean, cov)
# Marginalize over x and y axis
margin_x = romb(pdf, delta, axis=0)
margin_y = romb(pdf, delta, axis=1)
# Compare with standard normal distribution
gauss_x = norm.pdf(v, loc=mean[0], scale=cov[0, 0] ** 0.5)
gauss_y = norm.pdf(v, loc=mean[1], scale=cov[1, 1] ** 0.5)
assert_allclose(margin_x, gauss_x, rtol=1e-2, atol=1e-2)
assert_allclose(margin_y, gauss_y, rtol=1e-2, atol=1e-2)
def test_frozen(self):
# The frozen distribution should agree with the regular one
np.random.seed(1234)
x = np.random.randn(5)
mean = np.random.randn(5)
cov = np.abs(np.random.randn(5))
norm_frozen = multivariate_normal(mean, cov)
assert_allclose(norm_frozen.pdf(x), multivariate_normal.pdf(x, mean, cov))
assert_allclose(norm_frozen.logpdf(x),
multivariate_normal.logpdf(x, mean, cov))
def test_pseudodet_pinv(self):
# Make sure that pseudo-inverse and pseudo-det agree on cutoff
# Assemble random covariance matrix with large and small eigenvalues
np.random.seed(1234)
n = 7
x = np.random.randn(n, n)
cov = np.dot(x, x.T)
s, u = scipy.linalg.eigh(cov)
s = 0.5 * np.ones(n)
s[0] = 1.0
s[-1] = 1e-7
cov = np.dot(u, np.dot(np.diag(s), u.T))
# Set cond so that the lowest eigenvalue is below the cutoff
cond = 1e-5
psd = _PSD(cov, cond=cond)
psd_pinv = _PSD(psd.pinv, cond=cond)
# Check that the log pseudo-determinant agrees with the sum
# of the logs of all but the smallest eigenvalue
assert_allclose(psd.log_pdet, np.sum(np.log(s[:-1])))
# Check that the pseudo-determinant of the pseudo-inverse
# agrees with 1 / pseudo-determinant
assert_allclose(-psd.log_pdet, psd_pinv.log_pdet)
def test_exception_nonsquare_cov(self):
cov = [[1, 2, 3], [4, 5, 6]]
assert_raises(ValueError, _PSD, cov)
def test_exception_nonfinite_cov(self):
cov_nan = [[1, 0], [0, np.nan]]
assert_raises(ValueError, _PSD, cov_nan)
cov_inf = [[1, 0], [0, np.inf]]
assert_raises(ValueError, _PSD, cov_inf)
def test_exception_non_psd_cov(self):
cov = [[1, 0], [0, -1]]
assert_raises(ValueError, _PSD, cov)
def test_exception_singular_cov(self):
np.random.seed(1234)
x = np.random.randn(5)
mean = np.random.randn(5)
cov = np.ones((5, 5))
e = np.linalg.LinAlgError
assert_raises(e, multivariate_normal, mean, cov)
assert_raises(e, multivariate_normal.pdf, x, mean, cov)
assert_raises(e, multivariate_normal.logpdf, x, mean, cov)
def test_R_values(self):
# Compare the multivariate pdf with some values precomputed
# in R version 3.0.1 (2013-05-16) on Mac OS X 10.6.
# The values below were generated by the following R-script:
# > library(mnormt)
# > x <- seq(0, 2, length=5)
# > y <- 3*x - 2
# > z <- x + cos(y)
# > mu <- c(1, 3, 2)
# > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
# > r_pdf <- dmnorm(cbind(x,y,z), mu, Sigma)
r_pdf = np.array([0.0002214706, 0.0013819953, 0.0049138692,
0.0103803050, 0.0140250800])
x = np.linspace(0, 2, 5)
y = 3 * x - 2
z = x + np.cos(y)
r = np.array([x, y, z]).T
mean = np.array([1, 3, 2], 'd')
cov = np.array([[1, 2, 0], [2, 5, .5], [0, .5, 3]], 'd')
pdf = multivariate_normal.pdf(r, mean, cov)
assert_allclose(pdf, r_pdf, atol=1e-10)
def test_multivariate_normal_rvs_zero_covariance(self):
mean = np.zeros(2)
covariance = np.zeros((2, 2))
model = multivariate_normal(mean, covariance, allow_singular=True)
sample = model.rvs()
assert_equal(sample, [0, 0])
def test_rvs_shape(self):
# Check that rvs parses the mean and covariance correctly, and returns
# an array of the right shape
N = 300
d = 4
sample = multivariate_normal.rvs(mean=np.zeros(d), cov=1, size=N)
assert_equal(sample.shape, (N, d))
sample = multivariate_normal.rvs(mean=None,
cov=np.array([[2, .1], [.1, 1]]),
size=N)
assert_equal(sample.shape, (N, 2))
u = multivariate_normal(mean=0, cov=1)
sample = u.rvs(N)
assert_equal(sample.shape, (N, ))
def test_large_sample(self):
# Generate large sample and compare sample mean and sample covariance
# with mean and covariance matrix.
np.random.seed(2846)
n = 3
mean = np.random.randn(n)
M = np.random.randn(n, n)
cov = np.dot(M, M.T)
size = 5000
sample = multivariate_normal.rvs(mean, cov, size)
assert_allclose(numpy.cov(sample.T), cov, rtol=1e-1)
assert_allclose(sample.mean(0), mean, rtol=1e-1)
def test_entropy(self):
np.random.seed(2846)
n = 3
mean = np.random.randn(n)
M = np.random.randn(n, n)
cov = np.dot(M, M.T)
rv = multivariate_normal(mean, cov)
# Check that frozen distribution agrees with entropy function
assert_almost_equal(rv.entropy(), multivariate_normal.entropy(mean, cov))
# Compare entropy with manually computed expression involving
# the sum of the logs of the eigenvalues of the covariance matrix
eigs = np.linalg.eig(cov)[0]
desired = 1 / 2 * (n * (np.log(2 * np.pi) + 1) + np.sum(np.log(eigs)))
assert_almost_equal(desired, rv.entropy())
def test_lnB(self):
alpha = np.array([1, 1, 1])
desired = .5 # e^lnB = 1/2 for [1, 1, 1]
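        # since B(alpha) = prod(gamma(a_i)) / gamma(sum(a_i)) = 1 * 1 * 1 / gamma(3) = 1/2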
assert_almost_equal(np.exp(_lnB(alpha)), desired)
class TestDirichlet(TestCase):
def test_frozen_dirichlet(self):
np.random.seed(2846)
n = np.random.randint(1, 32)
alpha = np.random.uniform(10e-10, 100, n)
d = dirichlet(alpha)
assert_equal(d.var(), dirichlet.var(alpha))
assert_equal(d.mean(), dirichlet.mean(alpha))
assert_equal(d.entropy(), dirichlet.entropy(alpha))
num_tests = 10
for i in range(num_tests):
x = np.random.uniform(10e-10, 100, n)
x /= np.sum(x)
assert_equal(d.pdf(x[:-1]), dirichlet.pdf(x[:-1], alpha))
assert_equal(d.logpdf(x[:-1]), dirichlet.logpdf(x[:-1], alpha))
def test_numpy_rvs_shape_compatibility(self):
np.random.seed(2846)
alpha = np.array([1.0, 2.0, 3.0])
x = np.random.dirichlet(alpha, size=7)
assert_equal(x.shape, (7, 3))
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
dirichlet.pdf(x.T, alpha)
dirichlet.pdf(x.T[:-1], alpha)
dirichlet.logpdf(x.T, alpha)
dirichlet.logpdf(x.T[:-1], alpha)
def test_alpha_with_zeros(self):
np.random.seed(2846)
alpha = [1.0, 0.0, 3.0]
x = np.random.dirichlet(alpha, size=7).T
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_alpha_with_negative_entries(self):
np.random.seed(2846)
alpha = [1.0, -2.0, 3.0]
x = np.random.dirichlet(alpha, size=7).T
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_data_with_zeros(self):
alpha = np.array([1.0, 2.0, 3.0, 4.0])
x = np.array([0.1, 0.0, 0.2, 0.7])
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_data_with_negative_entries(self):
alpha = np.array([1.0, 2.0, 3.0, 4.0])
x = np.array([0.1, -0.1, 0.3, 0.7])
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_data_with_too_large_entries(self):
alpha = np.array([1.0, 2.0, 3.0, 4.0])
x = np.array([0.1, 1.1, 0.3, 0.7])
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_data_too_deep_c(self):
alpha = np.array([1.0, 2.0, 3.0])
x = np.ones((2, 7, 7)) / 14
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_alpha_too_deep(self):
alpha = np.array([[1.0, 2.0], [3.0, 4.0]])
x = np.ones((2, 2, 7)) / 4
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_alpha_correct_depth(self):
alpha = np.array([1.0, 2.0, 3.0])
x = np.ones((3, 7)) / 3
dirichlet.pdf(x, alpha)
dirichlet.logpdf(x, alpha)
def test_non_simplex_data(self):
alpha = np.array([1.0, 2.0, 3.0])
x = np.ones((3, 7)) / 2
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_data_vector_too_short(self):
alpha = np.array([1.0, 2.0, 3.0, 4.0])
x = np.ones((2, 7)) / 2
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_data_vector_too_long(self):
alpha = np.array([1.0, 2.0, 3.0, 4.0])
x = np.ones((5, 7)) / 5
assert_raises(ValueError, dirichlet.pdf, x, alpha)
assert_raises(ValueError, dirichlet.logpdf, x, alpha)
def test_simple_values(self):
alpha = np.array([1, 1])
d = dirichlet(alpha)
assert_almost_equal(d.mean(), 0.5)
assert_almost_equal(d.var(), 1. / 12.)
b = beta(1, 1)
assert_almost_equal(d.mean(), b.mean())
assert_almost_equal(d.var(), b.var())
def test_K_and_K_minus_1_calls_equal(self):
# Test that calls with K and K-1 entries yield the same results.
np.random.seed(2846)
n = np.random.randint(1, 32)
alpha = np.random.uniform(10e-10, 100, n)
d = dirichlet(alpha)
num_tests = 10
for i in range(num_tests):
x = np.random.uniform(10e-10, 100, n)
x /= np.sum(x)
assert_almost_equal(d.pdf(x[:-1]), d.pdf(x))
def test_multiple_entry_calls(self):
# Test that calls with multiple x vectors as matrix work
np.random.seed(2846)
n = np.random.randint(1, 32)
alpha = np.random.uniform(10e-10, 100, n)
d = dirichlet(alpha)
num_tests = 10
num_multiple = 5
xm = None
for i in range(num_tests):
for m in range(num_multiple):
x = np.random.uniform(10e-10, 100, n)
x /= np.sum(x)
if xm is not None:
xm = np.vstack((xm, x))
else:
xm = x
rm = d.pdf(xm.T)
rs = None
for xs in xm:
r = d.pdf(xs)
if rs is not None:
rs = np.append(rs, r)
else:
rs = r
assert_array_almost_equal(rm, rs)
def test_2D_dirichlet_is_beta(self):
np.random.seed(2846)
alpha = np.random.uniform(10e-10, 100, 2)
d = dirichlet(alpha)
b = beta(alpha[0], alpha[1])
num_tests = 10
for i in range(num_tests):
x = np.random.uniform(10e-10, 100, 2)
x /= np.sum(x)
assert_almost_equal(b.pdf(x), d.pdf([x]))
assert_almost_equal(b.mean(), d.mean()[0])
assert_almost_equal(b.var(), d.var()[0])
def test_multivariate_normal_dimensions_mismatch():
# Regression test for GH #3493. Check that setting up a PDF with a mean of
# length M and a covariance matrix of size (N, N), where M != N, raises a
# ValueError with an informative error message.
mu = np.array([0.0, 0.0])
sigma = np.array([[1.0]])
assert_raises(ValueError, multivariate_normal, mu, sigma)
# A simple check that the right error message was passed along. Checking
# that the entire message is there, word for word, would be somewhat
# fragile, so we just check for the leading part.
try:
multivariate_normal(mu, sigma)
except ValueError as e:
msg = "Dimension mismatch"
assert_equal(str(e)[:len(msg)], msg)
class TestWishart(TestCase):
def test_scale_dimensions(self):
# Test that we can call the Wishart with various scale dimensions
# Test case: dim=1, scale=1
true_scale = np.array(1, ndmin=2)
scales = [
1, # scalar
[1], # iterable
np.array(1), # 0-dim
np.r_[1], # 1-dim
np.array(1, ndmin=2) # 2-dim
]
for scale in scales:
w = wishart(1, scale)
assert_equal(w.scale, true_scale)
assert_equal(w.scale.shape, true_scale.shape)
# Test case: dim=2, scale=[[1,0]
# [0,2]
true_scale = np.array([[1,0],
[0,2]])
scales = [
[1,2], # iterable
np.r_[1,2], # 1-dim
np.array([[1,0], # 2-dim
[0,2]])
]
for scale in scales:
w = wishart(2, scale)
assert_equal(w.scale, true_scale)
assert_equal(w.scale.shape, true_scale.shape)
# We cannot call with a df < dim
assert_raises(ValueError, wishart, 1, np.eye(2))
# We cannot call with a 3-dimension array
scale = np.array(1, ndmin=3)
assert_raises(ValueError, wishart, 1, scale)
def test_quantile_dimensions(self):
# Test that we can call the Wishart rvs with various quantile dimensions
# If dim == 1, consider x.shape = [1,1,1]
X = [
1, # scalar
[1], # iterable
np.array(1), # 0-dim
np.r_[1], # 1-dim
np.array(1, ndmin=2), # 2-dim
np.array([1], ndmin=3) # 3-dim
]
w = wishart(1,1)
density = w.pdf(np.array(1, ndmin=3))
for x in X:
assert_equal(w.pdf(x), density)
# If dim == 1, consider x.shape = [1,1,*]
X = [
[1,2,3], # iterable
np.r_[1,2,3], # 1-dim
np.array([1,2,3], ndmin=3) # 3-dim
]
w = wishart(1,1)
density = w.pdf(np.array([1,2,3], ndmin=3))
for x in X:
assert_equal(w.pdf(x), density)
# If dim == 2, consider x.shape = [2,2,1]
# where x[:,:,*] = np.eye(1)*2
X = [
2, # scalar
[2,2], # iterable
np.array(2), # 0-dim
np.r_[2,2], # 1-dim
np.array([[2,0],
[0,2]]), # 2-dim
np.array([[2,0],
[0,2]])[:,:,np.newaxis] # 3-dim
]
w = wishart(2,np.eye(2))
density = w.pdf(np.array([[2,0],
[0,2]])[:,:,np.newaxis])
for x in X:
assert_equal(w.pdf(x), density)
def test_frozen(self):
# Test that the frozen and non-frozen Wishart gives the same answers
# Construct an arbitrary positive definite scale matrix
dim = 4
scale = np.diag(np.arange(dim)+1)
scale[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
scale = np.dot(scale.T, scale)
# Construct a collection of positive definite matrices to test the PDF
X = []
for i in range(5):
x = np.diag(np.arange(dim)+(i+1)**2)
x[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
x = np.dot(x.T, x)
X.append(x)
X = np.array(X).T
# Construct a 1D and 2D set of parameters
parameters = [
(10, 1, np.linspace(0.1, 10, 5)), # 1D case
(10, scale, X)
]
for (df, scale, x) in parameters:
w = wishart(df, scale)
assert_equal(w.var(), wishart.var(df, scale))
assert_equal(w.mean(), wishart.mean(df, scale))
assert_equal(w.mode(), wishart.mode(df, scale))
assert_equal(w.entropy(), wishart.entropy(df, scale))
assert_equal(w.pdf(x), wishart.pdf(x, df, scale))
def test_1D_is_chisquared(self):
# The 1-dimensional Wishart with an identity scale matrix is just a
# chi-squared distribution.
# Test variance, mean, entropy, pdf
        # Kolmogorov-Smirnov test for rvs
np.random.seed(482974)
sn = 500
dim = 1
scale = np.eye(dim)
df_range = np.arange(1, 10, 2, dtype=float)
X = np.linspace(0.1,10,num=10)
for df in df_range:
w = wishart(df, scale)
c = chi2(df)
# Statistics
assert_allclose(w.var(), c.var())
assert_allclose(w.mean(), c.mean())
assert_allclose(w.entropy(), c.entropy())
# PDF
assert_allclose(w.pdf(X), c.pdf(X))
# rvs
rvs = w.rvs(size=sn)
args = (df,)
alpha = 0.01
check_distribution_rvs('chi2', args, alpha, rvs)
def test_is_scaled_chisquared(self):
# The 2-dimensional Wishart with an arbitrary scale matrix can be
# transformed to a scaled chi-squared distribution.
# For :math:`S \sim W_p(V,n)` and :math:`\lambda \in \mathbb{R}^p` we have
# :math:`\lambda' S \lambda \sim \lambda' V \lambda \times \chi^2(n)`
np.random.seed(482974)
sn = 500
df = 10
dim = 4
# Construct an arbitrary positive definite matrix
scale = np.diag(np.arange(4)+1)
scale[np.tril_indices(4, k=-1)] = np.arange(6)
scale = np.dot(scale.T, scale)
# Use :math:`\lambda = [1, \dots, 1]'`
lamda = np.ones((dim,1))
sigma_lamda = lamda.T.dot(scale).dot(lamda).squeeze()
w = wishart(df, sigma_lamda)
c = chi2(df, scale=sigma_lamda)
# Statistics
assert_allclose(w.var(), c.var())
assert_allclose(w.mean(), c.mean())
assert_allclose(w.entropy(), c.entropy())
# PDF
X = np.linspace(0.1,10,num=10)
assert_allclose(w.pdf(X), c.pdf(X))
# rvs
rvs = w.rvs(size=sn)
args = (df,0,sigma_lamda)
alpha = 0.01
check_distribution_rvs('chi2', args, alpha, rvs)
class TestInvwishart(TestCase):
def test_frozen(self):
# Test that the frozen and non-frozen inverse Wishart gives the same
# answers
# Construct an arbitrary positive definite scale matrix
dim = 4
scale = np.diag(np.arange(dim)+1)
scale[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2)
scale = np.dot(scale.T, scale)
# Construct a collection of positive definite matrices to test the PDF
X = []
for i in range(5):
x = np.diag(np.arange(dim)+(i+1)**2)
x[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2)
x = np.dot(x.T, x)
X.append(x)
X = np.array(X).T
# Construct a 1D and 2D set of parameters
parameters = [
(10, 1, np.linspace(0.1, 10, 5)), # 1D case
(10, scale, X)
]
for (df, scale, x) in parameters:
iw = invwishart(df, scale)
assert_equal(iw.var(), invwishart.var(df, scale))
assert_equal(iw.mean(), invwishart.mean(df, scale))
assert_equal(iw.mode(), invwishart.mode(df, scale))
assert_allclose(iw.pdf(x), invwishart.pdf(x, df, scale))
def test_1D_is_invgamma(self):
# The 1-dimensional inverse Wishart with an identity scale matrix is
# just an inverse gamma distribution.
# Test variance, mean, pdf
        # Kolmogorov-Smirnov test for rvs
np.random.seed(482974)
sn = 500
dim = 1
scale = np.eye(dim)
df_range = np.arange(5, 20, 2, dtype=float)
X = np.linspace(0.1,10,num=10)
for df in df_range:
iw = invwishart(df, scale)
ig = invgamma(df/2, scale=1./2)
# Statistics
assert_allclose(iw.var(), ig.var())
assert_allclose(iw.mean(), ig.mean())
# PDF
assert_allclose(iw.pdf(X), ig.pdf(X))
# rvs
rvs = iw.rvs(size=sn)
args = (df/2, 0, 1./2)
alpha = 0.01
check_distribution_rvs('invgamma', args, alpha, rvs)
def test_wishart_invwishart_2D_rvs(self):
dim = 3
df = 10
# Construct a simple non-diagonal positive definite matrix
scale = np.eye(dim)
scale[0,1] = 0.5
scale[1,0] = 0.5
# Construct frozen Wishart and inverse Wishart random variables
w = wishart(df, scale)
iw = invwishart(df, scale)
# Get the generated random variables from a known seed
np.random.seed(248042)
w_rvs = wishart.rvs(df, scale)
np.random.seed(248042)
frozen_w_rvs = w.rvs()
np.random.seed(248042)
iw_rvs = invwishart.rvs(df, scale)
np.random.seed(248042)
frozen_iw_rvs = iw.rvs()
# Manually calculate what it should be, based on the Bartlett (1933)
# decomposition of a Wishart into D A A' D', where D is the Cholesky
# factorization of the scale matrix and A is the lower triangular matrix
# with the square root of chi^2 variates on the diagonal and N(0,1)
# variates in the lower triangle.
np.random.seed(248042)
covariances = np.random.normal(size=3)
variances = np.r_[
np.random.chisquare(df),
np.random.chisquare(df-1),
np.random.chisquare(df-2),
]**0.5
# Construct the lower-triangular A matrix
A = np.diag(variances)
A[np.tril_indices(dim, k=-1)] = covariances
# Wishart random variate
D = np.linalg.cholesky(scale)
DA = D.dot(A)
manual_w_rvs = np.dot(DA, DA.T)
# inverse Wishart random variate
# Supposing that the inverse wishart has scale matrix `scale`, then the
# random variate is the inverse of a random variate drawn from a Wishart
# distribution with scale matrix `inv_scale = np.linalg.inv(scale)`
iD = np.linalg.cholesky(np.linalg.inv(scale))
iDA = iD.dot(A)
manual_iw_rvs = np.linalg.inv(np.dot(iDA, iDA.T))
# Test for equality
assert_allclose(w_rvs, manual_w_rvs)
assert_allclose(frozen_w_rvs, manual_w_rvs)
assert_allclose(iw_rvs, manual_iw_rvs)
assert_allclose(frozen_iw_rvs, manual_iw_rvs)
def test_random_state_property():
scale = np.eye(3)
scale[0,1] = 0.5
scale[1,0] = 0.5
dists = [
[multivariate_normal, ()],
[dirichlet, (np.array([1.]), )],
[wishart, (10, scale)],
[invwishart, (10, scale)]
]
for distfn, args in dists:
check_random_state_property(distfn, args)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | -1,342,112,144,352,811,000 | -1,929,615,912,820,487,700 | 34.332176 | 82 | 0.541586 | false |
3nids/QGIS | python/plugins/processing/algs/grass7/ext/r_shade.py | 45 | 1527 | # -*- coding: utf-8 -*-
"""
***************************************************************************
r_shade.py
----------
Date : February 2016
    Copyright            : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
def processInputs(alg, parameters, context, feedback):
# We need to import all the bands and color tables of the input rasters
alg.loadRasterLayerFromParameter('shade', parameters, context,
False, None)
alg.loadRasterLayerFromParameter('color', parameters, context,
False, None)
def processOutputs(alg, parameters, context, feedback):
# Keep color table
alg.exportRasterLayerFromParameter('output', parameters, context, True)
| gpl-2.0 | -5,852,877,863,102,885,000 | 8,973,849,888,310,873,000 | 42.457143 | 75 | 0.451677 | false |
MicBrain/Tic_Tac_Toe | Tic_Tac_Toe.py | 1 | 8653 | ###################
### DESCRIPTION ###
###################
"""
Tic-tac-toe (or Noughts and crosses, Xs and Os) is a game for two players, X and O, who take
turns marking the spaces in a 3×3 grid. The player who succeeds in placing three respective marks
in a horizontal, vertical, or diagonal row wins the game.
The simplicity of Tic-tac-toe makes it ideal as a pedagogical tool for teaching the concepts
of good sportsmanship and the branch of artificial intelligence that deals with the searching of
game trees. It is straightforward to write a computer program to play Tic-tac-toe perfectly.
The game can be generalized to an m,n,k-game in which two players alternate placing stones of
their own color on an m×n board, with the goal of getting k of their own color in a row. Tic-tac-toe
is the (3,3,3)-game.
Despite its apparent simplicity, Tic-tac-toe requires detailed analysis to determine even some
elementary combinatory facts, the most interesting of which are the number of possible games and the
number of possible positions. A position is merely a state of the board, while a game usually refers
to the way a terminal position is obtained.
"""
from string import *
from random import *
import itertools
import math
####################
## MAIN VARIABLES ##
####################
Player_1 = 'x' # player 1's mark
Player_2 = 'o' # player 2's mark
A = 'A' # these just make it easier to keep referring to 'A', 'B' and 'C'
B = 'B'
C = 'C'
#####################
## State variables ##
#####################
EMPTY = ' '
Table = [[EMPTY, EMPTY, EMPTY],
[EMPTY, EMPTY, EMPTY],
[EMPTY, EMPTY, EMPTY]]
current = randint(1, 2)
#########################
### Coordinate system ###
#########################
def square(row, col): # squares are represented as tuples of (row, col).
return (row, col) # rows are numbered 1 thru 3, cols 'A' thru 'C'.
def square_row(square): # these two functions save us the hassle of using
return square[0] # index values in our code, e.g. square[0]...
def square_col(square): # from this point on, i should never directly use
return square[1] # tuples when working with squares.
def get_square(square):
row_i = square_row(square) - 1
col_i = ord(square_col(square)) - ord(A)
return Table[row_i][col_i] # note how this and set_square are the ONLY
# functions which directly use board!
def set_square(square, mark):
row_i = square_row(square) - 1
col_i = ord(square_col(square)) - ord(A)
    Table[row_i][col_i] = mark # note how this and get_square are the ONLY
                               # functions which directly use board!
def get_row(row):
return [get_square((row, A)), get_square((row, B)), get_square((row, C))]
def get_column(col):
return [get_square((1, col)), get_square((2, col)), get_square((3, col))]
def get_diagonal(corner_square):
if corner_square == (1, A) or corner_square == (3, C):
return [get_square((1, A)), get_square((2, B)), get_square((3, C))]
else:
return [get_square((1, C)), get_square((2, B)), get_square((3, A))]
def get_mark(player):
if player == 1:
return Player_1
else:
return Player_2
def all_squares_filled():
for row in range(1, 4): # range(1, 4) returns the list [1, 2, 3]
if EMPTY in get_row(row):
return False # this row contains an empty square, we know enough
return True # no empty squares found, all squares are filled
def player_has_won(player):
MARK = get_mark(player)
win = [MARK, MARK, MARK]
if get_row(1) == win or get_row(2) == win or get_row(3) == win:
return True
if get_column(A) == win or get_column(B) == win or get_column(C) == win:
return True
if get_diagonal((1, A)) == win or get_diagonal((1, C)) == win:
return True
return False
def draw_board_straight():
A1, A2, A3 = get_square((1, A)), get_square((2, A)), get_square((3, A))
B1, B2, B3 = get_square((1, B)), get_square((2, B)), get_square((3, B))
C1, C2, C3 = get_square((1, C)), get_square((2, C)), get_square((3, C))
lines = []
lines.append("")
lines.append(" " + A + " " + B + " " + C + " ")
lines.append(" ")
lines.append("1 " + A1 + " | " + B1 + " | " + C1 + " ")
lines.append(" ---+---+---")
lines.append("2 " + A2 + " | " + B2 + " | " + C2 + " ")
lines.append(" ---+---+---")
lines.append("3 " + A3 + " | " + B3 + " | " + C3 + " ")
lines.append("")
    return '\n'.join(lines)  # the '\n' represents a newline
def draw_board_slanted():
A1, A2, A3 = get_square((1, A)), get_square((2, A)), get_square((3, A))
B1, B2, B3 = get_square((1, B)), get_square((2, B)), get_square((3, B))
C1, C2, C3 = get_square((1, C)), get_square((2, C)), get_square((3, C))
lines = []
lines.append("")
lines.append(" " + A + " " + B + " " + C + " ")
lines.append(" ")
lines.append(" 1 " + A1 + " / " + B1 + " / " + C1 + " ")
lines.append(" ---/---/--- ")
lines.append(" 2 " + A2 + " / " + B2 + " / " + C2 + " ")
lines.append(" ---/---/--- ")
lines.append("3 " + A3 + " / " + B3 + " / " + C3 + " ")
lines.append("")
    return '\n'.join(lines)
def draw_board():
return draw_board_slanted()
def reset_main_board():
for row in (1, 2, 3):
for col in (A, B, C):
set_square(square(row, col), EMPTY)
def play():
global current
reset_main_board()
current = randint(1, 2)
print ("Tic-Tac-Toe!")
    print()
player1_name = input("Player 1, what is your name? ")
player2_name = input("Player 2, what is your name? ")
def get_name(player):
if player == 1:
return player1_name
else:
return player2_name
    print()
print ("Welcome,", player1_name, "and", player2_name + "!")
print (player1_name, "will be", Player_1 + ", and", player2_name, "will be", Player_2 + ".")
print ("By random decision,", get_name(current), "will go first.")
    print()
input("[Press enter when ready to play.] ") # just waiting for them to press enter
print (draw_board())
while not all_squares_filled():
choice = input(get_name(current) + ", which square? (e.g. 2B, 2b, B2 or b2) ")
if len(choice) != 2:
print ("That's not a square. You must enter a square like b2, or 3C.")
            print()
continue
if choice[0] not in ["1", "2", "3"] and str.upper(choice[0]) not in [A, B, C]:
print ("The first character must be a row (1, 2 or 3) or column (A, B or C).")
            print()
continue
if choice[1] not in ["1", "2", "3"] and str.upper(choice[1]) not in [A, B, C]:
print ("The second character must be a row (1, 2 or 3) or column (A, B or C).")
            print()
continue
if choice[0] in ["1", "2", "3"] and choice[1] in ["1", "2", "3"]:
print ("You entered two rows! You must enter one row and one column (A, B or C).")
            print()
continue
if str.upper(choice[0]) in [A, B, C] and str.upper(choice[1]) in [A, B, C]:
print ("You entered two columns! You must enter one row (1, 2 or 3) and one column.")
            print()
continue
if choice[0] in ["1", "2", "3"]:
row = int(choice[0])
col = str.upper(choice[1])
else:
row = int(choice[1])
col = str.upper(choice[0])
choice = square(row, col) # make this into a (row, col) tuple
if get_square(choice) != EMPTY:
print ("Sorry, that square is already marked.")
            print()
continue
set_square(choice, get_mark(current))
print (draw_board())
if player_has_won(current):
print ("Congratulations", get_name(current), "-- you win!")
            print()
break
if all_squares_filled():
print ("Cats game!", player1_name, "and", player2_name, "draw.")
            print()
break
current = 3 - current # sets 1 to 2 and 2 to 1
print ("GAME IS OVER")
    print()
if __name__ == "__main__":
continue_playing = True
while continue_playing:
play()
again = str.lower(input("Play again? (y/n) "))
        print()
        print()
        print()
if again != "y":
continue_playing = False
print ("Thanks for playing!")
    print()
| gpl-3.0 | 977,756,936,795,794,800 | 504,324,949,857,321,200 | 37.620536 | 101 | 0.539475 | false |
osu-cass/whats-fresh-api | whats_fresh/whats_fresh_api/tests/views/entry/test_new_image.py | 2 | 3620 | from django.test import TestCase
from django.core.urlresolvers import reverse
from whats_fresh.whats_fresh_api.models import Image
from django.contrib.auth.models import User, Group
import os
class NewImageTestCase(TestCase):
"""
Test that the New Image page works as expected.
Things tested:
URLs reverse correctly
The outputted page has the correct form fields
POSTing "correct" data will result in the creation of a new
object with the specified details
        POSTing data with all fields missing (hitting "save" without entering
            data) returns the same form with notations of missing fields
"""
def setUp(self):
user = User.objects.create_user(
'temporary', '[email protected]', 'temporary')
user.save()
admin_group = Group(name='Administration Users')
admin_group.save()
user.groups.add(admin_group)
response = self.client.login(
username='temporary', password='temporary')
self.assertEqual(response, True)
self.test_media_directory = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', 'testdata', 'media'))
self.image = open(
os.path.join(self.test_media_directory, 'cat.jpg'), 'r')
def tearDown(self):
self.image.close()
def test_not_logged_in(self):
self.client.logout()
response = self.client.get(
reverse('new-image'))
self.assertRedirects(response, '/login?next=/entry/images/new')
def test_url_endpoint(self):
url = reverse('new-image')
self.assertEqual(url, '/entry/images/new')
def test_form_fields(self):
"""
Tests to see if the form contains all of the right fields
"""
response = self.client.get(reverse('new-image'))
fields = {'image': 'file', 'caption': 'input', 'name': 'input'}
form = response.context['image_form']
for field in fields:
# for the Edit tests, you should be able to access
# form[field].value
self.assertIn(fields[field], str(form[field]))
def test_successful_image_creation(self):
"""
POST a proper "new image" command to the server, and see if the
new image appears in the database. All optional fields are null.
"""
Image.objects.all().delete()
# Data that we'll post to the server to get the new image created
new_image = {
'caption': "Catption",
'name': "A cat",
'image': self.image}
self.client.post(reverse('new-image'), new_image)
image = Image.objects.all()[0]
self.assertEqual(getattr(image, 'caption'), new_image['caption'])
self.assertEqual(getattr(image, 'name'), new_image['name'])
self.assertIn('/media/images/cat', getattr(image, 'image').url)
def test_no_data_error(self):
"""
POST a "new image" command to the server missing all of the
required fields, and test to see what the error comes back as.
"""
# Create a list of all objects before sending bad POST data
all_images = Image.objects.all()
response = self.client.post(reverse('new-image'))
required_fields = ['image', 'name']
for field_name in required_fields:
self.assertIn(field_name,
response.context['image_form'].errors)
# Test that we didn't add any new objects
self.assertEqual(
list(Image.objects.all()), list(all_images))
| apache-2.0 | -4,370,641,489,105,543,700 | -6,891,954,389,580,919,000 | 33.47619 | 77 | 0.606354 | false |
danakj/chromium | chrome/test/data/nacl/debug_stub_browser_tests.py | 42 | 3536 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import sys
import xml.etree.ElementTree
import gdb_rsp
def AssertRaises(exc_class, func):
try:
func()
except exc_class:
pass
else:
raise AssertionError('Function did not raise %r' % exc_class)
def GetTargetArch(connection):
"""Get the CPU architecture of the NaCl application."""
reply = connection.RspRequest('qXfer:features:read:target.xml:0,fff')
assert reply[0] == 'l', reply
tree = xml.etree.ElementTree.fromstring(reply[1:])
arch_tag = tree.find('architecture')
assert arch_tag is not None, reply
return arch_tag.text.strip()
def ReverseBytes(byte_string):
"""Reverse bytes in the hex string: '09ab' -> 'ab09'. This converts
little-endian number in the hex string to its normal string representation.
"""
assert len(byte_string) % 2 == 0, byte_string
return ''.join([byte_string[i - 2 : i]
for i in xrange(len(byte_string), 0, -2)])
def GetProgCtrString(connection, arch):
"""Get current execution point."""
registers = connection.RspRequest('g')
# PC register indices can be found in
# native_client/src/trusted/debug_stub/abi.cc in AbiInit function.
if arch == 'i386':
# eip index is 8
return ReverseBytes(registers[8 * 8 : 8 * 8 + 8])
if arch == 'i386:x86-64':
# rip index is 16
return ReverseBytes(registers[16 * 16 : 16 * 16 + 8])
if arch == 'iwmmxt':
# pc index is 15
return ReverseBytes(registers[15 * 8 : 15 * 8 + 8])
raise AssertionError('Unknown architecture: %s' % arch)
def TestContinue(connection):
# Once the NaCl test module reports that the test passed, the NaCl <embed>
# element is removed from the page. The NaCl module will be killed by the
# browser which will appear as EOF (end-of-file) on the debug stub socket.
AssertRaises(gdb_rsp.EofOnReplyException,
lambda: connection.RspRequest('vCont;c'))
def TestBreakpoint(connection):
# Breakpoints and single-stepping might interfere with Chrome sandbox. So we
# check that they work properly in this test.
arch = GetTargetArch(connection)
registers = connection.RspRequest('g')
pc = GetProgCtrString(connection, arch)
# Set breakpoint
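  # 'Z0,addr,kind' is the GDB remote serial protocol packet that inserts a software breakpoint.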
result = connection.RspRequest('Z0,%s,1' % pc)
assert result == 'OK', result
# Check that we stopped at breakpoint
result = connection.RspRequest('vCont;c')
stop_reply = re.compile(r'T05thread:(\d+);')
assert stop_reply.match(result), result
thread = stop_reply.match(result).group(1)
# Check that registers haven't changed
result = connection.RspRequest('g')
assert result == registers, (result, registers)
# Remove breakpoint
result = connection.RspRequest('z0,%s,1' % pc)
assert result == 'OK', result
# Check single stepping
result = connection.RspRequest('vCont;s:%s' % thread)
assert result == 'T05thread:%s;' % thread, result
assert pc != GetProgCtrString(connection, arch)
# Check that we terminate normally
AssertRaises(gdb_rsp.EofOnReplyException,
lambda: connection.RspRequest('vCont;c'))
def Main(args):
port = int(args[0])
name = args[1]
connection = gdb_rsp.GdbRspConnection(('localhost', port))
if name == 'continue':
TestContinue(connection)
elif name == 'breakpoint':
TestBreakpoint(connection)
else:
raise AssertionError('Unknown test name: %r' % name)
if __name__ == '__main__':
Main(sys.argv[1:])
| bsd-3-clause | 6,027,801,462,293,610,000 | -8,744,033,792,469,469,000 | 32.046729 | 78 | 0.691459 | false |
jpetto/bedrock | bedrock/firefox/helpers.py | 1 | 8778 | from collections import OrderedDict
from django.core.cache import cache
from django.conf import settings
import jingo
import jinja2
from bedrock.firefox.models import FirefoxOSFeedLink
from bedrock.firefox.firefox_details import firefox_desktop, firefox_android, firefox_ios
from bedrock.base.urlresolvers import reverse
from lib.l10n_utils import get_locale
def android_builds(channel, builds=None):
builds = builds or []
variations = OrderedDict([
('api-9', 'Gingerbread'),
('api-15', 'Ice Cream Sandwich+'),
('x86', 'x86'),
])
if channel == 'alpha':
for type, arch_pretty in variations.iteritems():
link = firefox_android.get_download_url('alpha', type)
builds.append({'os': 'android',
'os_pretty': 'Android',
'os_arch_pretty': 'Android %s' % arch_pretty,
'arch': 'x86' if type == 'x86' else 'armv7up %s' % type,
'arch_pretty': arch_pretty,
'download_link': link})
else:
link = firefox_android.get_download_url(channel)
builds.append({'os': 'android',
'os_pretty': 'Android',
'download_link': link})
return builds
def ios_builds(channel, builds=None):
builds = builds or []
link = firefox_ios.get_download_url(channel)
builds.append({'os': 'ios',
'os_pretty': 'iOS',
'download_link': link})
return builds
@jingo.register.function
@jinja2.contextfunction
def download_firefox(ctx, channel='release', small=False, icon=True,
platform='all', dom_id=None, locale=None, simple=False,
force_direct=False, force_full_installer=False,
force_funnelcake=False, check_old_fx=False):
""" Output a "download firefox" button.
:param ctx: context from calling template.
:param channel: name of channel: 'release', 'beta' or 'alpha'.
:param small: Display the small button if True.
:param icon: Display the Fx icon on the button if True.
:param platform: Target platform: 'desktop', 'android', 'ios', or 'all'.
:param dom_id: Use this string as the id attr on the element.
:param locale: The locale of the download. Default to locale of request.
:param simple: Display button with text only if True. Will not display
icon or privacy/what's new/systems & languages links. Can be used
in conjunction with 'small'.
:param force_direct: Force the download URL to be direct.
:param force_full_installer: Force the installer download to not be
the stub installer (for aurora).
:param force_funnelcake: Force the download version for en-US Windows to be
'latest', which bouncer will translate to the funnelcake build.
:param check_old_fx: Checks to see if the user is on an old version of
Firefox and, if true, changes the button text from 'Free Download'
to 'Update your Firefox'. Must be used in conjunction with
'simple' param being true.
:return: The button html.
"""
show_desktop = platform in ['all', 'desktop']
show_android = platform in ['all', 'android']
show_ios = platform in ['all', 'ios']
alt_channel = '' if channel == 'release' else channel
locale = locale or get_locale(ctx['request'])
funnelcake_id = ctx.get('funnelcake_id', False)
dom_id = dom_id or 'download-button-%s-%s' % (
'desktop' if platform == 'all' else platform, channel)
l_version = firefox_desktop.latest_builds(locale, channel)
if l_version:
version, platforms = l_version
else:
locale = 'en-US'
version, platforms = firefox_desktop.latest_builds('en-US', channel)
# Gather data about the build for each platform
builds = []
if show_desktop:
for plat_os, plat_os_pretty in firefox_desktop.platform_labels.iteritems():
# Windows 64-bit builds are not available on the ESR channel yet
if plat_os == 'win64' and channel in ['esr', 'esr_next']:
continue
# Fallback to en-US if this plat_os/version isn't available
# for the current locale
_locale = locale if plat_os_pretty in platforms else 'en-US'
# And generate all the info
download_link = firefox_desktop.get_download_url(
channel, version, plat_os, _locale,
force_direct=force_direct,
force_full_installer=force_full_installer,
force_funnelcake=force_funnelcake,
funnelcake_id=funnelcake_id,
)
# If download_link_direct is False the data-direct-link attr
# will not be output, and the JS won't attempt the IE popup.
if force_direct:
# no need to run get_download_url again with the same args
download_link_direct = False
else:
download_link_direct = firefox_desktop.get_download_url(
channel, version, plat_os, _locale,
force_direct=True,
force_full_installer=force_full_installer,
force_funnelcake=force_funnelcake,
funnelcake_id=funnelcake_id,
)
if download_link_direct == download_link:
download_link_direct = False
builds.append({'os': plat_os,
'os_pretty': plat_os_pretty,
'download_link': download_link,
'download_link_direct': download_link_direct})
if show_android:
builds = android_builds(channel, builds)
if show_ios:
builds.append({'os': 'ios',
'os_pretty': 'iOS',
'download_link': firefox_ios.get_download_url()})
# Get the native name for current locale
langs = firefox_desktop.languages
locale_name = langs[locale]['native'] if locale in langs else locale
data = {
'locale_name': locale_name,
'version': version,
'product': 'firefox-%s' % platform,
'builds': builds,
'id': dom_id,
'small': small,
'simple': simple,
'channel': alt_channel,
'show_desktop': show_desktop,
'show_android': show_android,
'show_ios': show_ios,
'icon': icon,
'check_old_fx': check_old_fx and simple,
}
html = jingo.render_to_string(ctx['request'],
'firefox/includes/download-button.html',
data)
return jinja2.Markup(html)
@jingo.register.function
def firefox_url(platform, page, channel=None):
"""
Return a product-related URL like /firefox/all/ or /mobile/beta/notes/.
Examples
========
In Template
-----------
{{ firefox_url('desktop', 'all', 'organizations') }}
{{ firefox_url('desktop', 'sysreq', channel) }}
{{ firefox_url('android', 'notes') }}
"""
kwargs = {}
# Tweak the channel name for the naming URL pattern in urls.py
if channel == 'release':
channel = None
if channel == 'alpha':
if platform == 'desktop':
channel = 'developer'
if platform == 'android':
channel = 'aurora'
if channel == 'esr':
channel = 'organizations'
if channel:
kwargs['channel'] = channel
if platform != 'desktop':
kwargs['platform'] = platform
# Firefox for Android and iOS have the system requirements page on SUMO
if platform in ['android', 'ios'] and page == 'sysreq':
return settings.FIREFOX_MOBILE_SYSREQ_URL
return reverse('firefox.%s' % page, kwargs=kwargs)
@jingo.register.function
def firefox_os_feed_links(locale, force_cache_refresh=False):
if locale in settings.FIREFOX_OS_FEED_LOCALES:
cache_key = 'firefox-os-feed-links-' + locale
if not force_cache_refresh:
links = cache.get(cache_key)
if links:
return links
links = list(
FirefoxOSFeedLink.objects.filter(locale=locale).order_by(
'-id').values_list('link', 'title')[:10])
cache.set(cache_key, links)
return links
elif '-' in locale:
return firefox_os_feed_links(locale.split('-')[0])
@jingo.register.function
def firefox_os_blog_link(locale):
try:
return settings.FXOS_PRESS_BLOG_LINKS[locale]
except KeyError:
if '-' in locale:
return firefox_os_blog_link(locale.split('-')[0])
else:
return None
| mpl-2.0 | -5,636,090,157,901,164,000 | 1,615,937,560,686,007,800 | 35.728033 | 89 | 0.583276 | false |
PeterSurda/PyBitmessage | src/kivymd/bottomsheet.py | 3 | 6751 | # -*- coding: utf-8 -*-
'''
Bottom Sheets
=============
`Material Design spec Bottom Sheets page <http://www.google.com/design/spec/components/bottom-sheets.html>`_
In this module there's the :class:`MDBottomSheet` class which will let you implement your own Material Design Bottom Sheets, and there are two classes called :class:`MDListBottomSheet` and :class:`MDGridBottomSheet` implementing the ones mentioned in the spec.
Examples
--------
.. note::
These widgets are designed to be called from Python code only.
For :class:`MDListBottomSheet`:
.. code-block:: python
bs = MDListBottomSheet()
bs.add_item("Here's an item with text only", lambda x: x)
bs.add_item("Here's an item with an icon", lambda x: x, icon='md-cast')
bs.add_item("Here's another!", lambda x: x, icon='md-nfc')
bs.open()
For :class:`MDGridBottomSheet`:
.. code-block:: python
bs = MDGridBottomSheet()
bs.add_item("Facebook", lambda x: x, icon_src='./assets/facebook-box.png')
bs.add_item("YouTube", lambda x: x, icon_src='./assets/youtube-play.png')
bs.add_item("Twitter", lambda x: x, icon_src='./assets/twitter.png')
bs.add_item("Da Cloud", lambda x: x, icon_src='./assets/cloud-upload.png')
bs.add_item("Camera", lambda x: x, icon_src='./assets/camera.png')
bs.open()
API
---
'''
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import ObjectProperty, StringProperty
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.modalview import ModalView
from kivy.uix.scrollview import ScrollView
from kivymd.backgroundcolorbehavior import BackgroundColorBehavior
from kivymd.label import MDLabel
from kivymd.list import MDList, OneLineListItem, ILeftBody, \
OneLineIconListItem
from kivymd.theming import ThemableBehavior
Builder.load_string('''
<MDBottomSheet>
background: 'atlas://data/images/defaulttheme/action_group_disabled'
background_color: 0,0,0,.8
sv: sv
upper_padding: upper_padding
gl_content: gl_content
ScrollView:
id: sv
do_scroll_x: False
BoxLayout:
size_hint_y: None
orientation: 'vertical'
padding: 0,1,0,0
height: upper_padding.height + gl_content.height + 1 # +1 to allow overscroll
BsPadding:
id: upper_padding
size_hint_y: None
height: root.height - min(root.width * 9 / 16, gl_content.height)
on_release: root.dismiss()
BottomSheetContent:
id: gl_content
size_hint_y: None
background_color: root.theme_cls.bg_normal
cols: 1
''')
class BsPadding(ButtonBehavior, FloatLayout):
pass
class BottomSheetContent(BackgroundColorBehavior, GridLayout):
pass
class MDBottomSheet(ThemableBehavior, ModalView):
sv = ObjectProperty()
upper_padding = ObjectProperty()
gl_content = ObjectProperty()
dismiss_zone_scroll = 1000 # Arbitrary high number
def open(self, *largs):
super(MDBottomSheet, self).open(*largs)
Clock.schedule_once(self.set_dismiss_zone, 0)
def set_dismiss_zone(self, *largs):
# Scroll to right below overscroll threshold:
self.sv.scroll_y = 1 - self.sv.convert_distance_to_scroll(0, 1)[1]
# This is a line where m (slope) is 1/6 and b (y-intercept) is 80:
self.dismiss_zone_scroll = self.sv.convert_distance_to_scroll(
0, (self.height - self.upper_padding.height) * (1 / 6.0) + 80)[
1]
# Uncomment next line if the limit should just be half of
# visible content on open (capped by specs to 16 units to width/9:
# self.dismiss_zone_scroll = (self.sv.convert_distance_to_scroll(
# 0, self.height - self.upper_padding.height)[1] * 0.50)
# Check if user has overscrolled enough to dismiss bottom sheet:
self.sv.bind(on_scroll_stop=self.check_if_scrolled_to_death)
def check_if_scrolled_to_death(self, *largs):
if self.sv.scroll_y >= 1 + self.dismiss_zone_scroll:
self.dismiss()
def add_widget(self, widget, index=0):
if type(widget) == ScrollView:
super(MDBottomSheet, self).add_widget(widget, index)
else:
self.gl_content.add_widget(widget,index)
Builder.load_string('''
#:import md_icons kivymd.icon_definitions.md_icons
<ListBSIconLeft>
font_style: 'Icon'
text: u"{}".format(md_icons[root.icon])
halign: 'center'
theme_text_color: 'Primary'
valign: 'middle'
''')
class ListBSIconLeft(ILeftBody, MDLabel):
icon = StringProperty()
class MDListBottomSheet(MDBottomSheet):
mlist = ObjectProperty()
def __init__(self, **kwargs):
super(MDListBottomSheet, self).__init__(**kwargs)
self.mlist = MDList()
self.gl_content.add_widget(self.mlist)
Clock.schedule_once(self.resize_content_layout, 0)
def resize_content_layout(self, *largs):
self.gl_content.height = self.mlist.height
def add_item(self, text, callback, icon=None):
if icon:
item = OneLineIconListItem(text=text, on_release=callback)
item.add_widget(ListBSIconLeft(icon=icon))
else:
item = OneLineListItem(text=text, on_release=callback)
item.bind(on_release=lambda x: self.dismiss())
self.mlist.add_widget(item)
Builder.load_string('''
<GridBSItem>
orientation: 'vertical'
padding: 0, dp(24), 0, 0
size_hint_y: None
size: dp(64), dp(96)
BoxLayout:
padding: dp(8), 0, dp(8), dp(8)
size_hint_y: None
height: dp(48)
Image:
source: root.source
MDLabel:
font_style: 'Caption'
theme_text_color: 'Secondary'
text: root.caption
halign: 'center'
''')
class GridBSItem(ButtonBehavior, BoxLayout):
source = StringProperty()
caption = StringProperty()
class MDGridBottomSheet(MDBottomSheet):
def __init__(self, **kwargs):
super(MDGridBottomSheet, self).__init__(**kwargs)
self.gl_content.padding = (dp(16), 0, dp(16), dp(24))
self.gl_content.height = dp(24)
self.gl_content.cols = 3
def add_item(self, text, callback, icon_src):
item = GridBSItem(
caption=text,
on_release=callback,
source=icon_src
)
item.bind(on_release=lambda x: self.dismiss())
if len(self.gl_content.children) % 3 == 0:
self.gl_content.height += dp(96)
self.gl_content.add_widget(item)
| mit | -6,924,945,365,796,255,000 | -5,173,691,999,524,274,000 | 30.995261 | 260 | 0.642868 | false |
Elettronik/SickRage | lib/pgi/cffilib/gir/giunioninfo.py | 20 | 1903 | # Copyright 2013 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
from .._compat import xrange
from ._ffi import lib
from .gibaseinfo import GIBaseInfo, GIInfoType
from .gitypeinfo import GITypeInfo
from .giregisteredtypeinfo import GIRegisteredTypeInfo
@GIBaseInfo._register(GIInfoType.UNION)
class GIUnionInfo(GIRegisteredTypeInfo):
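    """Introspection data for a GObject union type (wraps the g_union_info_*
    GIRepository calls)."""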
@property
def n_fields(self):
return lib.g_union_info_get_n_fields(self._ptr)
def get_field(self, n):
return lib.g_union_info_get_field(self._ptr, n)
def get_fields(self):
for i in xrange(self.n_fields):
yield self.get_field(i)
@property
def n_methods(self):
return lib.g_union_info_get_n_methods(self._ptr)
    def get_method(self, n):
        return lib.g_union_info_get_method(self._ptr, n)
def get_methods(self):
for i in xrange(self.n_methods):
yield self.get_method(i)
@property
def is_discriminated(self):
return bool(lib.g_union_info_is_discriminated(self._ptr))
@property
def discriminator_offset(self):
return lib.g_union_info_get_discriminator_offset(self._ptr)
@property
def discriminator_type(self):
return GITypeInfo(lib.g_union_info_get_discriminator_type(self._ptr))
def get_discriminator(self, n):
# FIXME
return lib.g_union_info_get_discriminator(self._ptr, n)
def find_method(self, name):
# FIXME
return lib.g_union_info_find_method(self._ptr, name)
@property
def size(self):
return lib.g_union_info_get_size(self._ptr)
@property
def alignment(self):
return lib.g_union_info_get_alignment(self._ptr)
| gpl-3.0 | -1,471,789,161,832,336,600 | -279,756,367,070,071,330 | 27.833333 | 77 | 0.676826 | false |
KohlsTechnology/ansible | test/runner/lib/docker_util.py | 16 | 5429 | """Functions for accessing docker via the docker cli."""
from __future__ import absolute_import, print_function
import json
import os
import time
from lib.executor import (
SubprocessError,
)
from lib.util import (
ApplicationError,
run_command,
common_environment,
display,
)
from lib.config import (
EnvironmentConfig,
)
BUFFER_SIZE = 256 * 256
def get_docker_container_id():
"""
:rtype: str | None
"""
path = '/proc/self/cgroup'
if not os.path.exists(path):
return None
with open(path) as cgroup_fd:
contents = cgroup_fd.read()
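    # Each cgroup entry looks like 'N:controllers:/docker/<container_id>';
    # keep only the path component.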
paths = [line.split(':')[2] for line in contents.splitlines()]
container_ids = set(path.split('/')[2] for path in paths if path.startswith('/docker/'))
if not container_ids:
return None
if len(container_ids) == 1:
return container_ids.pop()
raise ApplicationError('Found multiple container_id candidates: %s\n%s' % (sorted(container_ids), contents))
def docker_pull(args, image):
"""
:type args: EnvironmentConfig
:type image: str
"""
if not args.docker_pull:
display.warning('Skipping docker pull for "%s". Image may be out-of-date.' % image)
return
for _ in range(1, 10):
try:
docker_command(args, ['pull', image])
return
except SubprocessError:
display.warning('Failed to pull docker image "%s". Waiting a few seconds before trying again.' % image)
time.sleep(3)
raise ApplicationError('Failed to pull docker image "%s".' % image)
def docker_put(args, container_id, src, dst):
"""
:type args: EnvironmentConfig
:type container_id: str
:type src: str
:type dst: str
"""
# avoid 'docker cp' due to a bug which causes 'docker rm' to fail
with open(src, 'rb') as src_fd:
docker_exec(args, container_id, ['dd', 'of=%s' % dst, 'bs=%s' % BUFFER_SIZE],
options=['-i'], stdin=src_fd, capture=True)
def docker_get(args, container_id, src, dst):
"""
:type args: EnvironmentConfig
:type container_id: str
:type src: str
:type dst: str
"""
# avoid 'docker cp' due to a bug which causes 'docker rm' to fail
with open(dst, 'wb') as dst_fd:
docker_exec(args, container_id, ['dd', 'if=%s' % src, 'bs=%s' % BUFFER_SIZE],
options=['-i'], stdout=dst_fd, capture=True)
def docker_run(args, image, options, cmd=None):
"""
:type args: EnvironmentConfig
:type image: str
:type options: list[str] | None
:type cmd: list[str] | None
:rtype: str | None, str | None
"""
if not options:
options = []
if not cmd:
cmd = []
for _ in range(1, 3):
try:
return docker_command(args, ['run'] + options + [image] + cmd, capture=True)
except SubprocessError as ex:
display.error(ex)
display.warning('Failed to run docker image "%s". Waiting a few seconds before trying again.' % image)
time.sleep(3)
raise ApplicationError('Failed to run docker image "%s".' % image)
def docker_rm(args, container_id):
"""
:type args: EnvironmentConfig
:type container_id: str
"""
docker_command(args, ['rm', '-f', container_id], capture=True)
def docker_inspect(args, container_id):
"""
:type args: EnvironmentConfig
:type container_id: str
:rtype: list[dict]
"""
if args.explain:
return []
try:
stdout, _ = docker_command(args, ['inspect', container_id], capture=True)
return json.loads(stdout)
except SubprocessError as ex:
try:
return json.loads(ex.stdout)
except:
raise ex # pylint: disable=locally-disabled, raising-bad-type
def docker_network_inspect(args, network):
"""
:type args: EnvironmentConfig
:type network: str
:rtype: list[dict]
"""
if args.explain:
return []
try:
stdout, _ = docker_command(args, ['network', 'inspect', network], capture=True)
return json.loads(stdout)
except SubprocessError as ex:
try:
return json.loads(ex.stdout)
except:
raise ex # pylint: disable=locally-disabled, raising-bad-type
def docker_exec(args, container_id, cmd, options=None, capture=False, stdin=None, stdout=None):
"""
:type args: EnvironmentConfig
:type container_id: str
:type cmd: list[str]
:type options: list[str] | None
:type capture: bool
:type stdin: file | None
:type stdout: file | None
:rtype: str | None, str | None
"""
if not options:
options = []
return docker_command(args, ['exec'] + options + [container_id] + cmd, capture=capture, stdin=stdin, stdout=stdout)
def docker_command(args, cmd, capture=False, stdin=None, stdout=None):
"""
:type args: EnvironmentConfig
:type cmd: list[str]
:type capture: bool
:type stdin: file | None
:type stdout: file | None
:rtype: str | None, str | None
"""
env = docker_environment()
return run_command(args, ['docker'] + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout)
def docker_environment():
"""
:rtype: dict[str, str]
"""
env = common_environment()
env.update(dict((key, os.environ[key]) for key in os.environ if key.startswith('DOCKER_')))
return env
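# Illustrative sketch (assumes an EnvironmentConfig `args` built by the test
# runner; the image name is a placeholder):
#
#     stdout, _ = docker_run(args, 'ubuntu:16.04', ['--detach'])
#     container_id = stdout.strip()
#     docker_exec(args, container_id, ['uname', '-a'])
#     docker_rm(args, container_id)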
| gpl-3.0 | 8,925,152,469,466,206,000 | 1,981,573,051,028,745,200 | 25.612745 | 119 | 0.603058 | false |
satish-avninetworks/murano | murano/dsl/murano_package.py | 1 | 7758 | # Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import weakref
import semantic_version
import six
from yaql.language import specs
from yaql.language import utils
from murano.dsl import constants
from murano.dsl import dsl_types
from murano.dsl import exceptions
from murano.dsl import helpers
from murano.dsl import meta as dslmeta
from murano.dsl import murano_object
from murano.dsl import murano_type
from murano.dsl import namespace_resolver
from murano.dsl import principal_objects
from murano.dsl import yaql_integration
class MuranoPackage(dsl_types.MuranoPackage, dslmeta.MetaProvider):
def __init__(self, package_loader, name, version=None,
runtime_version=None, requirements=None, meta=None):
super(MuranoPackage, self).__init__()
self._package_loader = weakref.proxy(package_loader)
self._name = name
self._meta = None
self._version = helpers.parse_version(version)
self._runtime_version = helpers.parse_version(runtime_version)
self._requirements = {
name: semantic_version.Spec('==' + str(self._version.major))
}
if name != constants.CORE_LIBRARY:
self._requirements[constants.CORE_LIBRARY] = \
semantic_version.Spec('==0')
self._classes = {}
self._imported_types = {object, murano_object.MuranoObject}
for key, value in six.iteritems(requirements or {}):
self._requirements[key] = helpers.parse_version_spec(value)
self._load_queue = {}
self._native_load_queue = {}
if self.name == constants.CORE_LIBRARY:
principal_objects.register(self)
self._package_class = self._create_package_class()
self._meta = dslmeta.MetaData(
meta, dsl_types.MetaTargets.Package, self._package_class)
@property
def package_loader(self):
return self._package_loader
@property
def name(self):
return self._name
@property
def version(self):
return self._version
@property
def runtime_version(self):
return self._runtime_version
@property
def requirements(self):
return self._requirements
@property
def classes(self):
return set(self._classes.keys()).union(
self._load_queue.keys()).union(self._native_load_queue.keys())
def get_resource(self, name):
raise NotImplementedError('resource API is not implemented')
# noinspection PyMethodMayBeStatic
def get_class_config(self, name):
return {}
def _register_mpl_classes(self, data, name=None):
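        # Build the class whose name matches `name` right away; other class
        # definitions found in the same data are queued for lazy loading.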
type_obj = self._classes.get(name)
if type_obj is not None:
return type_obj
if callable(data):
data = data()
data = helpers.list_value(data)
unnamed_class = None
last_ns = {}
for cls_data in data:
last_ns = cls_data.setdefault('Namespaces', last_ns.copy())
if len(cls_data) == 1:
continue
cls_name = cls_data.get('Name')
if not cls_name:
if unnamed_class:
raise exceptions.AmbiguousClassName(name)
unnamed_class = cls_data
else:
ns_resolver = namespace_resolver.NamespaceResolver(last_ns)
cls_name = ns_resolver.resolve_name(cls_name)
if cls_name == name:
type_obj = murano_type.create(
cls_data, self, cls_name, ns_resolver)
self._classes[name] = type_obj
else:
self._load_queue.setdefault(cls_name, cls_data)
if type_obj is None and unnamed_class:
unnamed_class['Name'] = name
return self._register_mpl_classes(unnamed_class, name)
return type_obj
def _register_native_class(self, cls, name):
if cls in self._imported_types:
return self._classes[name]
try:
m_class = self.find_class(name, False)
except exceptions.NoClassFound:
m_class = self._register_mpl_classes({'Name': name}, name)
m_class.extension_class = cls
for method_name in dir(cls):
if method_name.startswith('_'):
continue
method = getattr(cls, method_name)
if not any((
helpers.inspect_is_method(cls, method_name),
helpers.inspect_is_static(cls, method_name),
helpers.inspect_is_classmethod(cls, method_name))):
continue
method_name_alias = (getattr(
method, '__murano_name', None) or
specs.convert_function_name(
method_name, yaql_integration.CONVENTION))
m_class.add_method(method_name_alias, method, method_name)
self._imported_types.add(cls)
return m_class
def register_class(self, cls, name=None):
if inspect.isclass(cls):
name = name or getattr(cls, '__murano_name', None) or cls.__name__
if name in self._classes:
self._register_native_class(cls, name)
else:
self._native_load_queue.setdefault(name, cls)
elif isinstance(cls, dsl_types.MuranoType):
self._classes[cls.name] = cls
elif name not in self._classes:
self._load_queue[name] = cls
def find_class(self, name, search_requirements=True):
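        # Resolution order: pending native classes, pending MuranoPL classes,
        # already registered classes, then (optionally) the required packages.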
payload = self._native_load_queue.pop(name, None)
if payload is not None:
return self._register_native_class(payload, name)
payload = self._load_queue.pop(name, None)
if payload is not None:
result = self._register_mpl_classes(payload, name)
if result:
return result
result = self._classes.get(name)
if result:
return result
if search_requirements:
pkgs_for_search = []
for package_name, version_spec in six.iteritems(
self._requirements):
if package_name == self.name:
continue
referenced_package = self._package_loader.load_package(
package_name, version_spec)
try:
return referenced_package.find_class(name, False)
except exceptions.NoClassFound:
pkgs_for_search.append(referenced_package)
continue
raise exceptions.NoClassFound(
name, packages=pkgs_for_search + [self])
raise exceptions.NoClassFound(name, packages=[self])
@property
def context(self):
return None
def _create_package_class(self):
ns_resolver = namespace_resolver.NamespaceResolver(None)
return murano_type.MuranoClass(
ns_resolver, self.name, self, utils.NO_VALUE)
def get_meta(self, context):
if not self._meta:
return []
return self._meta.get_meta(context)
def __repr__(self):
return 'MuranoPackage({name})'.format(name=self.name)
| apache-2.0 | 959,477,402,751,523,600 | -6,575,480,018,239,622,000 | 35.252336 | 78 | 0.59603 | false |
DevHugo/zds-site | zds/utils/tutorials.py | 1 | 2669 | # coding: utf-8
import os
# Used for indexing tutorials: we need to parse each manifest to know which content has been published
class GetPublished:
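    """Collects the pks of published parts, chapters and extracts by parsing
    each published tutorial's manifest."""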
published_part = []
published_chapter = []
published_extract = []
def __init__(self):
pass
@classmethod
def get_published_content(cls):
        # If all arrays are empty, load them
if not len(GetPublished.published_part) and \
not len(GetPublished.published_chapter) and \
not len(GetPublished.published_extract):
# Get all published tutorials
from zds.tutorial.models import Tutorial
tutorials_database = Tutorial.objects.filter(sha_public__isnull=False).all()
for tutorial in tutorials_database:
# Load Manifest
json = tutorial.load_json_for_public()
# Parse it
GetPublished.load_tutorial(json)
return {"parts": GetPublished.published_part,
"chapters": GetPublished.published_chapter,
"extracts": GetPublished.published_extract}
@classmethod
def load_tutorial(cls, json):
# Load parts, chapter and extract
if 'parts' in json:
for part_json in json['parts']:
# If inside of parts we have chapters, load it
GetPublished.load_chapters(part_json)
GetPublished.load_extracts(part_json)
GetPublished.published_part.append(part_json['pk'])
GetPublished.load_chapters(json)
GetPublished.load_extracts(json)
@classmethod
def load_chapters(cls, json):
if 'chapters' in json:
for chapters_json in json['chapters']:
GetPublished.published_chapter.append(chapters_json['pk'])
GetPublished.load_extracts(chapters_json)
return GetPublished.published_chapter
@classmethod
def load_extracts(cls, json):
if 'extracts' in json:
for extract_json in json['extracts']:
GetPublished.published_extract.append(extract_json['pk'])
return GetPublished.published_extract
def get_blob(tree, chemin):
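    """Return the UTF-8 content of the blob at path `chemin` inside the git
    tree, searching sub-trees recursively; returns None when no such blob
    exists."""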
for blob in tree.blobs:
try:
if os.path.abspath(blob.path) == os.path.abspath(chemin):
data = blob.data_stream.read()
return data.decode('utf-8')
except (OSError, IOError):
return ""
if len(tree.trees) > 0:
for atree in tree.trees:
result = get_blob(atree, chemin)
if result is not None:
return result
return None
else:
return None
| gpl-3.0 | 1,795,364,862,088,379,000 | 4,118,165,211,108,298,000 | 30.034884 | 103 | 0.59423 | false |
piyushroshan/xen-4.3.2 | tools/python/xen/xm/help.py | 52 | 3242 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <[email protected]>
#============================================================================
"""Variable definition and help support for Python defconfig files.
"""
import sys
class Vars:
"""A set of configuration variables.
"""
def __init__(self, name, help, env):
"""Create a variable set.
name name of the defconfig file
help help flag
env local environment
"""
self.name = name
self.help = help
self.env = env
self.vars = []
def var(self, name, use=None, check=None):
"""Define a configuration variable.
If provided, the check function will be called as check(var, val)
where var is the variable name and val is its value (string).
It should return a new value for the variable, or raise ValueError if
the value is not acceptable.
name variable name
use variable usage string
check variable check function
"""
self.vars.append(Var(name, use, check))
def check(self):
"""Execute the variable checks or print help, depending on the value
of the help flag passed to the constructor.
"""
if self.help:
self.doHelp()
else:
for v in self.vars:
v.doCheck(self.env)
def doHelp(self, out=sys.stderr):
"""Print help for the variables.
"""
if self.vars:
print >>out, "\nConfiguration variables for %s:\n" % self.name
for v in self.vars:
v.doHelp(out)
print >>out
class Var:
"""A single variable.
"""
def __init__(self, name, use, check):
"""Create a variable.
name variable name
use variable use string
check variable value check function
"""
self.name = name
self.use = use or ''
self.check = check
def doCheck(self, env):
"""Execute the check and set the variable to the new value.
"""
if not self.check: return
try:
env[self.name] = self.check(self.name, env.get(self.name))
except StandardError, ex:
raise sys.exc_type, self.name + " - " + str(ex)
def doHelp(self, out):
"""Print help for the variable.
"""
print >>out, "%-12s" % self.name, self.use
| gpl-2.0 | -5,461,120,152,358,725,000 | -8,443,955,145,466,627,000 | 31.42 | 77 | 0.55984 | false |
HPPTECH/hpp_IOSTressTest | Refer/IOST_OLD_SRC/IOST_0.18/IOST.py | 1 | 8248 | #!/usr/bin/env python
#======================================================================
#
# Project : hpp_IOStressTest
# File : IOST.py
# Date : Sep 21, 2016
# Author : HuuHoang Nguyen
# Contact : [email protected]
# : [email protected]
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import operator
import sys
import base64
import time
# from Libs.IOST_Prepare import *
# from Libs.IOST_Config import *
# from Libs.IOST_WMain import *
# from Libs.IOST_Basic import *
sys.path.append("Libs")
sys.path.append("../Libs")
from Libs import IOST_Basic
from Libs import IOST_Config
from Libs import IOST_WMain
from Libs.IOST_WMain import *
from Libs import IOST_Prepare
import gtk
import gtk.glade
import gobject
# from Libs import *
# from Libs import *
# from Libs import *
#======================================================================
IOST_Debug_Enable = 0
#======================================================================
# argv_number = len(sys.argv)
# for i in range(0, argv_number):
# print sys.argv[i]
#======================================================================
IOST_WMAIN = "IOST_WMain"
IOST_CHIP = "Skylark"
IOST_OBJECT = "_Skylark"
IOST_CONFIG_DATA_DEFAULTE_FILE = "IOST_DataDefault.json"
IOST_CONFIG_OBJS_DEFAULTE_FILE = "IOST_ObjsDefault.json"
IOST_PROGRAM_PATH=os.path.dirname(os.path.abspath(sys.argv[0]))
if IOST_Debug_Enable:
print IOST_PROGRAM_PATH
IOST_SSH_BIN = "ssh"
IOST_TELNET_BIN = "telnet"
IOST_SHELL_BIN = os.environ["SHELL"]
IOST_CONGIG_DATA_PATH = IOST_PROGRAM_PATH + "/" + IOST_CHIP + "/" + IOST_CONFIG_DATA_DEFAULTE_FILE
if IOST_Debug_Enable:
print IOST_CONGIG_DATA_PATH
IOST_CONGIG_OBJS_PATH = IOST_PROGRAM_PATH + "/" + IOST_CHIP + "/" + IOST_CONFIG_OBJS_DEFAULTE_FILE
if IOST_Debug_Enable:
print IOST_CONGIG_OBJS_PATH
#======================================================================
class IOST(IOST_WMain):
"""
    This is the main class of the program.
"""
#----------------------------------------------------------------------
def __init__(self, glade_filename = "",
window_name = "",
object_name = "",
iost_data = None,
iost_objs = None):
"The function is main function to start IOST program"
IOST_WMain.__init__(self, glade_filename, window_name, object_name, iost_data, iost_objs)
#----------------------------------------------------------------------
def IOST_Main(self):
gtk.main()
#======================================================================
# MAIN FUNCTION
#======================================================================
if __name__ == "__main__":
"The main function of IOST"
IOST_Config=IOST_Config()
#-------------------------------------------------------------------------
IOST_Config.IOST_Data = IOST_Config.ReadFile(file_name=IOST_CONGIG_DATA_PATH)
#-------------------------------------------------------------------------
IOST_Config.IOST_Objs = IOST_Config.ReadFile(file_name=IOST_CONGIG_OBJS_PATH)
IOST_Config.IOST_Data["GladeFileName"] = IOST_PROGRAM_PATH + "/" + IOST_CHIP+ '/'+ IOST_Config.IOST_Data["GladeFileName"] + '_'+ IOST_Config.IOST_Data["ProjectVersion"] + '.glade'
# print IOST_Config.IOST_Data["GladeFileName"]
# print "=================================================================="
# pprint (IOST_Config.IOST_Data.keys())
# print "=================================================================="
# pprint (IOST_Config.IOST_Objs["IOST_WMain"].keys())
# print "=================================================================="
argv_number = len(sys.argv)
if IOST_Debug_Enable:
print "=================================================================="
print "Number of arg have entered is : ", argv_number
for i in range(0, argv_number):
            print "========== argv[%s] = : %s" % (i, sys.argv[i])
#Add config file to a Files list
for i in range(1, argv_number):
# print i
# IOST_Config.IOST_Files.append(sys.argv[1]+'/'+sys.argv[i])
if os.path.isfile(sys.argv[i]):
IOST_Config.AddFileConfig2List(IOST_Config.IOST_Files, sys.argv[i])
else:
IOST_Config.AddFileConfig2List(IOST_Config.IOST_Files, IOST_PROGRAM_PATH +'/'+sys.argv[i])
    # Print the names of all config files that have been entered (debug only)
if IOST_Debug_Enable:
print "=========================The list config files have entered==========================="
print "Number of config Files is %s" % (len (IOST_Config.IOST_Files))
print "Number of config files is: "
for i in range(0, len (IOST_Config.IOST_Files)):
pprint (IOST_Config.IOST_Files[i])
    # Read each file and store its parsed dict in the Files list at location (2n+1)
if argv_number > 1:
IOST_Config.AddObjConfig2List(IOST_Config.IOST_Files)
if IOST_Debug_Enable:
for i in range(0, len (IOST_Config.IOST_Files)):
print "================================= %s =================================" %i
pprint (IOST_Config.IOST_Files[i])
for i in range(0, len (IOST_Config.IOST_Files), 2):
IOST_Config.ModifyIOST_Objs(IOST_Config.IOST_Data, IOST_Config.IOST_Files[i+1] )
if IOST_Debug_Enable:
print "IOST_Config.IOST_Data is : "
pprint (IOST_Config.IOST_Data)
print "IOST_Config.IOST_Data['I2C0'] is : "
pprint (IOST_Config.IOST_Data["I2C0"])
IOST_Config.IOST_Data["IOST_Path"] = IOST_PROGRAM_PATH
IOST_Config.IOST_Data["IOST_RunPath"] = os.getcwd()
IOST_Config.IOST_Data["ConfigFile"]["CfgDataPath"] = IOST_CONGIG_DATA_PATH
IOST_Config.IOST_Data["ConfigFile"]["CfgObjsPath"] = IOST_CONGIG_OBJS_PATH
#-------------------------------------------------------------------------
IOST_Config.WriteFile(IOST_PROGRAM_PATH+"/Temp_Configs/Config_Data.json", IOST_Config.IOST_Data)
IOST_Config.WriteFile(IOST_PROGRAM_PATH+"/Temp_Configs/Config_Objects.json", IOST_Config.IOST_Objs)
# Some debug code here
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
if False:
for key, value in IOST_Config.IOST_Objs["IOST_WSetupTestcase"].iteritems():
print key, value
if False:
len__temp = len(IOST_Config.IOST_Objs["IOST_WSetupTestcase"])
print "=============================================="
print "Len of IOST_WSetupTestcase object is", len__temp
print "=============================================="
print IOST_Config.IOST_Objs["IOST_WSetupTestcase"].keys()
for i in range(0, len__temp, 2 ):
print "=============================================="
print i
print "----------------------------------------------"
print IOST_Config.IOST_Objs["IOST_WSetupTestcase"].keys()[i]
print "----------------------------------------------"
print IOST_Config.IOST_Objs["IOST_WSetupTestcase"].keys()[i+1]
print "----------------------------------------------"
print IOST_Config.IOST_Objs["IOST_WSetupTestcase"][IOST_Config.IOST_Objs["IOST_WSetupTestcase"].keys()[(i+1)]]
if False:
exit(1)
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#-------------------------------------------------------------------------
main = IOST(glade_filename=IOST_Config.IOST_Data["GladeFileName"],
window_name=IOST_WMAIN,
object_name=IOST_OBJECT,
iost_data=IOST_Config.IOST_Data, iost_objs=IOST_Config.IOST_Objs)
main.IOST_Main()
| mit | 4,420,168,346,719,087,600 | 4,961,571,924,577,833,000 | 37.90566 | 183 | 0.466537 | false |
LUTAN/tensorflow | tensorflow/examples/learn/text_classification_cnn.py | 53 | 4430 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
n_words = 0
def cnn_model(features, target):
"""2 layer ConvNet to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
target = tf.one_hot(target, 15, 1, 0)
word_vectors = tf.contrib.layers.embed_sequence(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
word_vectors = tf.expand_dims(word_vectors, 3)
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(
word_vectors, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a RELU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(
conv1,
ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1],
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.contrib.layers.convolution2d(
pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
classifier = learn.Estimator(model_fn=cnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 | -7,717,353,867,816,155,000 | 7,456,578,337,570,613,000 | 33.609375 | 80 | 0.702257 | false |
gkarlin/django-jenkins | build/pylint/reporters/html.py | 5 | 2481 | # Copyright (c) 2003-2006 Sylvain Thenault ([email protected]).
# Copyright (c) 2003-2011 LOGILAB S.A. (Paris, FRANCE).
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""HTML reporter"""
import sys
from cgi import escape
from logilab.common.ureports import HTMLWriter, Section, Table
from pylint.interfaces import IReporter
from pylint.reporters import BaseReporter
class HTMLReporter(BaseReporter):
"""report messages and layouts in HTML"""
__implements__ = IReporter
extension = 'html'
def __init__(self, output=sys.stdout):
BaseReporter.__init__(self, output)
self.msgs = []
def add_message(self, msg_id, location, msg):
"""manage message of different type and in the context of path"""
module, obj, line, col_offset = location[1:]
sigle = self.make_sigle(msg_id)
self.msgs += [sigle, module, obj, str(line), str(col_offset), escape(msg)]
def set_output(self, output=None):
"""set output stream
        messages buffered for the old output are processed first"""
if self.out and self.msgs:
self._display(Section())
BaseReporter.set_output(self, output)
def _display(self, layout):
"""launch layouts display
overridden from BaseReporter to add insert the messages section
(in add_message, message is not displayed, just collected so it
can be displayed in an html table)
"""
if self.msgs:
# add stored messages to the layout
msgs = ['type', 'module', 'object', 'line', 'col_offset', 'message']
msgs += self.msgs
sect = Section('Messages')
layout.append(sect)
sect.append(Table(cols=6, children=msgs, rheaders=1))
self.msgs = []
HTMLWriter().format(layout, self.out)
| lgpl-3.0 | 395,316,471,220,725,600 | -7,194,137,870,837,780,000 | 36.590909 | 82 | 0.667473 | false |