"""
## Elements
### `<document>`
### `<line>`
### `<span>`
attributes:
- `width`:
if not supplied, the span will be limited only by the width of the
containing element.
- `align`:
one of `left`, `right`, `centerLeft`, `centerRight` or `center`. `center`
is shorthand for `centerLeft`. Defaults to `left`. If width is not
specified and cannot be determined from the context, the content width is
used and this attribute has no effect.
### `<bold>`
"""
import re
def _compress_whitespace(xml):
""" Replace all sequences of whitespace characters in an xml etree with a
single space.
To add multiple spaces use a fixed width `span` or set the alignment of the
containing element.
"""
for elem in xml.iter():
if elem.text is not None:
elem.text = re.sub(r'\s+', ' ', elem.text)
if elem.tail is not None:
elem.tail = re.sub(r'\s+', ' ', elem.tail)
def _strip_outer_whitespace(xml):
""" Removes whitespace immediately after opening tags and immediately
before closing tags.
Intended to make it safer to do pretty printing without affecting the
printer's output.
"""
for elem in xml.iter():
if elem.text is not None:
# if there are child tags, trailing whitespace will be attached to
# the tail of the last child rather than `elem.text` so only strip
# the leading whitespace from `elem.text`.
if len(elem.getchildren()):
elem.text = elem.text.lstrip()
else:
elem.text = elem.text.strip()
if elem.text == '':
elem.text = None
if elem.tail is not None:
if elem.getnext() is None:
elem.tail = elem.tail.rstrip()
if elem.tail == '':
elem.tail = None
class _XMLRenderer(object):
def __init__(self, source, *, max_width=None, prelude=True):
self._source = source
self._max_width = max_width
self._prelude = prelude
self._bold_stack = 0
self._highlight_stack = 0
try:
import lxml
assert lxml
except ImportError as e:
raise ImportError(
"lxml not installed. "
"Please install python-linemode with the XMLRenderer flag."
) from e
# TODO this seems funky
self._generator = self._render()
def _body_width(self, elem, *, max_width=None):
width = len(elem.text or '')
if max_width is not None:
if width > max_width:
return max_width
for child in elem.getchildren():
width += self._element_width(
child, max_width=max_width - width
)
width += len(child.tail or '')
if width > max_width:
return max_width
else:
for child in elem.getchildren():
width += self._element_width(child)
width += len(child.tail or '')
return width
def _span_width(self, elem, *, max_width=None):
if 'width' in elem.attrib:
width = int(elem.attrib['width'])
if max_width is not None:
width = min(width, max_width)
return width
# TODO right to left filling
alignment = elem.attrib.get('align', 'left')
if alignment in {'right', 'centerLeft', 'centerRight', 'center'}:
if max_width is not None:
return max_width
return self._body_width(elem, max_width=max_width)
def _element_width(self, elem, *, max_width=None):
if elem.tag in {'span', 'bold', 'highlight'}:
width = self._span_width(elem, max_width=max_width)
else:
raise Exception('unknown element', elem)
if max_width is not None:
assert width <= max_width
return width
def _render_bold(self, elem, *, max_width=None):
self._bold_stack += 1
if self._bold_stack == 1:
yield ('select-bold')
yield from self._render_body(elem, max_width=max_width)
self._bold_stack -= 1
if self._bold_stack == 0:
yield ('cancel-bold')
def _render_highlight(self, elem, *, max_width=None):
self._highlight_stack += 1
if self._highlight_stack == 1:
yield ('select-highlight')
yield from self._render_body(elem, max_width=max_width)
self._highlight_stack -= 1
if self._highlight_stack == 0:
yield ('cancel-highlight')
def _render_span(self, elem, *, max_width=None):
width = self._span_width(elem, max_width=max_width)
body_width = self._body_width(elem, max_width=width)
if body_width >= width:
# no point in trying to justify text that overflows. Just align
# left and truncate rather than trying to truncate at the start
alignment = 'left'
else:
alignment = elem.attrib.get('align', 'left')
if alignment == 'left':
left_padding = 0
elif alignment == 'right':
left_padding = width - body_width
elif alignment == 'centerLeft' or alignment == 'center':
left_padding = (width - body_width) // 2
elif alignment == 'centerRight':
left_padding = int(round((width - body_width) / 2))
if left_padding > 0:
yield ('write', ' ' * left_padding)
yield from self._render_body(elem, max_width=width)
right_padding = width - body_width - left_padding
if right_padding > 0:
yield ('write', ' ' * right_padding)
def _render_body(self, elem, *, max_width=None):
if max_width is None:
max_width = self._body_width(elem)
children = elem.getchildren()
if elem.text is not None and len(elem.text):
yield ('write', elem.text[:max_width])
max_width -= len(elem.text)
if max_width <= 0:
return
for child in children:
yield from self._render_element(child, max_width=max_width)
max_width -= self._element_width(child, max_width=max_width)
assert max_width >= 0
if max_width == 0:
return
if child.tail is not None and len(child.tail):
yield ('write', child.tail[:max_width])
max_width -= len(child.tail)
if max_width <= 0:
return
def _render_element(self, elem, *, max_width=None):
yield from {
'span': self._render_span,
'bold': self._render_bold,
'highlight': self._render_highlight,
}[elem.tag](elem, max_width=max_width)
def _render(self):
# imported here as lxml is an `extras_require` dependency
from lxml import etree
xml = etree.fromstring(self._source)
_strip_outer_whitespace(xml)
_compress_whitespace(xml)
if self._prelude:
yield ('reset')
for line in xml.getchildren():
height = int(line.attrib.get('height', '1'))
if height != 1:
yield {
2: ('fontsize-medium'),
3: ('fontsize-large'),
}[height]
yield from self._render_body(line, max_width=self._max_width)
yield ('newline')
if height != 1:
yield ('fontsize-small')
# TODO better name for flag
if self._prelude:
yield ('cut-through')
def __iter__(self):
return self
def __next__(self):
return next(self._generator)
def render(source, *, max_width=None, prelude=True):
return _XMLRenderer(source, max_width=max_width, prelude=prelude)
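# Illustrative usage sketch (not part of the original module). It assumes lxml
# is installed and uses only the elements documented in the module docstring;
# the document source and the max_width value are made up for the example.
def _example_render_usage():
    source = (
        '<document>'
        '<line><bold>Total</bold><span width="10" align="right">4.50</span></line>'
        '<line>Thank you</line>'
        '</document>'
    )
    # `render` yields printer commands: bare strings such as 'reset' and
    # 'newline', and ('write', text) tuples for printable content.
    return list(render(source, max_width=32))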
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import netaddr
from oslo_utils import uuidutils
import six
from neutron._i18n import _
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import ipv6_utils
from neutron.common import utils as common_utils
from neutron.ipam import exceptions as ipam_exc
@six.add_metaclass(abc.ABCMeta)
class SubnetPool(object):
"""Represents a pool of IPs available inside an address scope."""
@six.add_metaclass(abc.ABCMeta)
class SubnetRequest(object):
"""Carries the data needed to make a subnet request
The data validated and carried by an instance of this class is the data
that is common to any type of request. This class shouldn't be
instantiated on its own. Rather, a subclass of this class should be used.
"""
def __init__(self, tenant_id, subnet_id,
gateway_ip=None, allocation_pools=None):
"""Initialize and validate
:param tenant_id: The tenant id who will own the subnet
:type tenant_id: str uuid
:param subnet_id: Neutron's subnet ID
:type subnet_id: str uuid
:param gateway_ip: An IP to reserve for the subnet gateway.
:type gateway_ip: None or convertible to netaddr.IPAddress
:param allocation_pools: The pool from which IPAM should allocate
addresses. The allocator *may* allow allocating addresses outside
of this range if specifically requested.
:type allocation_pools: A list of netaddr.IPRange. None if not
specified.
"""
self._tenant_id = tenant_id
self._subnet_id = subnet_id
self._gateway_ip = None
self._allocation_pools = None
if gateway_ip is not None:
self._gateway_ip = netaddr.IPAddress(gateway_ip)
if allocation_pools is not None:
allocation_pools = sorted(allocation_pools)
previous = None
for pool in allocation_pools:
if not isinstance(pool, netaddr.ip.IPRange):
raise TypeError(_("Ranges must be netaddr.IPRange"))
if previous and pool.first <= previous.last:
raise ValueError(_("Ranges must not overlap"))
previous = pool
if 1 < len(allocation_pools):
# Checks that all the ranges are in the same IP version.
# IPRange sorts first by ip version so we can get by with just
# checking the first and the last range having sorted them
# above.
first_version = allocation_pools[0].version
last_version = allocation_pools[-1].version
if first_version != last_version:
raise ValueError(_("Ranges must be in the same IP "
"version"))
self._allocation_pools = allocation_pools
if self.gateway_ip and self.allocation_pools:
if self.gateway_ip.version != self.allocation_pools[0].version:
raise ValueError(_("Gateway IP version inconsistent with "
"allocation pool version"))
@property
def tenant_id(self):
return self._tenant_id
@property
def subnet_id(self):
return self._subnet_id
@property
def gateway_ip(self):
return self._gateway_ip
@property
def allocation_pools(self):
return self._allocation_pools
def _validate_with_subnet(self, subnet_cidr):
if self.allocation_pools:
if subnet_cidr.version != self.allocation_pools[0].version:
raise ipam_exc.IpamValueInvalid(_(
"allocation_pools use the wrong ip version"))
for pool in self.allocation_pools:
if pool not in subnet_cidr:
raise ipam_exc.IpamValueInvalid(_(
"allocation_pools are not in the subnet"))
class AnySubnetRequest(SubnetRequest):
"""A template for allocating an unspecified subnet from IPAM
Support for this type of request in a driver is optional. For example, the
initial reference implementation will not support this. The API has no way
of creating a subnet without a specific address until subnet-allocation is
implemented.
"""
WILDCARDS = {constants.IPv4: '0.0.0.0',
constants.IPv6: '::'}
def __init__(self, tenant_id, subnet_id, version, prefixlen,
gateway_ip=None, allocation_pools=None):
"""
:param version: Either constants.IPv4 or constants.IPv6
:param prefixlen: The prefix len requested. Must be within the min and
max allowed.
:type prefixlen: int
"""
super(AnySubnetRequest, self).__init__(
tenant_id=tenant_id,
subnet_id=subnet_id,
gateway_ip=gateway_ip,
allocation_pools=allocation_pools)
net = netaddr.IPNetwork(self.WILDCARDS[version] + '/' + str(prefixlen))
self._validate_with_subnet(net)
self._prefixlen = prefixlen
@property
def prefixlen(self):
return self._prefixlen
class SpecificSubnetRequest(SubnetRequest):
"""A template for allocating a specified subnet from IPAM
The initial reference implementation will probably just allow any
allocation, even overlapping ones. This can be expanded on by future
blueprints.
"""
def __init__(self, tenant_id, subnet_id, subnet_cidr,
gateway_ip=None, allocation_pools=None):
"""
:param subnet_cidr: The subnet requested. Can be IPv4 or IPv6. However,
when IPAM tries to fulfill this request, the IP version must match
the version of the address scope being used.
:type subnet_cidr: netaddr.IPNetwork or convertible to one
"""
super(SpecificSubnetRequest, self).__init__(
tenant_id=tenant_id,
subnet_id=subnet_id,
gateway_ip=gateway_ip,
allocation_pools=allocation_pools)
self._subnet_cidr = netaddr.IPNetwork(subnet_cidr)
self._validate_with_subnet(self._subnet_cidr)
@property
def subnet_cidr(self):
return self._subnet_cidr
@property
def prefixlen(self):
return self._subnet_cidr.prefixlen
@six.add_metaclass(abc.ABCMeta)
class AddressRequest(object):
"""Abstract base class for address requests"""
class SpecificAddressRequest(AddressRequest):
"""For requesting a specified address from IPAM"""
def __init__(self, address):
"""
:param address: The address being requested
:type address: A netaddr.IPAddress or convertible to one.
"""
super(SpecificAddressRequest, self).__init__()
self._address = netaddr.IPAddress(address)
@property
def address(self):
return self._address
class AnyAddressRequest(AddressRequest):
"""Used to request any available address from the pool."""
class AutomaticAddressRequest(SpecificAddressRequest):
"""Used to create auto generated addresses, such as EUI64"""
EUI64 = 'eui64'
def _generate_eui64_address(self, **kwargs):
if set(kwargs) != set(['prefix', 'mac']):
raise ipam_exc.AddressCalculationFailure(
address_type='eui-64',
reason=_('must provide exactly 2 arguments - prefix and MAC'))
prefix = kwargs['prefix']
mac_address = kwargs['mac']
return ipv6_utils.get_ipv6_addr_by_EUI64(prefix, mac_address)
_address_generators = {EUI64: _generate_eui64_address}
def __init__(self, address_type=EUI64, **kwargs):
"""
This constructor builds an automatic IP address. Parameters needed for
generating it can be passed as optional keyword arguments.
:param address_type: the type of address to generate.
It could be an eui-64 address, a random IPv6 address, or
an ipv4 link-local address.
For the Kilo release only eui-64 addresses will be supported.
"""
address_generator = self._address_generators.get(address_type)
if not address_generator:
raise ipam_exc.InvalidAddressType(address_type=address_type)
address = address_generator(self, **kwargs)
super(AutomaticAddressRequest, self).__init__(address)
class RouterGatewayAddressRequest(AddressRequest):
"""Used to request allocating the special router gateway address."""
class AddressRequestFactory(object):
"""Builds request using ip info
Additional parameters (port and context) are not used in the default
implementation, but are planned to be used in subclasses
provided by specific IPAM drivers.
"""
@classmethod
def get_request(cls, context, port, ip_dict):
"""
:param context: context (not used here, but can be used in sub-classes)
:param port: port dict (not used here, but can be used in sub-classes)
:param ip_dict: dict that can contain 'ip_address', 'eui64_address',
'mac' and 'subnet_cidr' keys. The request to generate is selected
based on which of these keys are set.
:return: returns prepared AddressRequest (specific or any)
"""
if ip_dict.get('ip_address'):
return SpecificAddressRequest(ip_dict['ip_address'])
elif ip_dict.get('eui64_address'):
return AutomaticAddressRequest(prefix=ip_dict['subnet_cidr'],
mac=ip_dict['mac'])
else:
return AnyAddressRequest()
class SubnetRequestFactory(object):
"""Builds request using subnet info"""
@classmethod
def get_request(cls, context, subnet, subnetpool):
cidr = subnet.get('cidr')
subnet_id = subnet.get('id', uuidutils.generate_uuid())
is_any_subnetpool_request = not attributes.is_attr_set(cidr)
if is_any_subnetpool_request:
prefixlen = subnet['prefixlen']
if not attributes.is_attr_set(prefixlen):
prefixlen = int(subnetpool['default_prefixlen'])
return AnySubnetRequest(
subnet['tenant_id'],
subnet_id,
common_utils.ip_version_from_int(subnetpool['ip_version']),
prefixlen)
else:
return SpecificSubnetRequest(subnet['tenant_id'],
subnet_id,
cidr,
subnet.get('gateway_ip'),
subnet.get('allocation_pools'))
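# Illustrative sketch (not part of the original module): builds a specific
# subnet request with one allocation pool, plus an address request through the
# factory. The ids and CIDR values here are made up for the example.
def _example_build_requests():
    tenant_id = uuidutils.generate_uuid()
    subnet_id = uuidutils.generate_uuid()
    subnet_request = SpecificSubnetRequest(
        tenant_id,
        subnet_id,
        '10.0.0.0/24',
        gateway_ip='10.0.0.1',
        allocation_pools=[netaddr.IPRange('10.0.0.2', '10.0.0.254')])
    # a request for a fixed IP; an ip_dict without 'ip_address' or
    # 'eui64_address' would instead yield an AnyAddressRequest.
    address_request = AddressRequestFactory.get_request(
        None, None, {'ip_address': '10.0.0.5'})
    return subnet_request, address_request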
from collections import OrderedDict
def sorted_values_by_key(d):
return [d[k] for k in sorted(d)]
class OrderedSet:
def __init__(self):
self.d = OrderedDict()
def add(self, x):
if x not in self.d:
self.d[x] = None
def update(self, xs):
for x in xs:
if x not in self.d:
self.d[x] = None
def __len__(self):
return len(self.d)
def __iter__(self):
for x in self.d.keys():
yield x
def __contains__(self, item):
return item in self.d
class Scope:
"""A scope is used to help assign unique readable names to addressable objects.
A scope keeps references to modules, hyperparameters, inputs, and outputs.
"""
def __init__(self):
self.name_to_elem = OrderedDict()
self.elem_to_name = OrderedDict()
def register(self, name, elem):
"""Registers an addressable object with the desired name.
The name cannot exist in the scope, otherwise asserts ``False``.
Args:
name (str): Unique name.
elem (deep_architect.core.Addressable): Addressable object to register.
"""
assert name not in self.name_to_elem
assert isinstance(elem, Addressable)
self.name_to_elem[name] = elem
self.elem_to_name[elem] = name
def get_unused_name(self, prefix):
"""Creates a unique name by adding a numbered suffix to the prefix.
Args:
prefix (str): Prefix of the desired name.
Returns:
str: Unique name in the current scope.
"""
i = 0
while True:
name = prefix + str(i)
if name not in self.name_to_elem:
break
i += 1
return name
def get_name(self, elem):
"""Get the name of the addressable object registered in the scope.
The object must exist in the scope.
Args:
elem (deep_architect.core.Addressable): Addressable object
registered in the scope.
Returns:
str: Name with which the object was registered in the scope.
"""
return self.elem_to_name[elem]
def get_elem(self, name):
"""Get the object that is registered in the scope with the desired name.
The name must exist in the scope.
Args:
name (str): Name of the addressable object registered in the scope.
Returns:
deep_architect.core.Addressable: Addressable object with the corresponding name.
"""
return self.name_to_elem[name]
@staticmethod
def reset_default_scope():
"""Replaces the current default scope with a new empty scope."""
Scope.default_scope = Scope()
# NOTE: is this called once for each time core is imported?
# TODO: check.
Scope.default_scope = Scope()
class Addressable:
"""Base class for classes whose objects have to be registered in a scope.
Provides functionality to register objects in a scope.
Args:
scope (deep_architect.core.Scope): Scope object where the addressable
object will be registered.
name (str): Unique name used to register the addressable object.
"""
def __init__(self, scope, name):
scope.register(name, self)
self.scope = scope
def __repr__(self):
return self.get_name()
def get_name(self):
"""Get the name with which the object was registered in the scope.
Returns:
str: Unique name used to register the object.
"""
return self.scope.get_name(self)
def _get_base_name(self):
"""Get the class name.
Useful to create unique names for an addressable object.
Returns:
str: Class name.
"""
return self.__class__.__name__
class Hyperparameter(Addressable):
"""Base hyperparameter class.
Specific hyperparameter types are created by inheriting from this class.
Hyperparameters keep references to the modules that are dependent on them.
.. note::
Hyperparameters with easily serializable values are preferred due to the
interaction with the search logging and multi-GPU functionalities.
Typical valid serializable types are integers, floats, strings. Lists
and dictionaries of serializable types are also valid.
Args:
scope (deep_architect.core.Scope, optional): Scope in which the hyperparameter
will be registered. If none is given, uses the default scope.
name (str, optional): Name used to derive a unique name for the
hyperparameter. If none is given, uses the class name to derive
the name.
"""
def __init__(self, scope=None, name=None):
scope = scope if scope is not None else Scope.default_scope
name = scope.get_unused_name('.'.join(
['H', (name if name is not None else self._get_base_name()) + '-']))
Addressable.__init__(self, scope, name)
self.assign_done = False
self.modules = OrderedSet()
self.dependent_hyperps = OrderedSet()
self.val = None
def has_value_assigned(self):
"""Checks if the hyperparameter has been assigned a value.
Returns:
bool: ``True`` if the hyperparameter has been assigned a value.
"""
return self.assign_done
def assign_value(self, val):
"""Assigns a value to the hyperparameter.
The hyperparameter value must be valid for the hyperparameter in question.
The hyperparameter becomes set if the call is successful.
Args:
val (object): Value to assign to the hyperparameter.
"""
assert not self.assign_done
self._check_value(val)
self.assign_done = True
self.val = val
# calls update on the dependent modules to signal that this hyperparameter
# has been set, and trigger any relevant local changes.
for m in self.modules:
m._update()
# calls updates on the dependent hyperparameters.
for h in self.dependent_hyperps:
if not h.assign_done:
h._update()
def get_value(self):
"""Get the value assigned to the hyperparameter.
The hyperparameter must have already been assigned a value, otherwise
asserts ``False``.
Returns:
object: Value assigned to the hyperparameter.
"""
assert self.assign_done
return self.val
def _register_module(self, module):
"""Registers a module as being dependent of this hyperparameter.
Args:
module (deep_architect.core.Module): Module dependent of this hyperparameter.
"""
self.modules.add(module)
def _register_dependent_hyperparameter(self, hyperp):
"""Registers an hyperparameter as being dependent on this hyperparameter.
Args:
hyperp (deep_architect.core.Hyperparameter): Hyperparameter dependent on this hyperparameter.
"""
# NOTE: for now, it is odd to register the same hyperparameter multiple times.
assert hyperp not in self.dependent_hyperps
assert isinstance(hyperp, DependentHyperparameter)
self.dependent_hyperps.add(hyperp)
def _check_value(self, val):
"""Checks if the value is valid for the hyperparameter.
When ``assign_value`` is called, this function is called to verify the
validity of ``val``. This function is useful for error checking.
"""
raise NotImplementedError
class DependentHyperparameter(Hyperparameter):
"""Hyperparameter that depends on other hyperparameters.
The value of a dependent hyperparameter is set by calling a function with
the values of the hyperparameters it depends on as arguments.
This hyperparameter is convenient when we want to express search spaces where
the values of some hyperparameters are computed as a function of the
values of some other hyperparameters, rather than set independently.
Args:
fn (dict[str, object] -> object): Function used to compute the value of the
hyperparameter based on the values of the dependent hyperparameters.
hyperps (dict[str, deep_architect.core.Hyperparameter]): Dictionary mapping
names to hyperparameters. These names are the keys of the dictionary
that is passed to ``fn``.
scope (deep_architect.core.Scope, optional): Scope in which the
hyperparameter will be registered.
name (str, optional): Name from which the name of the hyperparameter
in the scope is derived.
"""
def __init__(self, fn, hyperps, scope=None, name=None):
Hyperparameter.__init__(self, scope, name)
# NOTE: this assert may or may not be necessary.
# assert isinstance(hyperps, OrderedDict)
self._hyperps = OrderedDict([(k, hyperps[k]) for k in sorted(hyperps)])
self._fn = fn
# registering the dependencies.
for h in self._hyperps.values():
h._register_dependent_hyperparameter(self)
self._update()
def _update(self):
"""Checks if the hyperparameter is ready to be set, and sets it if that
is the case.
"""
# assert not self.has_value_assigned()
if all(h.has_value_assigned() for h in self._hyperps.values()):
dh = {name: h.get_value() for name, h in self._hyperps.items()}
v = self._fn(dh)
self.assign_value(v)
def _check_value(self, val):
pass
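# Illustrative sketch (not part of the original module): a minimal concrete
# hyperparameter subclass and a dependent hyperparameter derived from it. Real
# search spaces would use the concrete hyperparameter classes shipped with the
# framework; the toy subclass below exists only to keep the example
# self-contained.
def _example_dependent_hyperparameter():

    class _AnyValueHyperparameter(Hyperparameter):

        def _check_value(self, val):
            # accept any value; a real subclass would validate here.
            pass

    h_units = _AnyValueHyperparameter(name="units")
    h_double = DependentHyperparameter(
        lambda dh: 2 * dh["units"], {"units": h_units}, name="double_units")
    # assigning the independent hyperparameter triggers _update on the
    # dependent one, which computes and assigns its own value.
    h_units.assign_value(32)
    return h_double.get_value()  # == 64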
class Input(Addressable):
"""Manages input connections.
Inputs may be connected to a single output. Inputs and outputs are associated
with a single module.
See also: :class:`deep_architect.core.Output` and :class:`deep_architect.core.Module`.
Args:
module (deep_architect.core.Module): Module with which the input object
is associated.
scope (deep_architect.core.Scope): Scope object in which the input is
going to be registered.
name (str): Unique name with which to register the input object.
"""
def __init__(self, module, scope, name):
name = '.'.join([module.get_name(), 'I', name])
Addressable.__init__(self, scope, name)
self.module = module
self.from_output = None
def is_connected(self):
"""Checks if the input is connected.
Returns:
bool: ``True`` if the input is connected.
"""
return self.from_output is not None
def get_connected_output(self):
"""Get the output to which the input is connected to.
Returns:
deep_architect.core.Output: Output to which the input is connected to.
"""
return self.from_output
def get_module(self):
"""Get the module with which the input is associated with.
Returns:
deep_architect.core.Module: Module with which the input is associated with.
"""
return self.module
def connect(self, from_output):
"""Connect an output to this input.
Changes the state of both the input and the output. Asserts ``False`` if
the input is already connected.
Args:
from_output (deep_architect.core.Output): Output to connect to this input.
"""
assert isinstance(from_output, Output)
assert self.from_output is None
self.from_output = from_output
from_output.to_inputs.append(self)
def disconnect(self):
"""Disconnects the input from the output it is connected to.
Changes the state of both the input and the output. Asserts ``False`` if
the input is not connected.
"""
assert self.from_output is not None
self.from_output.to_inputs.remove(self)
self.from_output = None
def reroute_connected_output(self, to_input):
"""Disconnects the input from the output it is connected to and connects
the output to a new input, leaving this input in a disconnected state.
Changes the state of this input, the other input, and the output
to which this input was connected.
.. note::
Rerouting operations are widely used in
:class:`deep_architect.modules.SubstitutionModule`. See also:
:meth:`deep_architect.core.Output.reroute_all_connected_inputs`.
Args:
to_input (deep_architect.core.Input): Input to which the output
is going to be connected.
"""
assert isinstance(to_input, Input)
old_ox = self.from_output
self.disconnect()
old_ox.connect(to_input)
class Output(Addressable):
"""Manages output connections.
Outputs may be connected to multiple inputs. Inputs and outputs are associated
with a single module.
See also: :class:`deep_architect.core.Input` and :class:`deep_architect.core.Module`.
Args:
module (deep_architect.core.Module): Module with which the output object
is associated.
scope (deep_architect.core.Scope): Scope object in which the output is
going to be registered.
name (str): Unique name with which to register the output object.
"""
def __init__(self, module, scope, name):
name = '.'.join([module.get_name(), 'O', name])
Addressable.__init__(self, scope, name)
self.module = module
self.to_inputs = []
def is_connected(self):
"""Checks if the output is connected.
Returns:
bool: ``True`` if the output is connected.
"""
return len(self.to_inputs) > 0
def get_connected_inputs(self):
"""Get the list of inputs to which is the output is connected to.
Returns:
list[deep_architect.core.Input]: List of the inputs to which the
output is connect to.
"""
return self.to_inputs
def get_module(self):
"""Get the module object with which the output is associated with.
Returns:
deep_architect.core.Module: Module object with which the output is
associated with.
"""
return self.module
def connect(self, to_input):
"""Connect an additional input to this output.
Changes the state of both the input and the output.
Args:
to_input (deep_architect.core.Input): Input to connect to this output.
"""
to_input.connect(self)
def disconnect_all(self):
"""Disconnects all the inputs connected to this output.
Changes the state of the output and all the inputs connected to it.
"""
to_inputs = list(self.to_inputs)
for ix in to_inputs:
ix.disconnect()
def reroute_all_connected_inputs(self, from_output):
"""Reroutes all the inputs to which the output is connected to a
different output.
.. note::
Rerouting operations are widely used in
:class:`deep_architect.modules.SubstitutionModule`. See also:
:meth:`deep_architect.core.Input.reroute_connected_output`.
Args:
from_output (deep_architect.core.Output): Output to which the
connected inputs are going to be rerouted.
"""
to_inputs = list(self.to_inputs)
for ix in to_inputs:
ix.disconnect()
ix.connect(from_output)
class Module(Addressable):
"""Modules inputs and outputs, and depend on hyperparameters.
Modules are some of the main components used to define search spaces.
The inputs, outputs, and hyperparameters have names local to the module.
These names are different from the ones used in the scope in which
these objects are registered.
Search spaces based on modules are very general. They can be used
across deep learning frameworks, and even for purposes that do not involve
deep learning, e.g., searching over scikit-learn pipelines. The main
operations to understand are compile and forward.
Args:
scope (deep_architect.core.Scope, optional): Scope object in which the
module is going to be registered.
name (str, optional): Unique name with which to register the module.
"""
def __init__(self, scope=None, name=None):
scope = scope if scope is not None else Scope.default_scope
name = scope.get_unused_name('.'.join(
['M', (name if name is not None else self._get_base_name()) + '-']))
Addressable.__init__(self, scope, name)
self.inputs = OrderedDict()
self.outputs = OrderedDict()
self.hyperps = OrderedDict()
self._is_compiled = False
def _register_input(self, name):
"""Creates a new input with the chosen local name.
Args:
name (str): Local name given to the input.
"""
assert name not in self.inputs
self.inputs[name] = Input(self, self.scope, name)
def _register_output(self, name):
"""Creates a new output with the chosen local name.
Args:
name (str): Local name given to the output.
"""
assert name not in self.outputs
self.outputs[name] = Output(self, self.scope, name)
def _register_hyperparameter(self, name, h):
"""Registers an hyperparameter that the module depends on.
Args:
name (str): Local name to give to the hyperparameter.
h (deep_architect.core.Hyperparameter): Hyperparameter that the
module depends on.
"""
assert isinstance(h, Hyperparameter) and name not in self.hyperps
self.hyperps[name] = h
h._register_module(self)
def _register(self, input_names, output_names, name_to_hyperp):
"""Registers inputs, outputs, and hyperparameters locally for the module.
This function is convenient to avoid code repetition when registering
multiple inputs, outputs, and hyperparameters.
Args:
input_names (list[str]): List of input names of the module.
output_names (list[str]): List of the output names of the module.
name_to_hyperp (dict[str, deep_architect.core.Hyperparameter]):
Dictionary of names of hyperparameters to hyperparameters.
"""
for name in input_names:
self._register_input(name)
for name in output_names:
self._register_output(name)
for name in sorted(name_to_hyperp):
self._register_hyperparameter(name, name_to_hyperp[name])
def _get_input_values(self):
"""Get the values associated to the inputs of the module.
This function is used to implement forward. See also:
:meth:`_set_output_values` and :func:`forward`.
Returns:
dict[str, object]: Dictionary of local input names to their corresponding values.
"""
return {name: ix.val for name, ix in self.inputs.items()}
def _get_hyperp_values(self):
"""Get the values of the hyperparameters.
Returns:
dict[str, object]:
Dictionary of local hyperparameter names to their corresponding values.
"""
return {name: h.get_value() for name, h in self.hyperps.items()}
def _set_output_values(self, output_name_to_val):
"""Set the values of the outputs of the module.
This function is used to implement forward. See also:
:meth:`_get_input_values` and :func:`forward`.
Args:
output_name_to_val (dict[str, object]): Dictionary of local output
names to the corresponding values to assign to those outputs.
"""
for name, val in output_name_to_val.items():
self.outputs[name].val = val
def get_io(self):
"""
Returns:
(dict[str,deep_architect.core.Input], dict[str,deep_architect.core.Output]):
Pair with dictionaries mapping
the local input and output names to their corresponding
input and output objects.
"""
return self.inputs, self.outputs
def get_hyperps(self):
"""
Returns:
dict[str, deep_architect.core.Hyperparameter]:
Dictionary of local hyperparameter names to the corresponding
hyperparameter objects.
"""
return self.hyperps
def _update(self):
"""Called when an hyperparameter that the module depends on is set."""
# raise NotImplementedError
pass
def _compile(self):
"""Compile operation for the module.
Called once when all the hyperparameters that the module depends on,
and the other hyperparameters of the search space are specified.
See also: :meth:`_forward`.
"""
raise NotImplementedError
def _forward(self):
"""Forward operation for the module.
Called once the compile operation has been called. See also: :meth:`_compile`.
"""
raise NotImplementedError
def forward(self):
"""The forward computation done by the module is decomposed into
:meth:`_compile` and :meth:`_forward`.
Compile can be thought as creating the parameters of the module (done
once). Forward can be thought as using the parameters of the module to
do the specific computation implemented by the module on some specific
data (done multiple times).
This function can only be called after the module and the other modules in
the search space are fully specified. See also: :func:`forward`.
"""
if not self._is_compiled:
self._compile()
self._is_compiled = True
self._forward()
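# Illustrative sketch (not part of the original module): registers inputs and
# outputs on two bare Module instances and exercises the connection helpers
# defined above. Bare modules are enough here because connect/disconnect do
# not require _compile or _forward to be implemented.
def _example_connect_modules():
    m_producer = Module(name="producer")
    m_producer._register([], ["out"], {})
    m_consumer = Module(name="consumer")
    m_consumer._register(["in"], [], {})
    m_producer.outputs["out"].connect(m_consumer.inputs["in"])
    assert m_consumer.inputs["in"].get_connected_output() is m_producer.outputs["out"]
    m_consumer.inputs["in"].disconnect()
    assert not m_producer.outputs["out"].is_connected()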
def extract_unique_modules(input_or_output_lst):
"""Get the modules associated to the inputs and outputs in the list.
Each module appears appear only once in the resulting list of modules.
Args:
input_or_output_lst (list[deep_architect.core.Input or deep_architect.core.Output]):
List of inputs or outputs from which to extract the associated modules.
Returns:
list[deep_architect.core.Module]:
Unique modules to which the inputs and outputs in the list belong.
"""
ms = OrderedSet()
for x in input_or_output_lst:
assert isinstance(x, (Input, Output))
ms.add(x.get_module())
return list(ms)
# assumes that the inputs provided are sufficient to evaluate the whole network.
# TODO: add the more general functionality that allows us to compute the sequence
# of forward operations for a subgraph of the full computational graph.
def determine_module_eval_seq(inputs):
"""Computes the module forward evaluation sequence necessary to evaluate
the computational graph starting from the provided inputs.
The computational graph is a directed acyclic graph. This function sorts
the modules topologically based on their dependencies. It is assumed that
the inputs in the dictionary provided are sufficient to compute forward for all
modules in the graph. See also: :func:`forward`.
Args:
inputs (dict[str, deep_architect.core.Input]): dictionary of inputs sufficient
to compute the forward computation of the whole graph through propagation.
Returns:
list[deep_architect.core.Module]:
List of modules ordered in a way that allows forward to be called on
the modules in that order.
"""
module_seq = []
module_memo = set()
input_memo = set(inputs.values())
ms = extract_unique_modules(list(inputs.values()))
for m in ms:
if m not in module_memo and all(
ix in input_memo for ix in m.inputs.values()):
module_seq.append(m)
module_memo.add(m)
for ox in m.outputs.values():
ix_lst = ox.get_connected_inputs()
input_memo.update(ix_lst)
m_lst = [ix.get_module() for ix in ix_lst]
ms.extend(m_lst)
return module_seq
def traverse_backward(outputs, fn):
"""Backward traversal function through the graph.
Traverses the graph going from outputs to inputs. The provided function is
applied once to each module reached this way. This function is used to
implement other functionality that requires traversing the graph. ``fn``
typically has side effects, e.g., see :func:`is_specified` and
:func:`get_unassigned_hyperparameters`. See also: :func:`traverse_forward`.
Args:
outputs (dict[str, deep_architect.core.Output]): Dictionary of named
outputs to start the traversal at.
fn ((deep_architect.core.Module) -> (bool)): Function to apply to each
module. Returns ``True`` if the traversal is to be stopped.
"""
memo = set()
output_lst = sorted_values_by_key(outputs)
ms = extract_unique_modules(output_lst)
for m in ms:
is_over = fn(m)
if is_over:
break
else:
for ix in m.inputs.values():
if ix.is_connected():
m_prev = ix.get_connected_output().get_module()
if m_prev not in memo:
memo.add(m_prev)
ms.append(m_prev)
def traverse_forward(inputs, fn):
"""Forward traversal function through the graph.
Traverses the graph going from inputs to outputs. The provided function is
applied once to each module reached this way. This function is used to
implement other functionality that requires traversing the graph. ``fn``
typically has side effects, e.g., see :func:`get_unconnected_outputs`.
See also: :func:`traverse_backward`.
Args:
inputs (dict[str, deep_architect.core.Input]): Dictionary of named inputs
to start the traversal at.
fn ((deep_architect.core.Module) -> (bool)): Function to apply to each
module. Returns ``True`` if the traversal is to be stopped.
"""
memo = set()
input_lst = sorted_values_by_key(inputs)
ms = extract_unique_modules(input_lst)
for m in ms:
is_over = fn(m)
if is_over:
break
else:
for ox in m.outputs.values():
if ox.is_connected():
for ix in ox.get_connected_inputs():
m_next = ix.get_module()
if m_next not in memo:
memo.add(m_next)
ms.append(m_next)
def get_modules_with_cond(outputs, cond_fn):
ms = []
def fn(m):
if cond_fn(m):
ms.append(m)
traverse_backward(outputs, fn)
return ms
def is_specified(outputs):
"""Checks if all the hyperparameters reachable by traversing backward from
the outputs have been set.
Args:
outputs (dict[str, deep_architect.core.Output]): Dictionary of named
outputs to start the traversal at.
Returns:
bool: ``True`` if all the hyperparameters have been set. ``False`` otherwise.
"""
is_spec = [True]
def fn(module):
for h in module.hyperps.values():
if not h.has_value_assigned():
is_spec[0] = False
return True
return False
traverse_backward(outputs, fn)
return is_spec[0]
def forward(input_to_val, _module_seq=None):
"""Forward pass through the graph starting with the provided inputs.
The starting inputs are given the values in the dictionary. The values for
the other inputs are obtained through propagation, i.e., through successive
calls to :meth:`deep_architect.core.Module.forward` of the appropriate modules.
.. note::
For efficiency, in dynamic frameworks, the module evaluation sequence
is best computed once and reused in each forward call. The module
evaluation sequence is computed with :func:`determine_module_eval_seq`.
Args:
input_to_val (dict[deep_architect.core.Input, object]): Dictionary of initial
inputs to their corresponding values.
_module_seq (list[deep_architect.core.Module], optional): List of modules ordered
in a way that calling :meth:`deep_architect.core.Module.forward` on them
starting from the values given for the inputs is valid. If it is
not provided, the module sequence is computed.
"""
if _module_seq is None:
inputs = {"in%d" % i: ix for (i, ix) in enumerate(input_to_val.keys())}
_module_seq = determine_module_eval_seq(inputs)
for ix, val in input_to_val.items():
ix.val = val
for m in _module_seq:
m.forward()
for ox in m.outputs.values():
for ix in ox.get_connected_inputs():
ix.val = ox.val
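# Illustrative sketch (not part of the original module): a toy single-input,
# single-output module whose forward computation doubles its input, used to
# show determine_module_eval_seq and forward working together. Real search
# spaces would use the framework's helper modules rather than this toy class.
def _example_forward_pass():

    class _DoubleModule(Module):

        def __init__(self, scope=None, name=None):
            Module.__init__(self, scope, name)
            self._register(["in"], ["out"], {})

        def _compile(self):
            # nothing to create for this toy computation.
            pass

        def _forward(self):
            self._set_output_values(
                {"out": 2 * self._get_input_values()["in"]})

    m = _DoubleModule()
    inputs, outputs = m.get_io()
    # the evaluation sequence can be computed once and reused across calls.
    module_seq = determine_module_eval_seq(inputs)
    forward({inputs["in"]: 3}, module_seq)
    return outputs["out"].val  # == 6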
def get_unconnected_inputs(outputs):
"""Get the inputs that are reachable going backward from the provided outputs,
but are not connected to any outputs.
Often, these inputs have to be provided with a value when calling
:func:`forward`.
Args:
outputs (dict[str, deep_architect.core.Output]): Dictionary of named outputs
to start the backward traversal at.
Returns:
list[deep_architect.core.Input]:
Unconnected inputs reachable by traversing the graph backward starting
from the provided outputs.
"""
ix_lst = []
def fn(x):
for ix in x.inputs.values():
if not ix.is_connected():
ix_lst.append(ix)
return False
traverse_backward(outputs, fn)
return ix_lst
def get_unconnected_outputs(inputs):
"""Get the outputs that are reachable going forward from the provided inputs,
but are not connected to any inputs.
Often, the final result of a forward pass through the network will be at
these outputs.
Args:
inputs (dict[str, deep_architect.core.Input]): Dictionary of named
inputs to start the forward traversal at.
Returns:
list[deep_architect.core.Output]:
Unconnected outputs reachable by traversing the graph forward starting
from the provided inputs.
"""
ox_lst = []
def fn(x):
for ox in x.outputs.values():
if not ox.is_connected():
ox_lst.append(ox)
return False
traverse_forward(inputs, fn)
return ox_lst
def get_all_hyperparameters(outputs):
"""Going backward from the outputs provided, gets all hyperparameters.
Hyperparameters that can be reached by traversing dependency links between
hyperparameters are also included. Setting an hyperparameter may lead to the
creation of additional hyperparameters, which will most likely not be set.
Such behavior happens when dealing with,
for example, hyperparameters associated with substitution
modules such as :func:`deep_architect.modules.siso_optional`,
:func:`deep_architect.modules.siso_or`, and :func:`deep_architect.modules.siso_repeat`.
Args:
outputs (dict[str, deep_architect.core.Output]): Dictionary of named
outputs to start the traversal at.
Returns:
OrderedSet[deep_architect.core.Hyperparameter]:
Ordered set of hyperparameters that are currently present in the
graph.
"""
visited_hs = OrderedSet()
def _add_reachable_hs(h_dep):
assert isinstance(h_dep, DependentHyperparameter)
local_memo = set([h_dep])
h_dep_lst = [h_dep]
idx = 0
while idx < len(h_dep_lst):
for h in h_dep_lst[idx]._hyperps.values():
# cycle detection.
# assert h not in local_memo
if h not in visited_hs:
if isinstance(h, DependentHyperparameter):
h_dep_lst.append(h)
local_memo.add(h)
visited_hs.add(h)
idx += 1
# this function is applied on each of the modules in the graph.
def fn(module):
for h in module.hyperps.values():
if h not in visited_hs:
visited_hs.add(h)
if isinstance(h, DependentHyperparameter):
_add_reachable_hs(h)
return False
traverse_backward(outputs, fn)
return visited_hs
def get_unassigned_independent_hyperparameters(outputs):
"""Going backward from the outputs provided, gets all the independent
hyperparameters that are not set yet.
Setting an hyperparameter may lead to the creation of additional hyperparameters,
which will most likely not be set. Such behavior happens when dealing with,
for example, hyperparameters associated with substitution
modules such as :func:`deep_architect.modules.siso_optional`,
:func:`deep_architect.modules.siso_or`, and :func:`deep_architect.modules.siso_repeat`.
Args:
outputs (dict[str, deep_architect.core.Output]): Dictionary of named
outputs to start the traversal at.
Returns:
OrderedSet[deep_architect.core.Hyperparameter]:
Ordered set of hyperparameters that are currently present in the
graph and have not been assigned a value yet.
"""
assert not is_specified(outputs)
unassigned_indep_hs = OrderedSet()
for h in get_all_hyperparameters(outputs):
if not isinstance(
h, DependentHyperparameter) and not h.has_value_assigned():
unassigned_indep_hs.add(h)
return unassigned_indep_hs
# TODO: perhaps change to not have to work until everything is specified.
# this can be done through a flag.
def unassigned_independent_hyperparameter_iterator(outputs):
"""Returns an iterator over the hyperparameters that are not specified in
the current search space.
This iterator is used by the searchers to go over the unspecified
hyperparameters.
.. note::
It is assumed that all the hyperparameters that are touched by the
iterator will be specified (most likely, right away). Otherwise, the
iterator will never terminate.
Args:
outputs (dict[str, deep_architect.core.Output]): Dictionary of named
outputs which by being traversed back will reach all the
modules in the search space, and correspondingly all the current
unspecified hyperparameters of the search space.
Yields:
(deep_architect.core.Hyperparameter):
Next unspecified hyperparameter of the search space.
"""
while not is_specified(outputs):
hs = get_unassigned_independent_hyperparameters(outputs)
for h in hs:
if not h.has_value_assigned():
yield h
def determine_input_output_cleanup_seq(inputs):
"""Determines the order in which the outputs can be cleaned.
This sequence is aligned with the module evaluation sequence. Positionally,
after each module evaluation, the values stored in val for both inputs and
outputs can be deleted. This is useful to remove intermediate results to
save memory.
.. note::
This function should be used only for fully-specified search spaces.
Args:
inputs (dict[str, deep_architect.core.Input]): Dictionary of named
inputs which by being traversed forward will reach all the
modules in the search space.
Returns:
(list[list[deep_architect.core.Input]], list[list[deep_architect.core.Output]]):
List of lists with the inputs and outputs in the order they should be
cleaned up after they are no longer needed.
"""
module_eval_seq = determine_module_eval_seq(inputs)
input_cleanup_seq = []
for m in module_eval_seq:
lst = list(m.inputs.values())
input_cleanup_seq.append(lst)
# number of inputs that depend on each output.
output_counters = {}
for m in module_eval_seq:
for ox in m.outputs.values():
output_counters[ox] = len(ox.get_connected_inputs())
output_cleanup_seq = []
for m in module_eval_seq:
lst = []
for ix in m.inputs.values():
if ix.is_connected():
ox = ix.get_connected_output()
output_counters[ox] -= 1
if output_counters[ox] == 0:
lst.append(ox)
output_cleanup_seq.append(lst)
return input_cleanup_seq, output_cleanup_seq
def jsonify(inputs, outputs):
"""Returns a JSON representation of the fully-specified search space.
This function is useful to create a representation of the model that does not
rely on the graph representation involving :class:`deep_architect.core.Module`,
:class:`deep_architect.core.Input`, and :class:`deep_architect.core.Output`.
Args:
inputs (dict[str, deep_architect.core.Input]): Dictionary of named
inputs which by being traversed forward will reach all the
modules in the search space.
outputs (dict[str, deep_architect.core.Output]): Dictionary of named
outputs which by being traversed back will reach all the
modules in the search space.
Returns:
(dict): JSON representation of the fully specified model.
"""
modules = {}
def add_module(m):
module_name = m.get_name()
input_names = {name: ix.get_name() for name, ix in m.inputs.items()}
output_names = {name: ox.get_name() for name, ox in m.outputs.items()}
hyperp_name_to_val = m._get_hyperp_values()
in_connections = {}
out_connections = {}
in_modules = set()
out_modules = set()
for ix in m.inputs.values():
if ix.is_connected():
ox = ix.get_connected_output()
ix_name = ix.get_name()
in_connections[ix_name] = ox.get_name()
in_module_name = ox.get_module().get_name()
in_modules.add(in_module_name)
for ox in m.outputs.values():
if ox.is_connected():
ox_name = ox.get_name()
lst = []
for ix in ox.get_connected_inputs():
lst.append(ix.get_name())
out_module_name = ix.get_module().get_name()
out_modules.add(out_module_name)
out_connections[ox_name] = lst
module_name = m.get_name()
start_idx = module_name.index('.') + 1
end_idx = len(module_name) - module_name[::-1].index('-') - 1
module_type = module_name[start_idx:end_idx]
modules[m.get_name()] = {
"module_name": module_name,
"module_type": module_type,
"hyperp_name_to_val": hyperp_name_to_val,
"input_names": input_names,
"output_names": output_names,
"in_connections": in_connections,
"out_connections": out_connections,
"in_modules": list(in_modules),
"out_modules": list(out_modules),
}
traverse_backward(outputs, add_module)
ms = determine_module_eval_seq(inputs)
module_eval_seq = [m.get_name() for m in ms]
ixs_lst, oxs_lst = determine_input_output_cleanup_seq(inputs)
input_cleanup_seq = [[ix.get_name() for ix in ixs] for ixs in ixs_lst]
output_cleanup_seq = [[ox.get_name() for ox in oxs] for oxs in oxs_lst]
unconnected_inputs = {name: ix.get_name() for name, ix in inputs.items()}
unconnected_outputs = {name: ox.get_name() for name, ox in outputs.items()}
graph = {
"modules": modules,
"unconnected_inputs": unconnected_inputs,
"unconnected_outputs": unconnected_outputs,
"module_eval_seq": module_eval_seq,
"input_cleanup_seq": input_cleanup_seq,
"output_cleanup_seq": output_cleanup_seq,
}
return graph
import furl
from urlparse import urlparse
from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from framework.guid.model import Guid
from website.addons.wiki.model import NodeWikiPage
from tests.base import ApiWikiTestCase
from tests.factories import (ProjectFactory, RegistrationFactory,
NodeWikiFactory, PrivateLinkFactory,
CommentFactory)
class TestWikiDetailView(ApiWikiTestCase):
def _set_up_public_project_with_wiki_page(self, project_options=None):
project_options = project_options or {}
self.public_project = ProjectFactory(is_public=True, creator=self.user, **project_options)
self.public_wiki = self._add_project_wiki_page(self.public_project, self.user)
self.public_url = '/{}wikis/{}/'.format(API_BASE, self.public_wiki._id)
def _set_up_private_project_with_wiki_page(self):
self.private_project = ProjectFactory(creator=self.user)
self.private_wiki = self._add_project_wiki_page(self.private_project, self.user)
self.private_url = '/{}wikis/{}/'.format(API_BASE, self.private_wiki._id)
def _set_up_public_registration_with_wiki_page(self):
self._set_up_public_project_with_wiki_page()
self.public_registration = RegistrationFactory(project=self.public_project, user=self.user, is_public=True)
self.public_registration_wiki_id = self.public_registration.wiki_pages_versions['home'][0]
self.public_registration.wiki_pages_current = {'home': self.public_registration_wiki_id}
self.public_registration.save()
self.public_registration_url = '/{}wikis/{}/'.format(API_BASE, self.public_registration_wiki_id)
def _set_up_private_registration_with_wiki_page(self):
self._set_up_private_project_with_wiki_page()
self.private_registration = RegistrationFactory(project=self.private_project, user=self.user)
self.private_registration_wiki_id = self.private_registration.wiki_pages_versions['home'][0]
self.private_registration.wiki_pages_current = {'home': self.private_registration_wiki_id}
self.private_registration.save()
self.private_registration_url = '/{}wikis/{}/'.format(API_BASE, self.private_registration_wiki_id)
def test_public_node_logged_out_user_can_view_wiki(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.public_wiki._id)
def test_public_node_logged_in_non_contributor_can_view_wiki(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url, auth=self.non_contributor.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.public_wiki._id)
def test_public_node_logged_in_contributor_can_view_wiki(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.public_wiki._id)
def test_private_node_logged_out_user_cannot_view_wiki(self):
self._set_up_private_project_with_wiki_page()
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
def test_private_node_logged_in_non_contributor_cannot_view_wiki(self):
self._set_up_private_project_with_wiki_page()
res = self.app.get(self.private_url, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_private_node_logged_in_contributor_can_view_wiki(self):
self._set_up_private_project_with_wiki_page()
res = self.app.get(self.private_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.private_wiki._id)
def test_private_node_user_with_anonymous_link_can_view_wiki(self):
self._set_up_private_project_with_wiki_page()
private_link = PrivateLinkFactory(anonymous=True)
private_link.nodes.append(self.private_project)
private_link.save()
url = furl.furl(self.private_url).add(query_params={'view_only': private_link.key}).url
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.private_wiki._id)
def test_private_node_user_with_view_only_link_can_view_wiki(self):
self._set_up_private_project_with_wiki_page()
private_link = PrivateLinkFactory(anonymous=False)
private_link.nodes.append(self.private_project)
private_link.save()
url = furl.furl(self.private_url).add(query_params={'view_only': private_link.key}).url
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.private_wiki._id)
def test_public_registration_logged_out_user_can_view_wiki(self):
self._set_up_public_registration_with_wiki_page()
res = self.app.get(self.public_registration_url)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.public_registration_wiki_id)
def test_public_registration_logged_in_non_contributor_can_view_wiki(self):
self._set_up_public_registration_with_wiki_page()
res = self.app.get(self.public_registration_url, auth=self.non_contributor.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.public_registration_wiki_id)
def test_public_registration_contributor_can_view_wiki(self):
self._set_up_public_registration_with_wiki_page()
res = self.app.get(self.public_registration_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.public_registration_wiki_id)
def test_user_cannot_view_withdrawn_registration_wikis(self):
self._set_up_public_registration_with_wiki_page()
withdrawal = self.public_registration.retract_registration(user=self.user, save=True)
token = withdrawal.approval_state.values()[0]['approval_token']
withdrawal.approve_retraction(self.user, token)
withdrawal.save()
res = self.app.get(self.public_registration_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_private_registration_logged_out_user_cannot_view_wiki(self):
self._set_up_private_registration_with_wiki_page()
res = self.app.get(self.private_registration_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_equal(res.json['errors'][0]['detail'], 'Authentication credentials were not provided.')
def test_private_registration_logged_in_non_contributor_cannot_view_wiki(self):
self._set_up_private_registration_with_wiki_page()
res = self.app.get(self.private_registration_url, auth=self.non_contributor.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_equal(res.json['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_private_registration_contributor_can_view_wiki(self):
self._set_up_private_registration_with_wiki_page()
res = self.app.get(self.private_registration_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.json['data']['id'], self.private_registration_wiki_id)
def test_wiki_has_user_link(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url)
url = res.json['data']['relationships']['user']['links']['related']['href']
expected_url = '/{}users/{}/'.format(API_BASE, self.user._id)
assert_equal(res.status_code, 200)
assert_equal(urlparse(url).path, expected_url)
def test_wiki_has_node_link(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url)
url = res.json['data']['relationships']['node']['links']['related']['href']
expected_url = '/{}nodes/{}/'.format(API_BASE, self.public_project._id)
assert_equal(res.status_code, 200)
assert_equal(urlparse(url).path, expected_url)
def test_wiki_has_comments_link(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
url = res.json['data']['relationships']['comments']['links']['related']['href']
CommentFactory(node=self.public_project, target=Guid.load(self.public_wiki._id), user=self.user)
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(res.json['data'][0]['type'], 'comments')
def test_only_project_contrib_can_comment_on_closed_project(self):
self._set_up_public_project_with_wiki_page(project_options={'comment_level': 'private'})
res = self.app.get(self.public_url, auth=self.user.auth)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, True)
res = self.app.get(self.public_url, auth=self.non_contributor.auth)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, False)
def test_any_loggedin_user_can_comment_on_open_project(self):
self._set_up_public_project_with_wiki_page(project_options={'comment_level': 'public'})
res = self.app.get(self.public_url, auth=self.non_contributor.auth)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, True)
def test_non_logged_in_user_cant_comment(self):
self._set_up_public_project_with_wiki_page(project_options={'comment_level': 'public'})
res = self.app.get(self.public_url)
can_comment = res.json['data']['attributes']['current_user_can_comment']
assert_equal(res.status_code, 200)
assert_equal(can_comment, False)
def test_wiki_has_download_link(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url)
url = res.json['data']['links']['download']
expected_url = '/{}wikis/{}/content/'.format(API_BASE, self.public_wiki._id)
assert_equal(res.status_code, 200)
assert_in(expected_url, url)
def test_wiki_invalid_id_not_found(self):
url = '/{}wikis/{}/'.format(API_BASE, 'abcde')
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
def test_old_wiki_versions_not_returned(self):
self._set_up_public_project_with_wiki_page()
current_wiki = NodeWikiFactory(node=self.public_project, user=self.user)
old_version_id = self.public_project.wiki_pages_versions[current_wiki.page_name][-2]
old_version = NodeWikiPage.load(old_version_id)
url = '/{}wikis/{}/'.format(API_BASE, old_version._id)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 404)
def test_public_node_wiki_relationship_links(self):
self._set_up_public_project_with_wiki_page()
res = self.app.get(self.public_url)
expected_nodes_relationship_url = '{}nodes/{}/'.format(API_BASE, self.public_project._id)
expected_comments_relationship_url = '{}nodes/{}/comments/'.format(API_BASE, self.public_project._id)
assert_in(expected_nodes_relationship_url, res.json['data']['relationships']['node']['links']['related']['href'])
assert_in(expected_comments_relationship_url, res.json['data']['relationships']['comments']['links']['related']['href'])
def test_private_node_wiki_relationship_links(self):
self._set_up_private_project_with_wiki_page()
res = self.app.get(self.private_url, auth=self.user.auth)
expected_nodes_relationship_url = '{}nodes/{}/'.format(API_BASE, self.private_project._id)
expected_comments_relationship_url = '{}nodes/{}/comments/'.format(API_BASE, self.private_project._id)
assert_in(expected_nodes_relationship_url, res.json['data']['relationships']['node']['links']['related']['href'])
assert_in(expected_comments_relationship_url, res.json['data']['relationships']['comments']['links']['related']['href'])
def test_public_registration_wiki_relationship_links(self):
self._set_up_public_registration_with_wiki_page()
res = self.app.get(self.public_registration_url)
expected_nodes_relationship_url = '{}registrations/{}/'.format(API_BASE, self.public_registration._id)
expected_comments_relationship_url = '{}registrations/{}/comments/'.format(API_BASE, self.public_registration._id)
assert_in(expected_nodes_relationship_url, res.json['data']['relationships']['node']['links']['related']['href'])
assert_in(expected_comments_relationship_url, res.json['data']['relationships']['comments']['links']['related']['href'])
def test_private_registration_wiki_relationship_links(self):
self._set_up_private_registration_with_wiki_page()
res = self.app.get(self.private_registration_url, auth=self.user.auth)
expected_nodes_relationship_url = '{}registrations/{}/'.format(API_BASE, self.private_registration._id)
expected_comments_relationship_url = '{}registrations/{}/comments/'.format(API_BASE, self.private_registration._id)
assert_in(expected_nodes_relationship_url, res.json['data']['relationships']['node']['links']['related']['href'])
assert_in(expected_comments_relationship_url, res.json['data']['relationships']['comments']['links']['related']['href'])
|
|
from sympy.core.basic import Basic
from sympy import (sympify, eye, sin, cos, rot_axis1, rot_axis2,
rot_axis3, ImmutableMatrix as Matrix, Symbol)
from sympy.core.cache import cacheit
import sympy.vector
class Orienter(Basic):
"""
Super-class for all orienter classes.
"""
def rotation_matrix(self):
"""
The rotation matrix corresponding to this orienter
instance.
"""
return self._parent_orient
class AxisOrienter(Orienter):
"""
Class to denote an axis orienter.
"""
def __new__(cls, angle, axis):
if not isinstance(axis, sympy.vector.Vector):
raise TypeError("axis should be a Vector")
angle = sympify(angle)
obj = super(AxisOrienter, cls).__new__(cls, angle,
axis)
obj._angle = angle
obj._axis = axis
return obj
def __init__(self, angle, axis):
"""
Axis rotation is a rotation about an arbitrary axis by
some angle. The angle is supplied as a SymPy expr scalar, and
the axis is supplied as a Vector.
Parameters
==========
angle : Expr
The angle by which the new system is to be rotated
axis : Vector
The axis around which the rotation has to be performed
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = CoordSysCartesian('N')
>>> from sympy.vector import AxisOrienter
>>> orienter = AxisOrienter(q1, N.i + 2 * N.j)
>>> B = N.orient_new('B', (orienter, ))
"""
#Dummy initializer for docstrings
pass
@cacheit
def rotation_matrix(self, system):
"""
The rotation matrix corresponding to this orienter
instance.
Parameters
==========
system : CoordSysCartesian
The coordinate system wrt which the rotation matrix
is to be computed
"""
axis = sympy.vector.express(self.axis, system).normalize()
axis = axis.to_matrix(system)
theta = self.angle
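        # Added note (editor, illustrative): the expression below is the
        # Rodrigues rotation formula,
        #     R = cos(theta)*(I - a*a^T) + sin(theta)*[a]_x + a*a^T,
        # where `a` is the unit rotation axis and [a]_x is its skew-symmetric
        # cross-product matrix. The final transpose matches the transposed
        # convention used by the other orienters in this module (cf. `_rot`).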
parent_orient = ((eye(3) - axis * axis.T) * cos(theta) +
Matrix([[0, -axis[2], axis[1]],
[axis[2], 0, -axis[0]],
[-axis[1], axis[0], 0]]) * sin(theta) +
axis * axis.T)
parent_orient = parent_orient.T
return parent_orient
@property
def angle(self):
return self._angle
@property
def axis(self):
return self._axis
class ThreeAngleOrienter(Orienter):
"""
Super-class for Body and Space orienters.
"""
def __new__(cls, angle1, angle2, angle3, rot_order):
approved_orders = ('123', '231', '312', '132', '213',
'321', '121', '131', '212', '232',
'313', '323', '')
original_rot_order = rot_order
rot_order = str(rot_order).upper()
        if len(rot_order) != 3:
raise TypeError('rot_order should be a str of length 3')
rot_order = [i.replace('X', '1') for i in rot_order]
rot_order = [i.replace('Y', '2') for i in rot_order]
rot_order = [i.replace('Z', '3') for i in rot_order]
rot_order = ''.join(rot_order)
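        # At this point letter orders have been normalized to digits,
        # e.g. 'XYZ' -> '123' and 'ZXZ' -> '313'.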
        if rot_order not in approved_orders:
            raise TypeError('Invalid rot_order parameter')
a1 = int(rot_order[0])
a2 = int(rot_order[1])
a3 = int(rot_order[2])
angle1 = sympify(angle1)
angle2 = sympify(angle2)
angle3 = sympify(angle3)
if cls._in_order:
parent_orient = (_rot(a1, angle1) *
_rot(a2, angle2) *
_rot(a3, angle3))
else:
parent_orient = (_rot(a3, angle3) *
_rot(a2, angle2) *
_rot(a1, angle1))
parent_orient = parent_orient.T
obj = super(ThreeAngleOrienter, cls).__new__(
cls, angle1, angle2, angle3, Symbol(original_rot_order))
obj._angle1 = angle1
obj._angle2 = angle2
obj._angle3 = angle3
obj._rot_order = original_rot_order
obj._parent_orient = parent_orient
return obj
@property
def angle1(self):
return self._angle1
@property
def angle2(self):
return self._angle2
@property
def angle3(self):
return self._angle3
@property
def rot_order(self):
return self._rot_order
class BodyOrienter(ThreeAngleOrienter):
"""
Class to denote a body-orienter.
"""
_in_order = True
def __new__(cls, angle1, angle2, angle3, rot_order):
obj = ThreeAngleOrienter.__new__(cls, angle1, angle2, angle3,
rot_order)
return obj
def __init__(self, angle1, angle2, angle3, rot_order):
"""
Body orientation takes this coordinate system through three
successive simple rotations.
Body fixed rotations include both Euler Angles and
Tait-Bryan Angles, see http://en.wikipedia.org/wiki/Euler_angles.
Parameters
==========
angle1, angle2, angle3 : Expr
Three successive angles to rotate the coordinate system by
        rot_order : string
String defining the order of axes for rotation
Examples
========
>>> from sympy.vector import CoordSysCartesian, BodyOrienter
>>> from sympy import symbols
>>> q1, q2, q3 = symbols('q1 q2 q3')
>>> N = CoordSysCartesian('N')
A 'Body' fixed rotation is described by three angles and
three body-fixed rotation axes. To orient a coordinate system D
with respect to N, each sequential rotation is always about
the orthogonal unit vectors fixed to D. For example, a '123'
rotation will specify rotations about N.i, then D.j, then
        D.k. (Initially, D.i is the same as N.i)
Therefore,
>>> body_orienter = BodyOrienter(q1, q2, q3, '123')
>>> D = N.orient_new('D', (body_orienter, ))
        is the same as
>>> from sympy.vector import AxisOrienter
>>> axis_orienter1 = AxisOrienter(q1, N.i)
>>> D = N.orient_new('D', (axis_orienter1, ))
>>> axis_orienter2 = AxisOrienter(q2, D.j)
>>> D = D.orient_new('D', (axis_orienter2, ))
>>> axis_orienter3 = AxisOrienter(q3, D.k)
>>> D = D.orient_new('D', (axis_orienter3, ))
Acceptable rotation orders are of length 3, expressed in XYZ or
        123, and cannot have a rotation about an axis twice in a row.
>>> body_orienter1 = BodyOrienter(q1, q2, q3, '123')
>>> body_orienter2 = BodyOrienter(q1, q2, 0, 'ZXZ')
>>> body_orienter3 = BodyOrienter(0, 0, 0, 'XYX')
"""
#Dummy initializer for docstrings
pass
class SpaceOrienter(ThreeAngleOrienter):
"""
Class to denote a space-orienter.
"""
_in_order = False
def __new__(cls, angle1, angle2, angle3, rot_order):
obj = ThreeAngleOrienter.__new__(cls, angle1, angle2, angle3,
rot_order)
return obj
def __init__(self, angle1, angle2, angle3, rot_order):
"""
Space rotation is similar to Body rotation, but the rotations
are applied in the opposite order.
Parameters
==========
angle1, angle2, angle3 : Expr
Three successive angles to rotate the coordinate system by
        rot_order : string
String defining the order of axes for rotation
See Also
========
BodyOrienter : Orienter to orient systems wrt Euler angles.
Examples
========
>>> from sympy.vector import CoordSysCartesian, SpaceOrienter
>>> from sympy import symbols
>>> q1, q2, q3 = symbols('q1 q2 q3')
>>> N = CoordSysCartesian('N')
To orient a coordinate system D with respect to N, each
sequential rotation is always about N's orthogonal unit vectors.
For example, a '123' rotation will specify rotations about
N.i, then N.j, then N.k.
Therefore,
>>> space_orienter = SpaceOrienter(q1, q2, q3, '312')
>>> D = N.orient_new('D', (space_orienter, ))
        is the same as
>>> from sympy.vector import AxisOrienter
>>> axis_orienter1 = AxisOrienter(q1, N.i)
>>> B = N.orient_new('B', (axis_orienter1, ))
>>> axis_orienter2 = AxisOrienter(q2, N.j)
>>> C = B.orient_new('C', (axis_orienter2, ))
>>> axis_orienter3 = AxisOrienter(q3, N.k)
        >>> D = C.orient_new('D', (axis_orienter3, ))
"""
#Dummy initializer for docstrings
pass
class QuaternionOrienter(Orienter):
"""
Class to denote a quaternion-orienter.
"""
def __new__(cls, q0, q1, q2, q3):
q0 = sympify(q0)
q1 = sympify(q1)
q2 = sympify(q2)
q3 = sympify(q3)
parent_orient = (Matrix([[q0 ** 2 + q1 ** 2 - q2 ** 2 -
q3 ** 2,
2 * (q1 * q2 - q0 * q3),
2 * (q0 * q2 + q1 * q3)],
[2 * (q1 * q2 + q0 * q3),
q0 ** 2 - q1 ** 2 +
q2 ** 2 - q3 ** 2,
2 * (q2 * q3 - q0 * q1)],
[2 * (q1 * q3 - q0 * q2),
2 * (q0 * q1 + q2 * q3),
q0 ** 2 - q1 ** 2 -
q2 ** 2 + q3 ** 2]]))
parent_orient = parent_orient.T
obj = super(QuaternionOrienter, cls).__new__(cls, q0, q1, q2, q3)
obj._q0 = q0
obj._q1 = q1
obj._q2 = q2
obj._q3 = q3
obj._parent_orient = parent_orient
return obj
    def __init__(self, q0, q1, q2, q3):
"""
Quaternion orientation orients the new CoordSysCartesian with
Quaternions, defined as a finite rotation about lambda, a unit
vector, by some amount theta.
This orientation is described by four parameters:
q0 = cos(theta/2)
q1 = lambda_x sin(theta/2)
q2 = lambda_y sin(theta/2)
q3 = lambda_z sin(theta/2)
Quaternion does not take in a rotation order.
Parameters
==========
q0, q1, q2, q3 : Expr
The quaternions to rotate the coordinate system by
Examples
========
>>> from sympy.vector import CoordSysCartesian
>>> from sympy import symbols
>>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
>>> N = CoordSysCartesian('N')
>>> from sympy.vector import QuaternionOrienter
>>> q_orienter = QuaternionOrienter(q0, q1, q2, q3)
>>> B = N.orient_new('B', (q_orienter, ))
"""
#Dummy initializer for docstrings
pass
@property
def q0(self):
return self._q0
@property
def q1(self):
return self._q1
@property
def q2(self):
return self._q2
@property
def q3(self):
return self._q3
def _rot(axis, angle):
"""DCM for simple axis 1, 2 or 3 rotations. """
if axis == 1:
return Matrix(rot_axis1(angle).T)
elif axis == 2:
return Matrix(rot_axis2(angle).T)
elif axis == 3:
return Matrix(rot_axis3(angle).T)
|
|
# coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library functions for training with dopamine."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from absl import flags
from dopamine.agents.dqn import dqn_agent
from dopamine.discrete_domains import gym_lib
from dopamine.discrete_domains import run_experiment
import core
from experiments import infectious_disease as infectious_disease_exp
import numpy as np
import tensorflow.compat.v1 as tf
flags.DEFINE_string(
'output_dir',
'/tmp/ml-fairness-gym/infection_rl',
'directory to write files.')
FLAGS = flags.FLAGS
class _DopamineWrapper(gym_lib.GymPreprocessing):
"""Wraps the infectious disease environment to be compatible with Dopamine."""
def format_observation(self, obs, padding=2):
"""Formats health state observations into a numpy array.
The health-states are one-hot encoded as row vectors, and then stacked
together vertically to create a |population| x |health states| array.
    The population is padded on top and bottom with "recovered" individuals,
which don't affect the disease spread but make convolutions simpler.
Args:
obs: An observation dictionary.
padding: An integer indicating how many people to use for padding.
Returns:
A numpy array suitable for passing to a DQN agent.
"""
vecs = []
initial_params = self.environment.initial_params
num_states = len(initial_params.state_names)
recovered_state = initial_params.state_names.index('recovered')
for state in obs['health_states']:
vecs.append(np.zeros((num_states, 1), dtype=float))
vecs[-1][state] = 1.0
pad = np.zeros((num_states, padding))
pad[recovered_state, :] = 1.0
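    # Illustrative shape note (editor): with 3 health states, a population of
    # 5 and padding=2, the stacked result below has shape (5 + 2 * 2, 3) = (9, 3).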
return np.hstack([pad] + vecs + [pad]).T
@property
def action_space(self):
return self.environment.action_space
@property
def observation_shape(self):
return self.format_observation(
self.environment.observation_space.sample()).shape
@property
def initial_params(self):
return self.environment.initial_params
def reset(self):
"""Resets the environment and chooses an initial patient to infect."""
observation = self.environment.reset()
self.environment.set_scalar_reward(
NegativeDeltaPercentSick(_percent_sick(observation)))
return self.format_observation(observation)
def step(self, action):
"""Play the environment one step forward."""
action = np.array([action])
observation, reward, game_over, info = self.environment.step(action)
self.game_over = game_over
return self.format_observation(observation), reward, game_over, info
def set_initial_health_state(self, initial_health_state):
return self.environment.set_initial_health_state(initial_health_state)
def _percent_sick(observation):
return np.mean(
[health_state == 1 for health_state in observation['health_states']])
class NegativeDeltaPercentSick(core.RewardFn):
"""Reward function that penalizes newly sick days."""
def __init__(self, base=0):
super(NegativeDeltaPercentSick, self).__init__()
self.base = base
def __call__(self, observation):
percent_sick = _percent_sick(observation)
delta = percent_sick - self.base
self.base = percent_sick
return -delta
def _create_environment(seed=100, network='chain'):
"""Returns a Dopamine-compatible version of the infectious disease env."""
experiment = infectious_disease_exp.Experiment(graph_name=network)
env, _ = experiment.scenario_builder()
env.seed(seed)
env.reset()
env.set_scalar_reward(NegativeDeltaPercentSick())
return _DopamineWrapper(env)
DQNNetworkType = collections.namedtuple('dqn_network', ['q_values'])
class _SimpleDQNNetwork(tf.keras.Model):
"""The convolutional network used to compute the agent's Q-values."""
def __init__(self, num_actions, hidden_layer_size=64, name=None):
"""Creates the layers used for calculating Q-values.
Args:
num_actions: int, number of actions.
hidden_layer_size: int, number of hidden units.
name: str, used to create scope for network parameters.
"""
super(_SimpleDQNNetwork, self).__init__(name=name)
self.num_actions = num_actions
activation_fn = tf.keras.activations.relu
# Set names of the layers manually to make variable names more similar
# with tf.slim variable names/checkpoints.
self.conv1 = tf.keras.layers.Conv1D(
32,
5,
strides=1,
padding='valid',
activation=activation_fn,
name='Conv')
self.flatten = tf.keras.layers.Flatten()
self.dense1 = tf.keras.layers.Dense(
hidden_layer_size, activation=activation_fn, name='fully_connected')
self.dense2 = tf.keras.layers.Dense(
num_actions,
kernel_regularizer=tf.keras.regularizers.l2(0.01),
name='fully_connected')
def call(self, state):
"""Creates the output tensor/op given the state tensor as input.
See https://www.tensorflow.org/api_docs/python/tf/keras/Model for more
information on this. Note that tf.keras.Model implements `call` which is
wrapped by `__call__` function by tf.keras.Model.
Parameters created here will have scope according to the `name` argument
given at `.__init__()` call.
Args:
state: Tensor, input tensor.
Returns:
collections.namedtuple, output ops (graph mode) or output tensors (eager).
"""
x = tf.cast(state, tf.float32)
# Fully connected network. No convolutions or graph convolutions here.
x = self.flatten(x)
x = self.dense1(x)
x = self.dense2(x)
return DQNNetworkType(x)
def _create_agent(sess,
environment,
summary_writer,
gamma=0.95,
hidden_layer_size=32,
learning_rate=0.00025):
"""Returns a DQN agent."""
return dqn_agent.DQNAgent(
sess,
network=functools.partial(
_SimpleDQNNetwork, hidden_layer_size=hidden_layer_size),
num_actions=int(environment.action_space.nvec[0]),
observation_shape=environment.observation_shape,
observation_dtype=tf.int32,
gamma=gamma,
stack_size=1,
epsilon_train=0.5,
min_replay_history=1000,
summary_writer=summary_writer,
optimizer=tf.train.GradientDescentOptimizer(learning_rate=learning_rate))
def dopamine_train(base_dir,
hidden_layer_size,
gamma,
learning_rate,
num_train_steps,
network='chain'):
"""Train an agent using dopamine."""
runner = run_experiment.Runner(
base_dir,
functools.partial(
_create_agent,
hidden_layer_size=hidden_layer_size,
gamma=gamma,
learning_rate=learning_rate),
functools.partial(_create_environment, network=network),
num_iterations=num_train_steps,
training_steps=500,
evaluation_steps=100,
max_steps_per_episode=20)
runner.run_experiment()
return runner
def dopamine_eval(runner, patient0, seed=100):
"""Evaluate an agent."""
base_env = runner._environment.environment # pylint: disable=protected-access
initial_health_state = np.zeros_like(
base_env.initial_params.initial_health_state)
initial_health_state[patient0] = 1
base_env.set_initial_health_state(initial_health_state)
base_env.seed(seed)
base_env.reset()
metrics = {
'state_tracker': infectious_disease_exp.StateTracker(base_env),
'sick-days': infectious_disease_exp.DayTracker(base_env, 1)
}
runner._agent.eval_mode = True # pylint: disable=protected-access
runner._run_one_episode() # pylint: disable=protected-access
retval = {name: metric.measure(base_env) for name, metric in metrics.items()}
retval['actions'] = [step.action for step in base_env.history]
return retval
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable-msg=too-many-arguments, too-many-locals, assignment-from-no-return
""" Conv Int8 functional and performance testing"""
import sys
import logging
import numpy as np
import tvm
from tvm import te
from tvm import topi
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOGGER = logging.getLogger("test_conv_int8_intel")
LOGGER.disabled = False
# All the WORKLOADS from Resnet except first layer
# Workload is ['height', 'width', 'in_filter', 'out_filter',
# 'hkernel', 'wkernel', 'hpad', 'wpad', 'hstride', 'wstride'])
WORKLOADS = [
(56, 56, 64, 64, 3, 3, 1, 1, 1, 1),
(56, 56, 64, 64, 1, 1, 0, 0, 1, 1),
(56, 56, 64, 128, 3, 3, 1, 1, 2, 2),
(56, 56, 64, 128, 1, 1, 0, 0, 2, 2),
(28, 28, 128, 128, 3, 3, 1, 1, 1, 1),
(28, 28, 128, 256, 3, 3, 1, 1, 2, 2),
(28, 28, 128, 256, 1, 1, 0, 0, 2, 2),
(14, 14, 256, 256, 3, 3, 1, 1, 1, 1),
(14, 14, 256, 512, 3, 3, 1, 1, 2, 2),
(14, 14, 256, 512, 1, 1, 0, 0, 2, 2),
(7, 7, 512, 512, 3, 3, 1, 1, 1, 1),
(56, 56, 64, 256, 1, 1, 0, 0, 1, 1),
(56, 56, 256, 64, 1, 1, 0, 0, 1, 1),
(56, 56, 256, 128, 1, 1, 0, 0, 2, 2),
(28, 28, 128, 512, 1, 1, 0, 0, 1, 1),
(56, 56, 256, 512, 1, 1, 0, 0, 2, 2),
(28, 28, 512, 128, 1, 1, 0, 0, 1, 1),
(28, 28, 512, 256, 1, 1, 0, 0, 2, 2),
(14, 14, 256, 1024, 1, 1, 0, 0, 1, 1),
(28, 28, 512, 1024, 1, 1, 0, 0, 2, 2),
(14, 14, 1024, 256, 1, 1, 0, 0, 1, 1),
(14, 14, 1024, 512, 1, 1, 0, 0, 2, 2),
(7, 7, 512, 2048, 1, 1, 0, 0, 1, 1),
(14, 14, 1024, 2048, 1, 1, 0, 0, 2, 2),
(7, 7, 2048, 512, 1, 1, 0, 0, 1, 1),
]
TARGET_NAME = "llvm -mcpu=skylake-avx512"
NUM_VEC_LANES = 16
CTX = tvm.context(TARGET_NAME, 0)
def get_shape(
im_height, im_width, in_filter, out_filter, k_h, k_w, hpad, wpad, hstride, wstride, out_dtype
):
"""
Finds out the shape of all data structures
"""
## Find shapes
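    # Worked example (illustrative, editor): for the first Resnet workload
    # above, get_shape(56, 56, 64, 64, 3, 3, 1, 1, 1, 1, "int32") with
    # NUM_VEC_LANES=16 yields data (1, 4, 56, 56, 16),
    # kernel (4, 4, 3, 3, 4, 16, 4) and output (1, 4, 56, 56, 16).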
data_shape = (1, in_filter // NUM_VEC_LANES, im_height, im_width, NUM_VEC_LANES)
if out_dtype == "int32":
kernel_shape = (
out_filter // NUM_VEC_LANES,
in_filter // NUM_VEC_LANES,
k_h,
k_w,
NUM_VEC_LANES // 4,
NUM_VEC_LANES,
4,
)
elif out_dtype == "float32":
kernel_shape = (
out_filter // NUM_VEC_LANES,
in_filter // NUM_VEC_LANES,
k_h,
k_w,
NUM_VEC_LANES,
NUM_VEC_LANES,
)
out_height = (im_height + 2 * hpad - k_h) // hstride + 1
out_width = (im_width + 2 * wpad - k_w) // wstride + 1
o_shape = (1, out_filter // NUM_VEC_LANES, out_height, out_width, NUM_VEC_LANES)
return (data_shape, kernel_shape, o_shape)
def run_inference(
data_dtype,
kernel_dtype,
out_dtype,
im_height,
im_width,
in_filter,
out_filter,
k_h,
k_w,
hpad,
wpad,
hstride,
wstride,
):
"""
Runs the inference and checks the functional correctness between
compute and schedule outputs
"""
(data_shape, kernel_shape, o_shape) = get_shape(
im_height,
im_width,
in_filter,
out_filter,
k_h,
k_w,
hpad,
wpad,
hstride,
wstride,
out_dtype,
)
# Create TVM placeholders
data = te.placeholder(data_shape, name="data", dtype=data_dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=kernel_dtype)
# Create the numpy arrays to be used for executing conv models
if data_dtype == "float32":
data_array = tvm.nd.array(np.random.rand(*data_shape).astype(dtype=data_dtype), CTX)
kernel_array = tvm.nd.array(np.random.rand(*kernel_shape).astype(dtype=kernel_dtype), CTX)
else:
data_array = tvm.nd.array(np.random.randint(100, size=data_shape).astype(data_dtype))
kernel_array = tvm.nd.array(np.random.randint(100, size=kernel_shape).astype(kernel_dtype))
    # c_orig will be used for declaration output
# c_sch will be used for scheduled computation output
c_orig = tvm.nd.array(np.zeros(o_shape, dtype=out_dtype), CTX)
c_sch = tvm.nd.array(np.zeros(o_shape, dtype=out_dtype), CTX)
with tvm.target.Target(TARGET_NAME):
conv = topi.nn.conv2d_NCHWc(
data,
kernel,
stride=hstride,
padding=hpad,
dilation=(1, 1),
layout="NCHWc",
out_layout="NCHWc",
out_dtype=out_dtype,
)
out = topi.nn.relu(conv)
sch = te.create_schedule(out.op)
func = tvm.build(sch, [data, kernel, out], target=TARGET_NAME, name="out")
func(data_array, kernel_array, c_orig)
LOGGER.debug(tvm.lower(sch, [data, kernel], simple_mode=True))
# Generate and run the optimized schedule
sconv = topi.generic.nn.schedule_conv2d_NCHWc(outs=[out])
func = tvm.build(sconv, [data, kernel, out], target=TARGET_NAME, name="conv")
func(data_array, kernel_array, c_sch)
# Functional check
if data_dtype == "uint8":
np.testing.assert_equal(c_orig.asnumpy(), c_sch.asnumpy())
else:
assert np.allclose(c_orig.asnumpy(), c_sch.asnumpy())
evaluator = func.time_evaluator(func.entry_name, CTX, number=1000)
LOGGER.debug(tvm.lower(sconv, [data, kernel], simple_mode=True))
return evaluator(data_array, kernel_array, c_sch).mean
if __name__ == "__main__":
LOGGER.info("Workload, Kernel_size, FP32_time, INT8_time, Speedup")
SPEEDUP_ARRAY = []
for i, wkl in enumerate(WORKLOADS):
fp32_time = run_inference("float32", "float32", "float32", *wkl)
int8_time = run_inference("uint8", "int8", "int32", *wkl)
kernel_h = wkl[4]
kernel_w = wkl[5]
LOGGER.info(
"Workload#"
+ str(i)
+ ", "
+ str(kernel_h)
+ "x"
+ str(kernel_w)
+ ", "
+ str(fp32_time)
+ ", "
+ str(int8_time)
+ ", "
+ str(fp32_time / int8_time)
)
SPEEDUP_ARRAY.append(fp32_time / int8_time)
LOGGER.info("Average speedup --> %s" % str(sum(SPEEDUP_ARRAY) / float(len(SPEEDUP_ARRAY))))
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Curses-Based Command-Line Interface of TensorFlow Debugger (tfdbg)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import curses
from curses import textpad
import signal
import sys
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import tensor_format
class CursesUI(object):
"""Curses-based Command-line UI.
In this class, the methods with the prefix "_screen_" are the methods that
interact with the actual terminal using the curses library.
"""
CLI_PROMPT = "tfdbg> "
CLI_EXIT_COMMANDS = ["exit", "quit"]
CLI_TERMINATOR_KEY = 7 # Terminator key for input text box.
CLI_TAB_KEY = ord("\t")
REGEX_SEARCH_PREFIX = "/"
TENSOR_INDICES_NAVIGATION_PREFIX = "@"
ERROR_MESSAGE_PREFIX = "ERROR: "
# Possible Enter keys. 343 is curses key code for the num-pad Enter key when
# num lock is off.
CLI_CR_KEYS = [ord("\n"), ord("\r"), 343]
_SCROLL_REFRESH = "refresh"
_SCROLL_UP = "up"
_SCROLL_DOWN = "down"
_SCROLL_HOME = "home"
_SCROLL_END = "end"
_SCROLL_TO_LINE_INDEX = "scroll_to_line_index"
_FOREGROUND_COLORS = {
"white": curses.COLOR_WHITE,
"red": curses.COLOR_RED,
"green": curses.COLOR_GREEN,
"yellow": curses.COLOR_YELLOW,
"blue": curses.COLOR_BLUE,
"magenta": curses.COLOR_MAGENTA,
"black": curses.COLOR_BLACK,
}
_BACKGROUND_COLORS = {
"white": curses.COLOR_WHITE,
"black": curses.COLOR_BLACK,
}
# Font attribute for search and highlighting.
_SEARCH_HIGHLIGHT_FONT_ATTR = "black_on_white"
_ARRAY_INDICES_COLOR_PAIR = "black_on_white"
_ERROR_TOAST_COLOR_PAIR = "red_on_white"
_STATUS_BAR_COLOR_PAIR = "black_on_white"
def __init__(self):
self._screen_init()
self._screen_refresh_size()
# TODO(cais): Error out if the size of the screen is too small.
# Initialize some UI component size and locations.
self._init_layout()
self._command_handler_registry = (
debugger_cli_common.CommandHandlerRegistry())
# Create tab completion registry and register the empty-str (top-level)
# tab-completion context with it.
self._tab_completion_registry = debugger_cli_common.TabCompletionRegistry()
# Create top-level tab-completion context and register the exit and help
# commands.
self._tab_completion_registry.register_tab_comp_context(
[""], self.CLI_EXIT_COMMANDS +
[debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND] +
debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND_ALIASES)
self._command_history_store = debugger_cli_common.CommandHistory()
# Active list of command history, used in history navigation.
    # _command_history_store holds all the history commands the CLI has
    # received, up to a size limit. _active_command_history is the history
    # currently being navigated in, e.g., using the Up/Down keys. The latter
    # can be different from the former during prefixed or regex-based history
    # navigation, e.g., when the user enters the beginning of a command and
    # hits Up.
self._active_command_history = []
# Pointer to the current position in the history sequence.
# 0 means it is a new command being keyed in.
self._command_pointer = 0
self._command_history_limit = 100
self._pending_command = ""
# State related to screen output.
self._output_pad = None
self._output_pad_row = 0
self._output_array_pointer_indices = None
self._curr_unwrapped_output = None
self._curr_wrapped_output = None
# Register signal handler for SIGINT.
signal.signal(signal.SIGINT, self._interrupt_handler)
def _init_layout(self):
"""Initialize the layout of UI components.
Initialize the location and size of UI components such as command textbox
and output region according to the terminal size.
"""
# NamedTuple for rectangular locations on screen
self.rectangle = collections.namedtuple("rectangle",
"top left bottom right")
# Height of command text box
self._command_textbox_height = 2
self._title_row = 0
# Top row index of the output pad.
# A "pad" is a curses object that holds lines of text and not limited to
# screen size. It can be rendered on the screen partially with scroll
# parameters specified.
self._output_top_row = 1
# Number of rows that the output pad has.
self._output_num_rows = (
self._max_y - self._output_top_row - self._command_textbox_height - 1)
# Row index of scroll information line: Taking into account the zero-based
# row indexing and the command textbox area under the scroll information
# row.
self._output_scroll_row = self._max_y - 1 - self._command_textbox_height
    # Rows used to display tab-completion candidates.
self._candidates_top_row = self._output_scroll_row - 4
self._candidates_bottom_row = self._output_scroll_row - 1
# Maximum number of lines the candidates display can have.
self._candidates_max_lines = int(self._output_num_rows / 2)
self.max_output_lines = 10000
# Regex search state.
self._curr_search_regex = None
self._unwrapped_regex_match_lines = []
# Size of view port on screen, which is always smaller or equal to the
# screen size.
self._output_pad_screen_height = self._output_num_rows - 1
self._output_pad_screen_width = self._max_x - 1
self._output_pad_screen_location = self.rectangle(
top=self._output_top_row,
left=0,
bottom=self._output_top_row + self._output_num_rows,
right=self._output_pad_screen_width)
def _screen_init(self):
"""Screen initialization.
Creates curses stdscr and initialize the color pairs for display.
"""
self._stdscr = curses.initscr()
self._command_window = None
# Prepare color pairs.
curses.start_color()
self._color_pairs = {}
color_index = 0
for fg_color in self._FOREGROUND_COLORS:
for bg_color in self._BACKGROUND_COLORS:
color_index += 1
curses.init_pair(color_index, self._FOREGROUND_COLORS[fg_color],
self._BACKGROUND_COLORS[bg_color])
color_name = fg_color
if bg_color != "black":
color_name += "_on_" + bg_color
self._color_pairs[color_name] = curses.color_pair(color_index)
# A_BOLD is not really a "color". But place it here for convenience.
self._color_pairs["bold"] = curses.A_BOLD
# Default color pair to use when a specified color pair does not exist.
self._default_color_pair = self._color_pairs["white"]
def _screen_launch(self):
"""Launch the curses screen."""
curses.noecho()
curses.cbreak()
self._stdscr.keypad(1)
self._screen_create_command_window()
def _screen_create_command_window(self):
"""Create command window according to screen size."""
if self._command_window:
del self._command_window
self._command_window = curses.newwin(
self._command_textbox_height, self._max_x - len(self.CLI_PROMPT),
self._max_y - self._command_textbox_height, len(self.CLI_PROMPT))
def _screen_refresh(self):
self._stdscr.refresh()
def _screen_terminate(self):
"""Terminate the curses screen."""
self._stdscr.keypad(0)
curses.nocbreak()
curses.echo()
curses.endwin()
# Remove SIGINT handler.
signal.signal(signal.SIGINT, signal.SIG_DFL)
def run_ui(self, init_command=None, title=None, title_color=None):
"""Run the Curses CLI.
Args:
init_command: (str) Optional command to run on CLI start up.
title: (str) Optional title to display in the CLI.
title_color: (str) Optional color of the title, e.g., "yellow".
Returns:
An exit token of arbitrary type. Can be None.
"""
self._screen_launch()
# Optional initial command.
if init_command is not None:
self._dispatch_command(init_command)
if title is not None:
self._title(title, title_color=title_color)
# CLI main loop.
exit_token = self._ui_loop()
self._screen_terminate()
return exit_token
def register_command_handler(self,
prefix,
handler,
help_info,
prefix_aliases=None):
"""A wrapper around CommandHandlerRegistry.register_command_handler().
In addition to calling the wrapped register_command_handler() method, this
method also registers the top-level tab-completion context based on the
command prefixes and their aliases.
See the doc string of the wrapped method for more details on the args.
Args:
prefix: (str) command prefix.
handler: (callable) command handler.
help_info: (str) help information.
prefix_aliases: (list of str) aliases of the command prefix.
"""
self._command_handler_registry.register_command_handler(
prefix, handler, help_info, prefix_aliases=prefix_aliases)
self._tab_completion_registry.extend_comp_items("", [prefix])
if prefix_aliases:
self._tab_completion_registry.extend_comp_items("", prefix_aliases)
def register_tab_comp_context(self, *args, **kwargs):
"""Wrapper around TabCompletionRegistry.register_tab_comp_context()."""
self._tab_completion_registry.register_tab_comp_context(*args, **kwargs)
def set_help_intro(self, help_intro):
"""Set an introductory message to the help output of the command registry.
Args:
help_intro: (list of str) Text lines appended to the beginning of the
        output of the command "help", as introductory information.
"""
self._command_handler_registry.set_help_intro(help_intro=help_intro)
def get_help(self):
return self._command_handler_registry.get_help()
def _screen_create_command_textbox(self, existing_command):
"""Create command textbox on screen.
Args:
existing_command: (str) A command string to put in the textbox right
after its creation.
"""
# Display the tfdbg prompt.
self._stdscr.addstr(self._max_y - self._command_textbox_height, 0,
self.CLI_PROMPT, curses.A_BOLD)
self._stdscr.refresh()
self._command_window.clear()
# Command text box.
self._command_textbox = textpad.Textbox(
self._command_window, insert_mode=True)
# Enter existing command.
self._auto_key_in(existing_command)
def _ui_loop(self):
"""Command-line UI loop.
Returns:
An exit token of arbitrary type. The token can be None.
"""
while True:
# Enter history command if pointer is in history (> 0):
if self._command_pointer > 0:
existing_command = self._active_command_history[-self._command_pointer]
else:
existing_command = self._pending_command
self._screen_create_command_textbox(existing_command)
command, terminator, pending_command_changed = self._get_user_command()
if terminator in self.CLI_CR_KEYS:
exit_token = self._dispatch_command(command)
if exit_token is not None:
return exit_token
elif terminator == self.CLI_TAB_KEY:
tab_completed = self._tab_complete(command)
self._pending_command = tab_completed
        self._command_pointer = 0
elif pending_command_changed:
self._pending_command = command
return
def _get_user_command(self):
"""Get user command from UI.
Returns:
command: (str) The user-entered command.
terminator: (str) Terminator type for the command.
If command is a normal command entered with the Enter key, the value
will be the key itself. If this is a tab completion call (using the
Tab key), the value will reflect that as well.
pending_command_changed: (bool) If the pending command has changed.
Used during command history navigation.
"""
# First, reset textbox state variables.
self._textbox_curr_terminator = None
self._textbox_pending_command_changed = False
command = self._screen_get_user_command()
command = self._strip_terminator(command)
return (command, self._textbox_curr_terminator,
self._textbox_pending_command_changed)
def _screen_get_user_command(self):
return self._command_textbox.edit(validate=self._on_textbox_keypress)
def _strip_terminator(self, command):
for v in self.CLI_CR_KEYS:
if v < 256:
command = command.replace(chr(v), "")
return command.strip()
def _screen_refresh_size(self):
self._max_y, self._max_x = self._stdscr.getmaxyx()
def _dispatch_command(self, command):
"""Dispatch user command.
Args:
command: (str) Command to dispatch.
Returns:
An exit token object. None value means that the UI loop should not exit.
A non-None value means the UI loop should exit.
"""
if command in self.CLI_EXIT_COMMANDS:
# Explicit user command-triggered exit: EXPLICIT_USER_EXIT as the exit
# token.
return debugger_cli_common.EXPLICIT_USER_EXIT
if command:
self._command_history_store.add_command(command)
if (command.startswith(self.REGEX_SEARCH_PREFIX) and
self._curr_unwrapped_output):
if len(command) > len(self.REGEX_SEARCH_PREFIX):
# Command is like "/regex". Perform regex search.
regex = command[len(self.REGEX_SEARCH_PREFIX):]
self._curr_search_regex = regex
self._display_output(self._curr_unwrapped_output, highlight_regex=regex)
elif self._unwrapped_regex_match_lines:
# Command is "/". Continue scrolling down matching lines.
self._display_output(
self._curr_unwrapped_output,
is_refresh=True,
highlight_regex=self._curr_search_regex)
self._command_pointer = 0
self._pending_command = ""
return
elif command.startswith(self.TENSOR_INDICES_NAVIGATION_PREFIX):
indices_str = command[1:].strip()
if indices_str:
try:
indices = command_parser.parse_indices(indices_str)
omitted, line_index, _, _ = tensor_format.locate_tensor_element(
self._curr_wrapped_output, indices)
if not omitted:
self._scroll_output(
self._SCROLL_TO_LINE_INDEX, line_index=line_index)
except Exception as e: # pylint: disable=broad-except
self._error_toast(str(e))
else:
self._error_toast("Empty indices.")
return
prefix, args = self._parse_command(command)
if not prefix:
# Empty command: take no action. Should not exit.
return
screen_info = {"cols": self._max_x}
exit_token = None
if self._command_handler_registry.is_registered(prefix):
try:
screen_output = self._command_handler_registry.dispatch_command(
prefix, args, screen_info=screen_info)
except debugger_cli_common.CommandLineExit as e:
exit_token = e.exit_token
else:
screen_output = debugger_cli_common.RichTextLines([
self.ERROR_MESSAGE_PREFIX + "Invalid command prefix \"%s\"" % prefix
])
# Clear active command history. Until next up/down history navigation
# occurs, it will stay empty.
self._active_command_history = []
if exit_token is not None:
return exit_token
self._display_output(screen_output)
self._command_pointer = 0
self._pending_command = ""
def _parse_command(self, command):
"""Parse a command string into prefix and arguments.
Args:
command: (str) Command string to be parsed.
Returns:
prefix: (str) The command prefix.
args: (list of str) The command arguments (i.e., not including the
prefix).
"""
command = command.strip()
if not command:
return "", []
command_items = command_parser.parse_command(command)
return command_items[0], command_items[1:]
def _screen_gather_textbox_str(self):
"""Gather the text string in the command text box.
Returns:
(str) the current text string in the command textbox, excluding any
return keys.
"""
txt = self._command_textbox.gather()
return txt.strip()
def _on_textbox_keypress(self, x):
"""Text box key validator: Callback of key strokes.
Handles a user's keypress in the input text box. Translates certain keys to
terminator keys for the textbox to allow its edit() method to return.
Also handles special key-triggered events such as PgUp/PgDown scrolling of
the screen output.
Args:
x: (int) Key code.
Returns:
(int) A translated key code. In most cases, this is identical to the
input x. However, if x is a Return key, the return value will be
CLI_TERMINATOR_KEY, so that the text box's edit() method can return.
Raises:
TypeError: If the input x is not of type int.
"""
if not isinstance(x, int):
raise TypeError("Key validator expected type int, received type %s" %
type(x))
if x in self.CLI_CR_KEYS:
# Make Enter key the terminator
self._textbox_curr_terminator = x
return self.CLI_TERMINATOR_KEY
elif x == self.CLI_TAB_KEY:
self._textbox_curr_terminator = self.CLI_TAB_KEY
return self.CLI_TERMINATOR_KEY
elif x == curses.KEY_PPAGE:
self._scroll_output(self._SCROLL_UP)
return x
elif x == curses.KEY_NPAGE:
self._scroll_output(self._SCROLL_DOWN)
return x
elif x == curses.KEY_HOME:
self._scroll_output(self._SCROLL_HOME)
return x
elif x == curses.KEY_END:
self._scroll_output(self._SCROLL_END)
return x
elif x in [curses.KEY_UP, curses.KEY_DOWN]:
# Command history navigation.
if not self._active_command_history:
hist_prefix = self._screen_gather_textbox_str()
self._active_command_history = (
self._command_history_store.lookup_prefix(
hist_prefix, self._command_history_limit))
if self._active_command_history:
if x == curses.KEY_UP:
if self._command_pointer < len(self._active_command_history):
self._command_pointer += 1
elif x == curses.KEY_DOWN:
if self._command_pointer > 0:
self._command_pointer -= 1
else:
self._command_pointer = 0
self._textbox_curr_terminator = x
# Force return from the textbox edit(), so that the textbox can be
# redrawn with a history command entered.
return self.CLI_TERMINATOR_KEY
elif x == curses.KEY_RESIZE:
# Respond to terminal resize.
self._screen_refresh_size()
self._init_layout()
self._screen_create_command_window()
if self._curr_unwrapped_output is not None:
# Force render screen output again, under new screen size.
self._output_pad = self._display_output(
self._curr_unwrapped_output, is_refresh=True)
# Force return from the textbox edit(), so that the textbox can be
# redrawn.
return self.CLI_TERMINATOR_KEY
else:
# Mark the pending command as modified.
self._textbox_pending_command_changed = True
# Invalidate active command history.
self._command_pointer = 0
self._active_command_history = []
return x
def _title(self, title, title_color=None):
"""Display title.
Args:
title: (str) The title to display.
title_color: (str) Color of the title, e.g., "yellow".
"""
# Pad input title str with "-" and space characters to make it pretty.
self._title_line = "--- %s " % title
if len(self._title_line) < self._max_x:
self._title_line += "-" * (self._max_x - len(self._title_line))
self._screen_draw_text_line(
self._title_row, self._title_line, color=title_color)
def _auto_key_in(self, command):
"""Automatically key in a command to the command Textbox.
Args:
command: The command, as a string.
"""
for c in command:
self._command_textbox.do_command(ord(c))
def _screen_draw_text_line(self, row, line, attr=curses.A_NORMAL, color=None):
"""Render a line of text on the screen.
Args:
row: (int) Row index.
line: (str) The line content.
attr: curses font attribute.
color: (str) font foreground color name.
Raises:
TypeError: If row is not of type int.
"""
if not isinstance(row, int):
raise TypeError("Invalid type in row")
if len(line) > self._max_x:
line = line[:self._max_x]
if color is None:
self._stdscr.addstr(row, 0, line, attr)
else:
self._stdscr.addstr(row, 0, line, self._color_pairs[color])
self._screen_refresh()
def _screen_new_output_pad(self, rows, cols):
"""Generate a new pad on the screen.
Args:
rows: (int) Number of rows the pad will have: not limited to screen size.
cols: (int) Number of columns the pad will have: not limited to screen
size.
Returns:
A curses textpad object.
"""
return curses.newpad(rows, cols)
def _screen_display_output(self, output):
"""Actually render text output on the screen.
Wraps the lines according to screen width. Pad lines below according to
screen height so that the user can scroll the output to a state where
the last non-empty line is on the top of the screen. Then renders the
lines on the screen.
Args:
output: (RichTextLines) text lines to display on the screen. These lines
may have widths exceeding the screen width. This method will take care
of the wrapping.
Returns:
(List of int) A list of line indices, in the wrapped output, where there
are regex matches.
"""
# Wrap the output lines according to screen width.
self._curr_wrapped_output, wrapped_line_indices = (
debugger_cli_common.wrap_rich_text_lines(output, self._max_x - 1))
# Append lines to curr_wrapped_output so that the user can scroll to a
# state where the last text line is on the top of the output area.
self._curr_wrapped_output.lines.extend([""] * (self._output_num_rows - 1))
# Limit number of lines displayed to avoid curses overflow problems.
if self._curr_wrapped_output.num_lines() > self.max_output_lines:
self._curr_wrapped_output = self._curr_wrapped_output.slice(
0, self.max_output_lines)
self._curr_wrapped_output.lines.append("Output cut off at %d lines!" %
self.max_output_lines)
self._curr_wrapped_output.font_attr_segs[self.max_output_lines] = [
(0, len(output.lines[-1]), "magenta")
]
(self._output_pad, self._output_pad_height,
self._output_pad_width) = self._display_lines(self._curr_wrapped_output,
self._output_num_rows)
# The indices of lines with regex matches (if any) need to be mapped to
# indices of wrapped lines.
return [
wrapped_line_indices[line]
for line in self._unwrapped_regex_match_lines
]
def _display_output(self, output, is_refresh=False, highlight_regex=None):
"""Display text output in a scrollable text pad.
This method does some preprocessing on the text lines, render them on the
screen and scroll to the appropriate line. These are done according to regex
highlighting requests (if any), scroll-to-next-match requests (if any),
and screen refresh requests (if any).
    TODO(cais): Separate these unrelated requests to increase clarity and
maintainability.
Args:
output: A RichTextLines object that is the screen output text.
is_refresh: (bool) Is this a refreshing display with existing output.
highlight_regex: (str) Optional string representing the regex used to
search and highlight in the current screen output.
"""
if highlight_regex:
try:
output = debugger_cli_common.regex_find(
output, highlight_regex, font_attr=self._SEARCH_HIGHLIGHT_FONT_ATTR)
except ValueError as e:
self._error_toast(str(e))
return
if not is_refresh:
# Perform new regex search on the current output.
self._unwrapped_regex_match_lines = output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY]
else:
# Continue scrolling down.
self._output_pad_row += 1
else:
self._curr_unwrapped_output = output
self._unwrapped_regex_match_lines = []
# Display output on the screen.
wrapped_regex_match_lines = self._screen_display_output(output)
    # Now that the text lines are displayed on the screen, scroll to the
# appropriate line according to previous scrolling state and regex search
# and highlighting state.
if highlight_regex:
next_match_line = -1
for match_line in wrapped_regex_match_lines:
if match_line >= self._output_pad_row:
next_match_line = match_line
break
if next_match_line >= 0:
self._scroll_output(
self._SCROLL_TO_LINE_INDEX, line_index=next_match_line)
else:
# Regex search found no match >= current line number. Display message
# stating as such.
self._toast("Pattern not found", color=self._ERROR_TOAST_COLOR_PAIR)
elif is_refresh:
self._scroll_output(self._SCROLL_REFRESH)
else:
self._output_pad_row = 0
self._scroll_output(self._SCROLL_HOME)
def _display_lines(self, output, min_num_rows):
"""Display RichTextLines object on screen.
Args:
output: A RichTextLines object.
min_num_rows: (int) Minimum number of output rows.
Returns:
1) The text pad object used to display the text.
2) (int) number of rows of the text pad, which may exceed screen size.
3) (int) number of columns of the text pad.
Raises:
ValueError: If input argument "output" is invalid.
"""
if not isinstance(output, debugger_cli_common.RichTextLines):
raise ValueError(
"Output is required to be an instance of RichTextLines, but is not.")
self._screen_refresh()
# Number of rows the output area will have.
rows = max(min_num_rows, len(output.lines))
# Size of the output pad, which may exceed screen size and require
# scrolling.
cols = self._max_x - 1
# Create new output pad.
pad = self._screen_new_output_pad(rows, cols)
for i in xrange(len(output.lines)):
if i in output.font_attr_segs:
self._screen_add_line_to_output_pad(
pad, i, output.lines[i], color_segments=output.font_attr_segs[i])
else:
self._screen_add_line_to_output_pad(pad, i, output.lines[i])
return pad, rows, cols
def _screen_add_line_to_output_pad(self, pad, row, txt, color_segments=None):
"""Render a line in a text pad.
Assumes: segments in color_segments are sorted in ascending order of the
beginning index.
    Note: Gaps between the segments are allowed and will be filled in with a
default color.
Args:
pad: The text pad to render the line in.
row: Row index, as an int.
txt: The text to be displayed on the specified row, as a str.
color_segments: A list of 3-tuples. Each tuple represents the beginning
and the end of a color segment, in the form of a right-open interval:
[start, end). The last element of the tuple is a color string, e.g.,
"red".
    Raises:
TypeError: If color_segments is not of type list.
"""
if not color_segments:
pad.addstr(row, 0, txt, self._default_color_pair)
return
if not isinstance(color_segments, list):
raise TypeError("Input color_segments needs to be a list, but is not.")
all_segments = []
all_color_pairs = []
# Process the beginning.
if color_segments[0][0] == 0:
pass
else:
all_segments.append((0, color_segments[0][0]))
all_color_pairs.append(self._default_color_pair)
for (curr_start, curr_end, curr_color), (next_start, _, _) in zip(
color_segments, color_segments[1:] + [(len(txt), None, None)]):
all_segments.append((curr_start, curr_end))
all_color_pairs.append(
self._color_pairs.get(curr_color, self._default_color_pair))
if curr_end < next_start:
# Fill in the gap with the default color.
all_segments.append((curr_end, next_start))
all_color_pairs.append(self._default_color_pair)
# Finally, draw all the segments.
for segment, color_pair in zip(all_segments, all_color_pairs):
pad.addstr(row, segment[0], txt[segment[0]:segment[1]], color_pair)
def _screen_scroll_output_pad(self, pad, viewport_top, viewport_left,
screen_location_top, screen_location_left,
screen_location_bottom, screen_location_right):
pad.refresh(viewport_top, viewport_left, screen_location_top,
screen_location_left, screen_location_bottom,
screen_location_right)
def _scroll_output(self, direction, line_index=None):
"""Scroll the output pad.
Args:
direction: _SCROLL_REFRESH, _SCROLL_UP, _SCROLL_DOWN, _SCROLL_HOME or
_SCROLL_END, _SCROLL_TO_LINE_INDEX
line_index: (int) Specifies the zero-based line index to scroll to.
Applicable only if direction is _SCROLL_TO_LINE_INDEX.
Raises:
ValueError: On invalid scroll direction.
TypeError: If line_index is not int and direction is
_SCROLL_TO_LINE_INDEX.
"""
if not self._output_pad:
# No output pad is present. Do nothing.
return
if direction == self._SCROLL_REFRESH:
pass
elif direction == self._SCROLL_UP:
# Scroll up
if self._output_pad_row - 1 >= 0:
self._output_pad_row -= 1
elif direction == self._SCROLL_DOWN:
# Scroll down
if self._output_pad_row + 1 < (
self._output_pad_height - self._output_pad_screen_height):
self._output_pad_row += 1
elif direction == self._SCROLL_HOME:
# Scroll to top
self._output_pad_row = 0
elif direction == self._SCROLL_END:
# Scroll to bottom
self._output_pad_row = (
self._output_pad_height - self._output_pad_screen_height - 1)
elif direction == self._SCROLL_TO_LINE_INDEX:
if not isinstance(line_index, int):
raise TypeError("Invalid line_index type (%s) under mode %s" %
(type(line_index), self._SCROLL_TO_LINE_INDEX))
self._output_pad_row = line_index
else:
raise ValueError("Unsupported scroll mode: %s" % direction)
# Actually scroll the output pad: refresh with new location.
self._screen_scroll_output_pad(self._output_pad, self._output_pad_row, 0,
self._output_pad_screen_location.top,
self._output_pad_screen_location.left,
self._output_pad_screen_location.bottom,
self._output_pad_screen_location.right)
if self._output_pad_height > self._output_pad_screen_height + 1:
# Display information about the scrolling of tall screen output.
self._scroll_info = "--- Scroll: %.2f%% " % (100.0 * (min(
1.0,
float(self._output_pad_row) /
(self._output_pad_height - self._output_pad_screen_height - 1))))
self._output_array_pointer_indices = self._show_array_indices()
# Add array indices information to scroll message.
if self._output_array_pointer_indices:
if self._output_array_pointer_indices[0]:
self._scroll_info += self._format_indices(
self._output_array_pointer_indices[0])
self._scroll_info += "-"
if self._output_array_pointer_indices[-1]:
self._scroll_info += self._format_indices(
self._output_array_pointer_indices[-1])
self._scroll_info += " "
if len(self._scroll_info) < self._max_x:
self._scroll_info += "-" * (self._max_x - len(self._scroll_info))
self._screen_draw_text_line(
self._output_scroll_row,
self._scroll_info,
color=self._STATUS_BAR_COLOR_PAIR)
else:
# Screen output is not tall enough to cause scrolling.
self._scroll_info = "-" * self._max_x
self._screen_draw_text_line(
self._output_scroll_row,
self._scroll_info,
color=self._STATUS_BAR_COLOR_PAIR)
def _format_indices(self, indices):
# Remove the spaces to make it compact.
return repr(indices).replace(" ", "")
def _show_array_indices(self):
"""Show array indices for the lines at the top and bottom of the output.
For the top line and bottom line of the output display area, show the
element indices of the array being displayed.
Returns:
      If either the top or the bottom row has any matching array indices,
a dict from line index (0 being the top of the display area, -1
being the bottom of the display area) to array element indices. For
example:
{0: [0, 0], -1: [10, 0]}
Otherwise, None.
"""
indices_top = self._show_array_index_at_line(0)
bottom_line_index = (self._output_pad_screen_location.bottom -
self._output_pad_screen_location.top - 1)
indices_bottom = self._show_array_index_at_line(bottom_line_index)
if indices_top or indices_bottom:
return {0: indices_top, -1: indices_bottom}
else:
return None
def _show_array_index_at_line(self, line_index):
"""Show array indices for the specified line in the display area.
Uses the line number to array indices map in the annotations field of the
RichTextLines object being displayed.
If the displayed RichTextLines object does not contain such a mapping,
will do nothing.
Args:
line_index: (int) 0-based line index from the top of the display area.
        For example, if line_index == 0, this method will display the array
indices for the line currently at the top of the display area.
Returns:
(list) The array indices at the specified line, if available. None, if
not available.
"""
# Examine whether the index information is available for the specified line
# number.
pointer = self._output_pad_row + line_index
if pointer in self._curr_wrapped_output.annotations:
indices = self._curr_wrapped_output.annotations[pointer]["i0"]
array_indices_str = self._format_indices(indices)
array_indices_info = "@" + array_indices_str
self._toast(
array_indices_info,
color=self._ARRAY_INDICES_COLOR_PAIR,
line_index=self._output_pad_screen_location.top + line_index)
return indices
else:
return None
def _tab_complete(self, command_str):
"""Perform tab completion.
Obtains tab completion candidates.
If there are no candidates, return command_str and take no other actions.
If there are candidates, display the candidates on screen and return
command_str + (common prefix of the candidates).
Args:
command_str: (str) The str in the command input textbox when Tab key is
hit.
Returns:
(str) Completed string. Could be the same as command_str if no completion
candidate is available. If candidate(s) are available, return command_str
appended by the common prefix of the candidates.
"""
command_str = command_str.lstrip()
if not command_str:
# Empty (top-level) context.
context = ""
prefix = ""
items = []
else:
items = command_str.split(" ")
if len(items) == 1:
# Single word: top-level context.
context = ""
prefix = items[0]
else:
# Multiple words.
context = items[0]
prefix = items[-1]
candidates, common_prefix = self._tab_completion_registry.get_completions(
context, prefix)
if candidates and len(candidates) > 1:
self._display_candidates(candidates)
else:
# In the case of len(candidates) == 1, the single completion will be
# entered to the textbox automatically. So there is no need to show any
# candidates.
self._display_candidates([])
if common_prefix:
# Common prefix is not None and non-empty. The completed string will
# incorporate the common prefix.
return " ".join(items[:-1] + [common_prefix])
else:
return " ".join(items)
def _display_candidates(self, candidates):
"""Show candidates (e.g., tab-completion candidates) on multiple lines.
Args:
candidates: (list of str) candidates.
"""
if self._curr_unwrapped_output:
# Force refresh screen output.
self._scroll_output(self._SCROLL_REFRESH)
if not candidates:
return
candidates_prefix = "Candidates: "
candidates_line = candidates_prefix + " ".join(candidates)
candidates_output = debugger_cli_common.RichTextLines(
candidates_line,
font_attr_segs={
0: [(len(candidates_prefix), len(candidates_line), "yellow")]
})
candidates_output, _ = debugger_cli_common.wrap_rich_text_lines(
candidates_output, self._max_x - 2)
# Calculate how many lines the candidate text should occupy. Limit it to
# a maximum value.
candidates_num_rows = min(
len(candidates_output.lines), self._candidates_max_lines)
self._candidates_top_row = (
self._candidates_bottom_row - candidates_num_rows + 1)
# Render the candidate text on screen.
pad, _, _ = self._display_lines(candidates_output, 0)
self._screen_scroll_output_pad(
pad, 0, 0, self._candidates_top_row, 0,
self._candidates_top_row + candidates_num_rows - 1, self._max_x - 1)
def _toast(self, message, color=None, line_index=None):
"""Display a one-line message on the screen.
By default, the toast is displayed in the line right above the scroll bar.
But the line location can be overridden with the line_index arg.
Args:
message: (str) the message to display.
color: (str) optional color attribute for the message.
line_index: (int) line index.
"""
pad, _, _ = self._display_lines(
debugger_cli_common.RichTextLines(
message,
font_attr_segs={0: [(0, len(message), color or "white")]}),
0)
right_end = min(len(message), self._max_x - 1)
if line_index is None:
line_index = self._output_scroll_row - 1
self._screen_scroll_output_pad(pad, 0, 0, line_index, 0, line_index,
right_end)
def _error_toast(self, message):
"""Display a one-line error message on screen.
Args:
message: The error message, without the preceding "ERROR: " substring.
"""
self._toast(
self.ERROR_MESSAGE_PREFIX + message, color=self._ERROR_TOAST_COLOR_PAIR)
def _interrupt_handler(self, signal_num, frame):
_ = signal_num # Unused.
_ = frame # Unused.
self._screen_terminate()
print("\ntfdbg: caught SIGINT; calling sys.exit(1).", file=sys.stderr)
sys.exit(1)
|
|
#!/usr/bin/env python
r'''
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import time
import random
import logging
import logging.handlers
import argparse
import threading
import textwrap
import atexit
import subprocess
import signal
import ConfigParser
import collections
import sys
from nogotofail.mitm.blame import Server as AppBlameServer
from nogotofail.mitm.connection import Server, RedirectConnection, SocksConnection, TproxyConnection
from nogotofail.mitm.connection import handlers
from nogotofail.mitm.connection.handlers import preconditions
from nogotofail.mitm.looper import MitmLoop
from nogotofail.mitm.util import routing, extras
LOG_FORMAT = logging.Formatter("%(asctime)-15s [%(levelname)s] %(message)s")
EVENT_FORMAT = logging.Formatter("%(message)s")
logger = logging.getLogger("nogotofail.mitm")
event_logger = logging.getLogger("event")
traffic_logger = logging.getLogger("traffic")
def build_selector(MITM_all=False):
def handler_selector(connection, app_blame):
if not MITM_all and not app_blame.client_available(
connection.client_addr):
return handlers.base.BaseConnectionHandler
return handlers.connection.LoggingHandler
return handler_selector
def build_ssl_selector(default_ssl_handlers, prob_MITM=0.5, MITM_all=False):
def attack_selector(connection, client_hello, app_blame):
if not MITM_all and not app_blame.client_available(
connection.client_addr):
return None
client_info = app_blame.clients.get(connection.client_addr)
client_info = client_info.info if client_info else None
if client_info:
attack_prob = client_info.get("Attack-Probability", prob_MITM)
ssl_handlers = client_info.get("Attacks", default_ssl_handlers)
else:
attack_prob = prob_MITM
ssl_handlers = default_ssl_handlers
if not ssl_handlers:
return None
if random.random() < attack_prob:
return random.choice(ssl_handlers)
return None
return attack_selector
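# A hedged usage sketch of the selector built above; ``SomeTlsAttack`` is a
# placeholder for any handler class taken from handlers.connection.handlers.map:
#
#   ssl_selector = build_ssl_selector([SomeTlsAttack], prob_MITM=0.25)
#   # For a TLS ClientHello from a known client, the selector returns one of
#   # the given handlers with probability 0.25 and None otherwise, unless the
#   # client reported its own "Attacks"/"Attack-Probability" settings.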
def build_data_selector(default_handlers, MITM_all, prob_attack=0.5):
internal = handlers.data.handlers.internal
def data_selector(connection, app_blame):
if not MITM_all and not app_blame.client_available(
connection.client_addr):
return internal + []
# Figure out our possible handlers
client_info = app_blame.clients.get(connection.client_addr)
client_info = client_info.info if client_info else None
if client_info:
handlers = client_info.get("Data-Attacks", default_handlers)
attack_prob = client_info.get("Attack-Probability", prob_attack)
else:
handlers = default_handlers
attack_prob = prob_attack
# Split handlers into passive/active
passive, active = [], []
for handler in handlers:
(active, passive)[handler.passive].append(handler)
# 1-p chance of not using any data attacks on a connection.
if random.random() >= attack_prob:
active = []
return internal + passive + active
return data_selector
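# A small illustration of the tuple-indexing trick used above to split handlers
# (assumes, as the selector does, that each handler has a boolean ``passive``
# attribute; ``my_handlers`` is a placeholder list):
#
#   passive, active = [], []
#   for handler in my_handlers:
#       (active, passive)[handler.passive].append(handler)
#   # handler.passive == False -> index 0 -> appended to ``active``
#   # handler.passive == True  -> index 1 -> appended to ``passive``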
def build_server(port, blame, selector, ssl_selector, data_selector, block, ipv6, cls):
return Server(port, blame, handler_selector=selector,
ssl_handler_selector=ssl_selector,
data_handler_selector=data_selector,
block_non_clients=block,
ipv6=ipv6,
connection_class=cls)
def build_blame(port, cert, probability, attacks, data_attacks):
return AppBlameServer(port, cert, probability, attacks, data_attacks)
def set_redirect_rules(args):
port = args.port
ipv6 = args.ipv6
routing.enable_redirect_rules(port, ipv6=ipv6)
atexit.register(routing.disable_redirect_rules, ipv6=ipv6)
def set_tproxy_rules(args):
port = args.port
ipv6 = args.ipv6
routing.enable_tproxy_rules(port, ipv6=ipv6)
atexit.register(routing.disable_tproxy_rules, ipv6=ipv6)
# Traffic capture modes
Mode = collections.namedtuple("Mode", ["cls", "setup", "description"])
modes = {
"redirect": Mode(RedirectConnection,
set_redirect_rules,
"Use Iptables REDIRECT to route traffic. Ipv6 support is limited in this mode."),
"tproxy": Mode(TproxyConnection,
set_tproxy_rules,
"Use iptables TPROXY/mark to route traffic"),
"socks": Mode(SocksConnection,
None,
"Listen as a SOCKS server to route traffic"),
}
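# A hedged sketch of how a Mode entry is consumed later in run(); ``args`` is
# the parsed argparse namespace:
#
#   mode = modes["socks"]
#   if mode.setup:          # socks mode has no iptables setup step
#       mode.setup(args)
#   # mode.cls (here SocksConnection) is then passed to build_server() as the
#   # connection_class for the MITM server.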
default_mode = "tproxy"
def parse_args():
all_attacks = handlers.connection.handlers.map.keys()
default_attacks = [h.name for h in handlers.connection.handlers.default]
all_data = handlers.data.handlers.map.keys()
default_data = [h.name for h in handlers.data.handlers.default]
# Check for a config file
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("-c", "--config")
args, argv = parser.parse_known_args()
if args.config:
config = ConfigParser.SafeConfigParser()
config.read(args.config)
config = dict(config.items("nogotofail.mitm"))
if "attacks" in config:
config["attacks"] = config["attacks"].split(" ")
if "data" in config:
config["data"] = config["data"].split(" ")
else:
config = {}
modes_str = ("Supported modes:\n" +
"\n".join(["\n\t".join(
textwrap.wrap("%s - %s" % (name, mode.description), 80))
for name, mode in modes.items()]))
attacks_str = ("Supported attacks:\n" +
"\n".join(["\n\t".join(
textwrap.wrap("%s - %s" % (name, handler.description), 80))
for name, handler in handlers.connection.handlers.map.items()]))
data_str = ("Supported data handlers:\n" +
"\n".join(["\n\t".join(
textwrap.wrap("%s - %s" % (name, handler.description), 80))
for name, handler in handlers.data.handlers.map.items()]))
epilog = "\n\n".join([modes_str, attacks_str, data_str])
parser = (
argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter, epilog=epilog))
# Technically --config is eaten by the previous parser, this is just to make
# it show up in --help.
parser.add_argument(
"-c", "--config", help="Configuration file", metavar="FILE")
parser.add_argument(
"-p", "--probability", help="probably of attacking a SSL connection",
action="store", type=float, default=0.5)
parser.add_argument(
"-d", "--debug", help="Print debug output", action="store_true",
default=False)
parser.add_argument(
"-b", "--bridge", help="Bridge connections from hosts without a client",
action="store_false", default=True, dest="all")
parser.add_argument(
"-l", "--logfile", help="Log output file", action="store")
parser.add_argument(
"-e", "--eventlogfile", help="Event log output file", action="store")
parser.add_argument(
"-t", "--trafficfile", help="Traffic output file", action="store")
parser.add_argument(
"-q", "--quiet",
help="Quiet output. Only prints important messages.",
action="store_true", default=False)
parser.add_argument(
"--port", help="Port to bind the mitm to", action="store",
type=int, default=8080)
parser.add_argument(
"--cport", help="Port to listen for nogotofail clients on", action="store",
type=int, default=8443)
parser.add_argument(
"-6", "--ipv6",
help=("Route IPv6 traffic. "
"Requires support for ip6tables NAT redirect when in redirect mode (iptables > 1.4.17)"),
default=False, action="store_true")
parser.add_argument(
"-A", "--attacks",
help="Connection attacks to run. Supported attacks are " +
", ".join(all_attacks),
choices=handlers.connection.handlers.map, nargs="+", metavar="ATTACK",
action="store", default=default_attacks)
parser.add_argument(
"-D", "--data",
help="Data attacks to run. Supported attacks are " +
", ".join(all_data), choices=handlers.data.handlers.map, nargs="+",
metavar="ATTACK", action="store", default=default_data)
parser.add_argument(
"--serverssl", help="Run the app blame server with SSL using PEMFILE",
metavar="PEMFILE", action="store")
parser.add_argument(
"--block", help="Block connections with unknown blame info",
action="store_true", default=False)
parser.add_argument(
"--mode", help="Traffic capture mode. Options are " + ", ".join(modes.keys()),
choices=modes, metavar="MODE", action="store", default=default_mode)
parser.add_argument(
"-x", "--extrasdir", help="Directory containing extra files required by handlers",
default = "./")
parser.set_defaults(**config)
return parser.parse_args(argv)
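# A minimal sketch of the two-pass parsing pattern used above; the INI file
# below is hypothetical, but the section name matches the one read here:
#
#   # mitm.conf
#   #   [nogotofail.mitm]
#   #   probability = 0.2
#   #
#   # $ python mitm.py -c mitm.conf --port 9090
#
# Pass 1 extracts --config via parse_known_args(); pass 2 builds the full
# parser and parser.set_defaults(**config) turns the file's values into
# defaults, so flags given on the command line still take precedence.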
def sigterm_handler(num, frame):
"""Gracefully exit on a SIGTERM.
    atexit handlers aren't invoked on a SIGTERM, so our cleanup code would not
    run. Instead, catch the SIGTERM and call sys.exit, which triggers the
    atexit cleanup.
"""
sys.exit()
def setup_logging(args):
"""Setup logging handlers based on arguments
"""
handler = logging.StreamHandler()
handler.setFormatter(LOG_FORMAT)
if args.debug:
handler.setLevel(logging.DEBUG)
elif args.quiet:
handler.setLevel(logging.WARNING)
else:
handler.setLevel(logging.INFO)
logger.addHandler(handler)
if args.logfile:
handler = logging.handlers.WatchedFileHandler(args.logfile)
handler.setFormatter(LOG_FORMAT)
if args.debug:
handler.setLevel(logging.DEBUG)
else:
handler.setLevel(logging.INFO)
logger.addHandler(handler)
if args.eventlogfile:
handler = logging.handlers.WatchedFileHandler(args.eventlogfile)
else:
handler = logging.NullHandler()
handler.setLevel(logging.INFO)
event_logger.addHandler(handler)
event_logger.setLevel(logging.INFO)
if args.trafficfile:
handler = logging.handlers.WatchedFileHandler(args.trafficfile)
else:
handler = logging.NullHandler()
handler.setLevel(logging.INFO)
traffic_logger.addHandler(handler)
traffic_logger.setLevel(logging.INFO)
logger.setLevel(logging.DEBUG)
def run():
args = parse_args()
setup_logging(args)
extras.extras_dir = args.extrasdir
selector = build_selector(args.all)
attack_cls = [handlers.connection.handlers.map[name]
for name in args.attacks]
attack_cls = preconditions.filter_preconditions(attack_cls, logger)
data_cls = [handlers.data.handlers.map[name] for name in args.data]
data_cls = preconditions.filter_preconditions(data_cls, logger)
ssl_selector = build_ssl_selector(attack_cls, args.probability, args.all)
data_selector = build_data_selector(data_cls, args.all, prob_attack=args.probability)
logger.info("Starting...")
try:
signal.signal(signal.SIGTERM, sigterm_handler)
mode = modes[args.mode]
if mode.setup:
mode.setup(args)
blame = (
build_blame(
args.cport, args.serverssl, args.probability, attack_cls,
data_cls))
server = (
build_server(
args.port, blame, selector, ssl_selector,
data_selector, args.block, args.ipv6, mode.cls))
blame.start_listening()
server.start_listening()
# Run the main loop
looper = MitmLoop(blame, server)
looper.run()
except KeyboardInterrupt:
server.shutdown()
blame.shutdown()
except Exception as e:
logger.exception("Uncaught top level exception!")
logger.fatal("EXITING")
if __name__ == "__main__":
run()
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cStringIO as StringIO
from tempest.api.image import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class CreateRegisterImagesTest(base.BaseV1ImageTest):
"""Here we test the registration and creation of images."""
@test.attr(type='gate')
def test_register_then_upload(self):
# Register, then upload an image
properties = {'prop1': 'val1'}
_, body = self.create_image(name='New Name',
container_format='bare',
disk_format='raw',
is_public=False,
properties=properties)
self.assertIn('id', body)
image_id = body.get('id')
self.assertEqual('New Name', body.get('name'))
self.assertFalse(body.get('is_public'))
self.assertEqual('queued', body.get('status'))
for key, val in properties.items():
self.assertEqual(val, body.get('properties')[key])
# Now try uploading an image file
image_file = StringIO.StringIO(data_utils.random_bytes())
_, body = self.client.update_image(image_id, data=image_file)
self.assertIn('size', body)
self.assertEqual(1024, body.get('size'))
@test.attr(type='gate')
def test_register_remote_image(self):
# Register a new remote image
_, body = self.create_image(name='New Remote Image',
container_format='bare',
disk_format='raw', is_public=False,
location=CONF.image.http_image,
properties={'key1': 'value1',
'key2': 'value2'})
self.assertIn('id', body)
self.assertEqual('New Remote Image', body.get('name'))
self.assertFalse(body.get('is_public'))
self.assertEqual('active', body.get('status'))
properties = body.get('properties')
self.assertEqual(properties['key1'], 'value1')
self.assertEqual(properties['key2'], 'value2')
@test.attr(type='gate')
def test_register_http_image(self):
_, body = self.create_image(name='New Http Image',
container_format='bare',
disk_format='raw', is_public=False,
copy_from=CONF.image.http_image)
self.assertIn('id', body)
image_id = body.get('id')
self.assertEqual('New Http Image', body.get('name'))
self.assertFalse(body.get('is_public'))
self.client.wait_for_image_status(image_id, 'active')
self.client.get_image(image_id)
@test.attr(type='gate')
def test_register_image_with_min_ram(self):
# Register an image with min ram
properties = {'prop1': 'val1'}
_, body = self.create_image(name='New_image_with_min_ram',
container_format='bare',
disk_format='raw',
is_public=False,
min_ram=40,
properties=properties)
self.assertIn('id', body)
self.assertEqual('New_image_with_min_ram', body.get('name'))
self.assertFalse(body.get('is_public'))
self.assertEqual('queued', body.get('status'))
self.assertEqual(40, body.get('min_ram'))
for key, val in properties.items():
self.assertEqual(val, body.get('properties')[key])
self.client.delete_image(body['id'])
class ListImagesTest(base.BaseV1ImageTest):
"""
Here we test the listing of image information
"""
@classmethod
def resource_setup(cls):
super(ListImagesTest, cls).resource_setup()
# We add a few images here to test the listing functionality of
# the images API
img1 = cls._create_remote_image('one', 'bare', 'raw')
img2 = cls._create_remote_image('two', 'ami', 'ami')
img3 = cls._create_remote_image('dup', 'bare', 'raw')
img4 = cls._create_remote_image('dup', 'bare', 'raw')
img5 = cls._create_standard_image('1', 'ami', 'ami', 42)
img6 = cls._create_standard_image('2', 'ami', 'ami', 142)
img7 = cls._create_standard_image('33', 'bare', 'raw', 142)
img8 = cls._create_standard_image('33', 'bare', 'raw', 142)
cls.created_set = set(cls.created_images)
        # 4 remote images (img1-img4), 4 standard images (img5-img8)
cls.remote_set = set((img1, img2, img3, img4))
cls.standard_set = set((img5, img6, img7, img8))
# 5x bare, 3x ami
cls.bare_set = set((img1, img3, img4, img7, img8))
cls.ami_set = set((img2, img5, img6))
# 1x with size 42
cls.size42_set = set((img5,))
# 3x with size 142
cls.size142_set = set((img6, img7, img8,))
# dup named
cls.dup_set = set((img3, img4))
@classmethod
def _create_remote_image(cls, name, container_format, disk_format):
"""
Create a new remote image and return the ID of the newly-registered
image
"""
name = 'New Remote Image %s' % name
location = CONF.image.http_image
_, image = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
is_public=False,
location=location)
image_id = image['id']
return image_id
@classmethod
def _create_standard_image(cls, name, container_format,
disk_format, size):
"""
Create a new standard image and return the ID of the newly-registered
        image. The image data consists of ``size`` random bytes.
"""
image_file = StringIO.StringIO(data_utils.random_bytes(size))
name = 'New Standard Image %s' % name
_, image = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
is_public=False, data=image_file)
image_id = image['id']
return image_id
@test.attr(type='gate')
def test_index_no_params(self):
# Simple test to see all fixture images returned
_, images_list = self.client.image_list()
image_list = map(lambda x: x['id'], images_list)
for image_id in self.created_images:
self.assertIn(image_id, image_list)
@test.attr(type='gate')
def test_index_disk_format(self):
_, images_list = self.client.image_list(disk_format='ami')
for image in images_list:
self.assertEqual(image['disk_format'], 'ami')
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.ami_set <= result_set)
self.assertFalse(self.created_set - self.ami_set <= result_set)
@test.attr(type='gate')
def test_index_container_format(self):
_, images_list = self.client.image_list(container_format='bare')
for image in images_list:
self.assertEqual(image['container_format'], 'bare')
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.bare_set <= result_set)
self.assertFalse(self.created_set - self.bare_set <= result_set)
@test.attr(type='gate')
def test_index_max_size(self):
_, images_list = self.client.image_list(size_max=42)
for image in images_list:
self.assertTrue(image['size'] <= 42)
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.size42_set <= result_set)
self.assertFalse(self.created_set - self.size42_set <= result_set)
@test.attr(type='gate')
def test_index_min_size(self):
_, images_list = self.client.image_list(size_min=142)
for image in images_list:
self.assertTrue(image['size'] >= 142)
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.size142_set <= result_set)
self.assertFalse(self.size42_set <= result_set)
@test.attr(type='gate')
def test_index_status_active_detail(self):
_, images_list = self.client.image_list_detail(status='active',
sort_key='size',
sort_dir='desc')
top_size = images_list[0]['size'] # We have non-zero sized images
for image in images_list:
size = image['size']
self.assertTrue(size <= top_size)
top_size = size
self.assertEqual(image['status'], 'active')
@test.attr(type='gate')
def test_index_name(self):
_, images_list = self.client.image_list_detail(
name='New Remote Image dup')
result_set = set(map(lambda x: x['id'], images_list))
for image in images_list:
self.assertEqual(image['name'], 'New Remote Image dup')
self.assertTrue(self.dup_set <= result_set)
self.assertFalse(self.created_set - self.dup_set <= result_set)
class ListSnapshotImagesTest(base.BaseV1ImageTest):
@classmethod
def resource_setup(cls):
# This test class only uses nova v3 api to create snapshot
# as the similar test which uses nova v2 api already exists
# in nova v2 compute images api tests.
# Since nova v3 doesn't have images api proxy, this test
# class was added in the image api tests.
if not CONF.compute_feature_enabled.api_v3:
skip_msg = ("%s skipped as nova v3 api is not available" %
cls.__name__)
raise cls.skipException(skip_msg)
super(ListSnapshotImagesTest, cls).resource_setup()
cls.servers_client = cls.os.servers_v3_client
cls.servers = []
# We add a few images here to test the listing functionality of
# the images API
cls.snapshot = cls._create_snapshot(
'snapshot', CONF.compute.image_ref,
CONF.compute.flavor_ref)
cls.snapshot_set = set((cls.snapshot,))
image_file = StringIO.StringIO('*' * 42)
_, image = cls.create_image(name="Standard Image",
container_format='ami',
disk_format='ami',
is_public=False, data=image_file)
cls.image_id = image['id']
cls.client.wait_for_image_status(image['id'], 'active')
@classmethod
def resource_cleanup(cls):
for server in getattr(cls, "servers", []):
cls.servers_client.delete_server(server['id'])
super(ListSnapshotImagesTest, cls).resource_cleanup()
@classmethod
def _create_snapshot(cls, name, image_id, flavor, **kwargs):
_, server = cls.servers_client.create_server(
name, image_id, flavor, **kwargs)
cls.servers.append(server)
cls.servers_client.wait_for_server_status(
server['id'], 'ACTIVE')
resp, _ = cls.servers_client.create_image(server['id'], name)
image_id = data_utils.parse_image_id(resp['location'])
cls.created_images.append(image_id)
cls.client.wait_for_image_status(image_id,
'active')
return image_id
@test.attr(type='gate')
@test.services('compute')
def test_index_server_id(self):
# The images should contain images filtered by server id
_, images = self.client.image_list_detail(
{'instance_uuid': self.servers[0]['id']})
result_set = set(map(lambda x: x['id'], images))
self.assertEqual(self.snapshot_set, result_set)
@test.attr(type='gate')
@test.services('compute')
def test_index_type(self):
# The list of servers should be filtered by image type
params = {'image_type': 'snapshot'}
_, images = self.client.image_list_detail(params)
result_set = set(map(lambda x: x['id'], images))
self.assertIn(self.snapshot, result_set)
@test.attr(type='gate')
@test.services('compute')
def test_index_limit(self):
# Verify only the expected number of results are returned
_, images = self.client.image_list_detail(limit=1)
self.assertEqual(1, len(images))
@test.attr(type='gate')
@test.services('compute')
def test_index_by_change_since(self):
# Verify an update image is returned
# Becoming ACTIVE will modify the updated time
# Filter by the image's created time
_, image = self.client.get_image_meta(self.snapshot)
self.assertEqual(self.snapshot, image['id'])
_, images = self.client.image_list_detail(
changes_since=image['updated_at'])
result_set = set(map(lambda x: x['id'], images))
self.assertIn(self.image_id, result_set)
self.assertNotIn(self.snapshot, result_set)
class UpdateImageMetaTest(base.BaseV1ImageTest):
@classmethod
def resource_setup(cls):
super(UpdateImageMetaTest, cls).resource_setup()
cls.image_id = cls._create_standard_image('1', 'ami', 'ami', 42)
@classmethod
def _create_standard_image(cls, name, container_format,
disk_format, size):
"""
Create a new standard image and return the ID of the newly-registered
image.
"""
image_file = StringIO.StringIO(data_utils.random_bytes(size))
name = 'New Standard Image %s' % name
_, image = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
is_public=False, data=image_file,
properties={'key1': 'value1'})
image_id = image['id']
return image_id
@test.attr(type='gate')
def test_list_image_metadata(self):
# All metadata key/value pairs for an image should be returned
_, resp_metadata = self.client.get_image_meta(self.image_id)
expected = {'key1': 'value1'}
self.assertEqual(expected, resp_metadata['properties'])
@test.attr(type='gate')
def test_update_image_metadata(self):
# The metadata for the image should match the updated values
req_metadata = {'key1': 'alt1', 'key2': 'value2'}
_, metadata = self.client.get_image_meta(self.image_id)
self.assertEqual(metadata['properties'], {'key1': 'value1'})
metadata['properties'].update(req_metadata)
_, metadata = self.client.update_image(
self.image_id, properties=metadata['properties'])
_, resp_metadata = self.client.get_image_meta(self.image_id)
expected = {'key1': 'alt1', 'key2': 'value2'}
self.assertEqual(expected, resp_metadata['properties'])
|
|
"""
This module contains the base ``Instance`` classes that concrete classes
inherit from. Specifically, there are three classes:
1. ``Instance``, that just exists as a base type with no functionality
2. ``TextInstance``, which adds a ``words()`` method and a method to convert
strings to indices using a DataIndexer.
3. ``IndexedInstance``, which is a ``TextInstance`` that has had all of its
strings converted into indices.
This class has methods to deal with padding (so that sequences all have the
same length) and converting an ``Instance`` into a set of Numpy arrays
suitable for use with Keras.
As this codebase is dealing mostly with textual question answering, pretty much
all of the concrete ``Instance`` types will have both a ``TextInstance`` and a
corresponding ``IndexedInstance``, which you can see in the individual files
for each ``Instance`` type.
"""
import itertools
from typing import Any, Callable, Dict, List
from ...common.params import Params
from ..tokenizers import tokenizers
from ..data_indexer import DataIndexer
class Instance:
"""
A data instance, used either for training a neural network or for testing one.
Parameters
----------
label : Any
Any kind of label that you might want to predict in a model. Could be a class label, a
tag sequence, a character span in a passage, etc.
index : int, optional
Used for matching instances with other data, such as background
sentences.
"""
def __init__(self, label, index: int=None):
self.label = label
self.index = index
class TextInstance(Instance):
"""
An ``Instance`` that has some attached text, typically either a sentence
or a logical form. This is called a ``TextInstance`` because the
individual tokens here are encoded as strings, and we can
get a list of strings out when we ask what words show up in the instance.
We use these kinds of instances to fit a ``DataIndexer`` (i.e., deciding
which words should be mapped to an unknown token); to use them in training
or testing, we need to first convert them into ``IndexedInstances``.
In order to actually convert text into some kind of indexed sequence,
    we rely on a ``TextEncoder``. There are several ``TextEncoder`` subclasses
    that will let you use word token sequences, character sequences, and other
    options. By default we use word tokens. You can override this by setting
    the ``tokenizer`` class variable.
"""
tokenizer = tokenizers['words'](Params({}))
def __init__(self, label, index: int=None):
super(TextInstance, self).__init__(label, index)
def _words_from_text(self, text: str) -> Dict[str, List[str]]:
return self.tokenizer.get_words_for_indexer(text)
def _index_text(self, text: str, data_indexer: DataIndexer) -> List[int]:
return self.tokenizer.index_text(text, data_indexer)
def words(self) -> Dict[str, List[str]]:
"""
Returns a list of all of the words in this instance, contained in a
namespace dictionary.
This is mainly used for computing word counts when fitting a word
vocabulary on a dataset. The namespace dictionary allows you to have
several embedding matrices with different vocab sizes, e.g., for words
and for characters (in fact, words and characters are the only use
cases I can think of for now, but this allows you to do other more
crazy things if you want). You can call the namespaces whatever you
want, but if you want the ``DataIndexer`` to work correctly without
namespace arguments, you should use the key 'words' to represent word
tokens.
Returns
-------
namespace : Dictionary of {str: List[str]}
The ``str`` key refers to vocabularies, and the ``List[str]``
should contain the tokens in that vocabulary. For example, you
should use the key ``words`` to represent word tokens, and the
            corresponding value in the dictionary would be a list of all the
words in the instance.
"""
raise NotImplementedError
def to_indexed_instance(self, data_indexer: DataIndexer) -> 'IndexedInstance':
"""
Converts the words in this ``Instance`` into indices using
the ``DataIndexer``.
Parameters
----------
data_indexer : DataIndexer
``DataIndexer`` to use in converting the ``Instance`` to
an ``IndexedInstance``.
Returns
-------
indexed_instance : IndexedInstance
A ``TextInstance`` that has had all of its strings converted into
indices.
"""
raise NotImplementedError
@classmethod
def read_from_line(cls, line: str):
"""
Reads an instance of this type from a line.
Parameters
----------
line : str
A line from a data file.
Returns
-------
indexed_instance : IndexedInstance
A ``TextInstance`` that has had all of its strings converted into
indices.
Notes
-----
We throw a ``RuntimeError`` here instead of a ``NotImplementedError``,
because it's not expected that all subclasses will implement this.
"""
# pylint: disable=unused-argument
raise RuntimeError("%s instances can't be read from a line!" % str(cls))
class IndexedInstance(Instance):
"""
An indexed data instance has all word tokens replaced with word indices,
along with some kind of label, suitable for input to a Keras model. An
``IndexedInstance`` is created from an ``Instance`` using a
``DataIndexer``, and the indices here have no recoverable meaning without
the ``DataIndexer``.
For example, we might have the following ``Instance``:
- ``TrueFalseInstance('Jamie is nice, Holly is mean', True, 25)``
After being converted into an ``IndexedInstance``, we might have
the following:
- ``IndexedTrueFalseInstance([1, 6, 7, 1, 6, 8], True, 25)``
This would mean that ``"Jamie"`` and ``"Holly"`` were OOV to the
``DataIndexer``, and the other words were given indices.
"""
@classmethod
def empty_instance(cls):
"""
Returns an empty, unpadded instance of this class. Necessary for option
padding in multiple choice instances.
"""
raise NotImplementedError
def get_padding_lengths(self) -> Dict[str, int]:
"""
Returns the length of this instance in all dimensions that require padding.
Different kinds of instances have different fields that are padded, such as sentence
length, number of background sentences, number of options, etc.
Returns
-------
padding_lengths: Dict[str, int]
A dictionary mapping padding keys (like "num_sentence_words") to lengths.
"""
raise NotImplementedError
def pad(self, padding_lengths: Dict[str, int]):
"""
Add zero-padding to make each data example of equal length for use
in the neural network.
This modifies the current object.
Parameters
----------
padding_lengths: Dict[str, int]
In this dictionary, each ``str`` refers to a type of token (e.g.
``num_sentence_words``), and the corresponding ``int`` is the value. This dictionary
            must have the same keys as were returned by
            :func:`~IndexedInstance.get_padding_lengths()`. We will use these lengths to pad the
            instance in all of the necessary dimensions to the given lengths.
"""
raise NotImplementedError
def as_training_data(self):
"""
Convert this ``IndexedInstance`` to NumPy arrays suitable for use as
training data to Keras models.
Returns
-------
train_data : (inputs, label)
            The ``IndexedInstance`` as NumPy arrays to be used in Keras.
Note that ``inputs`` might itself be a complex tuple, depending
on the ``Instance`` type.
"""
raise NotImplementedError
@staticmethod
def _get_word_sequence_lengths(word_indices: List) -> Dict[str, int]:
"""
Because ``TextEncoders`` can return complex data structures, we might
actually have several things to pad for a single word sequence. We
check for that and handle it in a single spot here. We return a
dictionary containing 'num_sentence_words', which is the number of
words in word_indices. If the word representations also contain
characters, the dictionary additionally contains a
'num_word_characters' key, with a value corresponding to the longest
word in the sequence.
"""
padding_lengths = {'num_sentence_words': len(word_indices)}
if len(word_indices) > 0 and not isinstance(word_indices[0], int):
if isinstance(word_indices[0], list):
padding_lengths['num_word_characters'] = max([len(word) for word in word_indices])
# There might someday be other cases we're missing here, but we'll punt for now.
return padding_lengths
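    # A small illustration of the two input shapes handled above; the index
    # values are made up:
    #
    #   IndexedInstance._get_word_sequence_lengths([3, 7, 9])
    #   # -> {'num_sentence_words': 3}
    #   IndexedInstance._get_word_sequence_lengths([[2, 4], [5], [6, 1, 8]])
    #   # -> {'num_sentence_words': 3, 'num_word_characters': 3}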
@staticmethod
def pad_word_sequence(word_sequence: List[int],
padding_lengths: Dict[str, int],
truncate_from_right: bool=True) -> List:
"""
        Take a list of indices and pad them.
Parameters
----------
word_sequence : List of int
A list of word indices.
padding_lengths : Dict[str, int]
In this dictionary, each ``str`` refers to a type of token (e.g.
``num_sentence_words``), and the corresponding ``int`` is the value. This dictionary
            must have the same keys as were returned by
            :func:`~IndexedInstance.get_padding_lengths()`. We will use these lengths to pad the
            instance in all of the necessary dimensions to the given lengths.
truncate_from_right : bool, default=True
If truncating the indices is necessary, this parameter dictates whether we do so on the
left or right.
Returns
-------
padded_word_sequence : List of int
A padded list of word indices.
Notes
-----
The reason we truncate from the right by default is for cases that are questions, with long
set ups. We at least want to get the question encoded, which is always at the end, even if
we've lost much of the question set up. If you want to truncate from the other direction,
you can.
TODO(matt): we should probably switch the default to truncate from the left, and clear up
the naming here - it's easy to get confused about what "truncate from right" means.
"""
default_value = lambda: 0
if 'num_word_characters' in padding_lengths:
default_value = lambda: []
padded_word_sequence = IndexedInstance.pad_sequence_to_length(
word_sequence, padding_lengths['num_sentence_words'], default_value, truncate_from_right)
if 'num_word_characters' in padding_lengths:
desired_length = padding_lengths['num_word_characters']
longest_word = max(padded_word_sequence, key=len)
if desired_length > len(longest_word):
# since we want to pad to greater than the longest word, we add a
# "dummy word" to get the speed of itertools.zip_longest
padded_word_sequence.append([0]*desired_length)
# pad the list of lists to the longest sublist, appending 0's
words_padded_to_longest = list(zip(*itertools.zip_longest(*padded_word_sequence,
fillvalue=0)))
if desired_length > len(longest_word):
# now we remove the "dummy word" if we appended one.
words_padded_to_longest.pop()
            # Now we need to truncate all of the character lists to the desired
            # length. Character-level truncation always chops characters off the
            # right end of each word, regardless of ``truncate_from_right``.
padded_word_sequence = [list(word[:desired_length])
for word in words_padded_to_longest]
return padded_word_sequence
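    # A worked example of character-level padding with the method above; the
    # indices are hypothetical:
    #
    #   padding_lengths = {'num_sentence_words': 3, 'num_word_characters': 4}
    #   IndexedInstance.pad_word_sequence([[1, 2], [3, 4, 5]], padding_lengths)
    #   # sentence-length padding first: [[], [1, 2], [3, 4, 5]]
    #   # then each word is zero-padded (or truncated) to 4 characters:
    #   # -> [[0, 0, 0, 0], [1, 2, 0, 0], [3, 4, 5, 0]]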
@staticmethod
def pad_sequence_to_length(sequence: List,
desired_length: int,
default_value: Callable[[], Any]=lambda: 0,
truncate_from_right: bool=True) -> List:
"""
        Take a list of indices and pad them to the desired length.
        Parameters
        ----------
        sequence : List
            A list of indices (or other values) to pad or truncate.
desired_length : int
Maximum length of each sequence. Longer sequences
are truncated to this length, and shorter ones are padded to it.
default_value: Callable, default=lambda: 0
Callable that outputs a default value (of any type) to use as
padding values.
truncate_from_right : bool, default=True
If truncating the indices is necessary, this parameter dictates
whether we do so on the left or right.
Returns
-------
padded_word_sequence : List of int
A padded or truncated list of word indices.
Notes
-----
The reason we truncate from the right by default is for
cases that are questions, with long set ups. We at least want to get
the question encoded, which is always at the end, even if we've lost
much of the question set up. If you want to truncate from the other
direction, you can.
"""
if truncate_from_right:
truncated = sequence[-desired_length:]
else:
truncated = sequence[:desired_length]
if len(truncated) < desired_length:
# If the length of the truncated sequence is less than the desired
# length, we need to pad.
padding_sequence = [default_value()] * (desired_length - len(truncated))
if truncate_from_right:
# When we truncate from the right, we add zeroes to the front.
padding_sequence.extend(truncated)
return padding_sequence
else:
# When we do not truncate from the right, we add zeroes to the end.
truncated.extend(padding_sequence)
return truncated
return truncated
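    # A few worked examples of the truncation/padding rules above (values are
    # illustrative only):
    #
    #   IndexedInstance.pad_sequence_to_length([1, 2, 3], 5)
    #   # -> [0, 0, 1, 2, 3]   (padding goes on the left by default)
    #   IndexedInstance.pad_sequence_to_length([1, 2, 3, 4, 5, 6], 4)
    #   # -> [3, 4, 5, 6]      (keeps the rightmost indices)
    #   IndexedInstance.pad_sequence_to_length([1, 2, 3], 5, truncate_from_right=False)
    #   # -> [1, 2, 3, 0, 0]   (padding goes on the right instead)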
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
tcpserver.py
------------
TxTrader TCP server module - Implement ASCII line oriented event interface.
Copyright (c) 2015 Reliance Systems Inc. <[email protected]>
Licensed under the MIT license. See LICENSE for details.
"""
from txtrader import VERSION, DATE, LABEL
import sys
import os
from twisted.internet.protocol import Factory
from twisted.internet import reactor, defer
from twisted.protocols import basic
from socket import gethostname
import ujson as json
import traceback
# set 512MB line buffer
LINE_BUFFER_LENGTH = 0x20000000
class tcpserver(basic.NetstringReceiver):
MAX_LENGTH = LINE_BUFFER_LENGTH
def __init__(self):
self.commands = {
'auth': self.cmd_auth,
'help': self.cmd_help,
'quit': self.cmd_disconnect,
'exit': self.cmd_disconnect,
'bye': self.cmd_disconnect,
'status': self.cmd_status,
'getbars': self.cmd_getbars,
'marketorder': self.cmd_market_order,
'stagemarketorder': self.cmd_stage_market_order,
'stoporder': self.cmd_stop_order,
'limitorder': self.cmd_limit_order,
'stoplimitorder': self.cmd_stoplimit_order,
'add': self.cmd_add,
'del': self.cmd_del,
'query': self.cmd_query,
'querydata': self.cmd_query_data,
'symbols': self.cmd_symbols,
'positions': self.cmd_positions,
'orders': self.cmd_orders,
'tickets': self.cmd_tickets,
'executions': self.cmd_executions,
'globalcancel': self.cmd_global_cancel,
'cancel': self.cmd_cancel,
'setaccount': self.cmd_setaccount,
'accounts': self.cmd_accounts,
'shutdown': self.cmd_shutdown,
}
self.authmap = set([])
self.options = {}
def stringReceived(self, line):
line = line.decode().strip()
self.factory.output(
'user command: %s' % ('%s xxxxxxxxxxx' % ' '.join(line.split()[:2]) if line.startswith('auth') else line)
)
if line:
cmd = line.split()[0]
if cmd in self.commands.keys():
try:
response = self.commands[cmd](line)
except Exception as exc:
self.factory.api.error_handler(self, repr(exc))
traceback.print_exc()
response = f'.error: {repr(exc)}'
self.send(response)
self.factory.api.check_exception_halt(exc, self)
else:
if response:
self.send(response)
else:
self.send('.what?')
def send(self, line):
if len(line) > self.MAX_LENGTH:
self.factory.api.force_disconnect(
f"NetstringReceiver: cannot send message of length {len(line)} {repr(line[:64])}..."
)
else:
return self.sendString(line.encode())
def cmd_auth(self, line):
        auth, username, password = line.split()[:3]
options_field = (line[len(auth) + len(username) + len(password) + 3:]).strip()
if not options_field.startswith('{'):
# legacy options are in string format: i.e. 'noquotes notrades'; convert to dict
if options_field == 'noquotes notrades':
# legacy clients sending "noquotes notrades" expect a default of {'order-notification': True}
self.options = {'order-notification': True}
self.factory.api.output(f"Setting legacy client options to: {repr(self.options)}")
else:
self.options = {o: True for o in options_field.strip().split()}
else:
self.options = json.loads(options_field) if options_field else {}
if self.factory.validate(username, password):
self.authmap.add(self.transport.getPeer())
self.factory.api.open_client(self)
return '.Authorized %s' % self.factory.api.channel
else:
self.check_authorized()
def check_authorized(self):
authorized = self.transport.getPeer() in self.authmap
if not authorized:
self.send('.Authorization required!')
self.factory.api.close_client(self)
self.transport.loseConnection()
return authorized
def check_initialized(self):
initialized = self.factory.api.initialized
if not initialized:
self.send('.Initialization not complete!')
self.factory.api.close_client(self)
self.transport.loseConnection()
return initialized
def cmd_shutdown(self, line):
if self.check_authorized():
self.factory.output('client at %s requested shutdown: %s' % (self.transport.getPeer(), line))
self.factory.api.close_client(self)
reactor.callLater(0, reactor.stop)
def cmd_help(self, line):
self.send('.commands: %s' % repr(self.commands.keys()))
def cmd_disconnect(self, line):
self.authmap.discard(self.transport.getPeer())
self.transport.loseConnection()
def cmd_status(self, line):
self.send('.status: %s' % self.factory.api.query_connection_status())
def cmd_setaccount(self, line):
if self.check_authorized() and self.check_initialized():
setaccount, account = line.split()[:2]
self.factory.api.set_account(account, self.send)
def cmd_accounts(self, line):
if self.check_authorized() and self.check_initialized():
self.send('.accounts: %s' % self.factory.api.accounts)
self.factory.api.request_accounts(self.defer_response(self.send_response, 'accounts'))
def cmd_getbars(self, line):
if self.check_authorized() and self.check_initialized():
_, symbol, period, start_date, start_time, end_date, end_time = line.split()[:7]
self.factory.api.query_bars(
symbol, period, ' '.join((start_date, start_time)), ' '.join((end_date, end_time)), self.send
)
def cmd_add(self, line):
if self.check_authorized() and self.check_initialized():
_, symbol = line.split()[:2]
symbol = symbol.upper()
self.factory.api.symbol_enable(symbol, self, self.defer_response(self.send_response, 'symbol'))
#self.send(f".symbol: {symbol} added")
def cmd_del(self, line):
if self.check_authorized() and self.check_initialized():
_, symbol = line.split()[:2]
symbol = symbol.upper()
self.factory.api.symbol_disable(symbol, self, self.defer_response(self.send_response, 'symbol'))
#self.send(f".symbol: {symbol} deleted")
def cmd_query(self, line):
if self.check_authorized() and self.check_initialized():
_, symbol = line.split()[:2]
symbol = symbol.upper()
self.send_response(json.dumps(self._symbol_fields(symbol)), 'symbol')
def cmd_query_data(self, line):
if self.check_authorized() and self.check_initialized():
_, symbol = line.split()[:2]
symbol = symbol.upper()
self.send_response(json.dumps(self._symbol_fields(symbol, raw=True)), 'symbol-data')
def _symbol_fields(self, symbol, raw=False):
if raw:
symbol_fields = self.factory.api.symbols[symbol].rawdata
else:
symbol_fields = self.factory.api.symbols[symbol].export(self.options.get('SYMBOL_FIELDS', None))
return symbol_fields
def cmd_market_order(self, line):
if self.check_authorized() and self.check_initialized():
_, account, route, symbol, qstr = line.split()[:5]
self.factory.api.market_order(account, route, symbol, int(qstr), self.send)
def cmd_stage_market_order(self, line):
if self.check_authorized() and self.check_initialized():
_, tag, account, route, symbol, qstr = line.split()[:6]
self.factory.api.stage_market_order(tag, account, route, symbol, int(qstr), self.send)
def cmd_stop_order(self, line):
if self.check_authorized() and self.check_initialized():
_order, account, route, symbol, price, qstr = line.split()[:6]
self.factory.api.stop_order(account, route, symbol, float(price), int(qstr), self.send)
def cmd_limit_order(self, line):
if self.check_authorized() and self.check_initialized():
_, account, route, symbol, price, qstr = line.split()[:6]
self.factory.api.limit_order(account, route, symbol, float(price), int(qstr), self.send)
def cmd_stoplimit_order(self, line):
if self.check_authorized() and self.check_initialized():
_, account, route, symbol, stop_price, limit_price, qstr = line.split()[:7]
self.factory.api.stoplimit_order(
account, route, symbol, float(stop_price), float(limit_price), int(qstr), self.send
)
def cmd_cancel(self, line):
if self.check_authorized() and self.check_initialized():
_, _id = line.split()[:2]
self.factory.api.cancel_order(_id, self.send)
def cmd_symbols(self, line):
if self.check_authorized() and self.check_initialized():
            symbols = {s: self._symbol_fields(s) for s in self.factory.api.symbols}
self.send_response(json.dumps(symbols), 'symbols')
def cmd_positions(self, line):
if self.check_authorized() and self.check_initialized():
self.factory.api.request_positions(self.defer_response(self.send_response, 'positions'))
def cmd_orders(self, line):
if self.check_authorized() and self.check_initialized():
self.factory.api.request_orders(self.defer_response(self.send_response, 'orders'))
def cmd_tickets(self, line):
if self.check_authorized() and self.check_initialized():
self.factory.api.request_tickets(self.defer_response(self.send_response, 'tickets'))
def cmd_executions(self, line):
if self.check_authorized() and self.check_initialized():
self.factory.api.request_executions(self.defer_response(self.send_response, 'executions'))
def cmd_global_cancel(self, line):
if self.check_authorized() and self.check_initialized():
self.factory.api.request_global_cancel()
self.send('.global order cancel requested')
def connectionMade(self):
self.factory.output('client connection from %s' % self.transport.getPeer())
self.authmap.discard(self.transport.getPeer())
self.send(
'.connected: %s %s %s %s on %s' % (self.factory.api.label, str(VERSION), str(DATE), str(LABEL), str(gethostname()))
)
def connectionLost(self, reason):
self.factory.output('client connection from %s lost: %s' % (self.transport.getPeer(), repr(reason)))
self.authmap.discard(self.transport.getPeer())
self.factory.api.close_client(self)
def send_response(self, data, label):
self.send(f'{self.factory.api.channel}.{label}: {data}')
def defer_response(self, sender, command):
d = defer.Deferred()
d.addCallback(sender, command)
d.addErrback(self.api_error)
d.addErrback(self.api_timeout)
return d
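    # A hedged sketch of how defer_response is used by the command handlers
    # above; the API call fires the Deferred with its result, which is then
    # forwarded to the client as e.g. "<channel>.orders: {...}":
    #
    #   d = self.defer_response(self.send_response, 'orders')
    #   self.factory.api.request_orders(d)
    #   # inside the API implementation (illustrative only):
    #   #   d.callback(json.dumps(orders))   # -> send_response(data, 'orders')
    #   #   d.errback(failure)               # -> api_error / api_timeout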
def api_timeout(self, failure):
self.send(f'alert: API timeout errback: {repr(failure)}')
return failure
def api_error(self, failure):
self.send(f'alert: API errback: {repr(failure)}')
return failure
class serverFactory(Factory):
protocol = tcpserver
def __init__(self, api):
self.api = api
self.output = api.output
def validate(self, username, password):
return username == self.api.username and password == self.api.password
def buildProtocol(self, addr):
self.output(f'buildProtocol: addr={addr}')
return super().buildProtocol(addr)
|
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio module for information retrieval."""
from __future__ import absolute_import, print_function
import errno
import json
import os
import warnings
from elasticsearch import VERSION as ES_VERSION
from elasticsearch import Elasticsearch
from pkg_resources import iter_entry_points, resource_filename, \
resource_isdir, resource_listdir
from werkzeug.utils import cached_property
from . import config
from .cli import index as index_cmd
from .errors import IndexAlreadyExistsError
from .utils import build_alias_name, build_index_from_parts, \
build_index_name, timestamp_suffix
class _SearchState(object):
"""Store connection to elastic client and registered indexes."""
def __init__(self, app,
entry_point_group_mappings=None,
entry_point_group_templates=None,
**kwargs):
"""Initialize state.
:param app: An instance of :class:`~flask.app.Flask`.
:param entry_point_group_mappings:
The entrypoint group name to load mappings.
:param entry_point_group_templates:
The entrypoint group name to load templates.
"""
self.app = app
self.mappings = {}
self.aliases = {}
self._client = kwargs.get('client')
self.entry_point_group_templates = entry_point_group_templates
self._current_suffix = None
if entry_point_group_mappings:
self.load_entry_point_group_mappings(entry_point_group_mappings)
if ES_VERSION[0] in (2, 5):
warnings.warn(
"Elasticsearch v2 and v5 support will be removed.",
DeprecationWarning)
@property
def current_suffix(self):
"""Return the current suffix."""
if self._current_suffix is None:
self._current_suffix = timestamp_suffix()
return self._current_suffix
@cached_property
def templates(self):
"""Generate a dictionary with template names and file paths."""
templates = {}
result = []
if self.entry_point_group_templates:
result = self.load_entry_point_group_templates(
self.entry_point_group_templates) or []
for template in result:
for name, path in template.items():
templates[name] = path
return templates
def register_mappings(self, alias, package_name):
"""Register mappings from a package under given alias.
:param alias: The alias.
:param package_name: The package name.
"""
# For backwards compatibility, we also allow for ES2 mappings to be
# placed at the root level of the specified package path, and not in
# the `<package-path>/v2` directory.
if ES_VERSION[0] == 2:
try:
resource_listdir(package_name, 'v2')
package_name += '.v2'
except (OSError, IOError) as ex:
if getattr(ex, 'errno', 0) != errno.ENOENT:
raise
warnings.warn(
"Having mappings in a path which doesn't specify the "
"Elasticsearch version is deprecated. Please move your "
"mappings to a subfolder named according to the "
"Elasticsearch version which your mappings are intended "
"for. (e.g. '{}/v2/{}')".format(
package_name, alias),
PendingDeprecationWarning)
else:
package_name = '{}.v{}'.format(package_name, ES_VERSION[0])
def _walk_dir(aliases, *parts):
root_name = build_index_from_parts(*parts)
resource_name = os.path.join(*parts)
data = aliases.get(root_name, {})
for filename in resource_listdir(package_name, resource_name):
file_path = os.path.join(resource_name, filename)
if resource_isdir(package_name, file_path):
_walk_dir(data, *(parts + (filename, )))
continue
filename_root, ext = os.path.splitext(filename)
if ext not in {'.json', }:
continue
index_name = build_index_from_parts(
*(parts + (filename_root, ))
)
assert index_name not in data, 'Duplicate index'
filename = resource_filename(
package_name, os.path.join(resource_name, filename))
data[index_name] = filename
self.mappings[index_name] = filename
aliases[root_name] = data
# Start the recursion here:
_walk_dir(self.aliases, alias)
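    # A hedged example of the structures built by register_mappings; the
    # package layout and names below are hypothetical (shown for ES 7):
    #
    #   # mypkg/mappings/v7/records/record-v1.0.0.json
    #   search.register_mappings('records', 'mypkg.mappings')
    #   # self.aliases  -> {'records': {'records-record-v1.0.0': '<path to json>'}}
    #   # self.mappings -> {'records-record-v1.0.0': '<path to json>'}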
def register_templates(self, module):
"""Register templates from the provided module.
:param module: The templates module.
"""
try:
resource_listdir(module, 'v{}'.format(ES_VERSION[0]))
module = '{}.v{}'.format(module, ES_VERSION[0])
except (OSError, IOError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise OSError(
"Please move your templates to a subfolder named "
"according to the Elasticsearch version "
"which your templates are intended "
"for. (e.g. '{}')".format(version_module))
result = {}
def _walk_dir(*parts):
parts = parts or tuple()
resource_name = os.path.join(*parts) if parts else ''
for filename in resource_listdir(module, resource_name):
file_path = os.path.join(resource_name, filename)
if resource_isdir(module, file_path):
_walk_dir(*(parts + (filename, )))
continue
filename_root, ext = os.path.splitext(filename)
if ext not in {'.json', }:
continue
template_name = build_index_from_parts(
*(parts + (filename_root, ))
)
result[template_name] = resource_filename(
module, os.path.join(resource_name, filename))
# Start the recursion here:
_walk_dir()
return result
def load_entry_point_group_mappings(self, entry_point_group_mappings):
"""Load actions from an entry point group."""
for ep in iter_entry_points(group=entry_point_group_mappings):
self.register_mappings(ep.name, ep.module_name)
def load_entry_point_group_templates(self, entry_point_group_templates):
"""Load actions from an entry point group."""
result = []
for ep in iter_entry_points(group=entry_point_group_templates):
with self.app.app_context():
for template_dir in ep.load()():
result.append(self.register_templates(template_dir))
return result
def _client_builder(self):
"""Build Elasticsearch client."""
client_config = self.app.config.get('SEARCH_CLIENT_CONFIG') or {}
client_config.setdefault(
'hosts', self.app.config.get('SEARCH_ELASTIC_HOSTS'))
return Elasticsearch(**client_config)
@property
def client(self):
"""Return client for current application."""
if self._client is None:
self._client = self._client_builder()
return self._client
def flush_and_refresh(self, index):
"""Flush and refresh one or more indices.
.. warning::
Do not call this method unless you know what you are doing. This
method is only intended to be called during tests.
"""
prefixed_index = build_alias_name(index, app=self.app)
self.client.indices.flush(wait_if_ongoing=True, index=prefixed_index)
self.client.indices.refresh(index=prefixed_index)
self.client.cluster.health(
wait_for_status='yellow', request_timeout=30)
return True
@property
def cluster_version(self):
"""Get version of Elasticsearch running on the cluster."""
versionstr = self.client.info()['version']['number']
return [int(x) for x in versionstr.split('.')]
@property
def active_aliases(self):
"""Get a filtered list of aliases based on configuration.
Returns aliases and their mappings that are defined in the
`SEARCH_MAPPINGS` config variable. If the `SEARCH_MAPPINGS` is set to
`None` (the default), all aliases are included.
"""
whitelisted_aliases = self.app.config.get('SEARCH_MAPPINGS')
if whitelisted_aliases is None:
return self.aliases
else:
return {k: v for k, v in self.aliases.items()
if k in whitelisted_aliases}
def _get_indices(self, tree_or_filename):
for name, value in tree_or_filename.items():
if isinstance(value, dict):
for result in self._get_indices(value):
yield result
else:
yield name
def create_index(self, index, mapping_path=None, prefix=None, suffix=None,
create_write_alias=True, ignore=None, dry_run=False):
"""Create index with a write alias."""
mapping_path = mapping_path or self.mappings[index]
final_alias = None
final_index = None
index_result = None, None
alias_result = None, None
# To prevent index init --force from creating a suffixed
        # index if the current instance is running without suffixes,
        # make sure there is no index with the same name as the
# alias name (i.e. the index name without the suffix).
with open(mapping_path, 'r') as body:
final_index = build_index_name(
index, prefix=prefix, suffix=suffix, app=self.app)
if create_write_alias:
final_alias = build_alias_name(
index, prefix=prefix, app=self.app)
index_result = (
final_index,
self.client.indices.create(
index=final_index,
body=json.load(body),
ignore=ignore,
) if not dry_run else None
)
if create_write_alias:
alias_result = (
final_alias,
self.client.indices.put_alias(
index=final_index,
name=final_alias,
ignore=ignore,
) if not dry_run else None
)
return index_result, alias_result
def create(self, ignore=None, ignore_existing=False, index_list=None):
"""Yield tuple with created index name and responses from a client."""
ignore = ignore or []
new_indices = {}
actions = []
if ignore_existing and not ignore:
ignore = [400]
elif ignore_existing and 400 not in ignore:
ignore.append(400)
def ensure_not_exists(name):
if not ignore_existing and self.client.indices.exists(name):
raise IndexAlreadyExistsError(
'index/alias with name "{}" already exists'.format(name))
def _build(tree_or_filename, alias=None):
"""Build a list of index/alias actions to perform."""
for name, value in tree_or_filename.items():
if isinstance(value, dict):
_build(value, alias=name)
else:
if index_list and name not in index_list:
continue
index_result, alias_result = \
self.create_index(
name,
ignore=ignore,
dry_run=True
)
ensure_not_exists(index_result[0])
new_indices[name] = index_result[0]
if alias_result[0]:
ensure_not_exists(alias_result[0])
actions.append(dict(
type='create_index',
index=name,
create_write_alias=True
))
else:
actions.append(dict(
type='create_index',
index=name,
create_write_alias=False
))
if alias:
alias_indices = self._get_indices(tree_or_filename)
alias_indices = [
new_indices[i] for i in alias_indices if i in new_indices
]
if alias_indices:
alias_name = build_alias_name(alias, app=self.app)
ensure_not_exists(alias_name)
actions.append(dict(
type='create_alias',
index=alias_indices,
alias=alias_name
))
_build(self.active_aliases)
for action in actions:
if action['type'] == 'create_index':
index_result, alias_result = self.create_index(
action['index'],
create_write_alias=action.get('create_write_alias', True),
ignore=ignore
)
yield index_result
if alias_result[0]:
yield alias_result
elif action['type'] == 'create_alias':
yield action['alias'], self.client.indices.put_alias(
index=action['index'],
name=action['alias'],
ignore=ignore,
)
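    # Illustrative sketch (hypothetical helper, not part of Invenio-Search):
    # ``create()`` above is a generator, so nothing is sent to Elasticsearch
    # until it is drained; a caller typically iterates it and collects names.
    def _example_create_all(self, ignore_existing=True):
        """Drain ``create()`` and return the created index/alias names."""
        return [name for name, _ in self.create(ignore_existing=ignore_existing)]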
def put_templates(self, ignore=None):
"""Yield tuple with registered template and response from client."""
ignore = ignore or []
def _replace_prefix(template_path, body):
"""Replace index prefix in template request body."""
pattern = '__SEARCH_INDEX_PREFIX__'
prefix = self.app.config['SEARCH_INDEX_PREFIX'] or ''
if prefix:
                assert pattern in body, (
                    "You are using the prefix `{0}`, "
                    "but the template `{1}` does not contain the "
                    "pattern `{2}`.".format(prefix, template_path, pattern)
                )
return body.replace(pattern, prefix)
def _put_template(template):
"""Put template in search client."""
with open(self.templates[template], 'r') as fp:
body = fp.read()
replaced_body = _replace_prefix(self.templates[template], body)
template_name = build_alias_name(template, app=self.app)
return self.templates[template],\
self.client.indices.put_template(
name=template_name,
body=json.loads(replaced_body),
ignore=ignore,
)
for template in self.templates:
yield _put_template(template)
def delete(self, ignore=None, index_list=None):
"""Yield tuple with deleted index name and responses from a client."""
ignore = ignore or []
def _delete(tree_or_filename, alias=None):
"""Delete indexes and aliases by walking DFS."""
# Iterate over aliases:
for name, value in tree_or_filename.items():
if isinstance(value, dict):
for result in _delete(value, alias=name):
yield result
else:
if index_list and name not in index_list:
continue
# Resolve values to suffixed (or not) indices
prefixed_index = build_alias_name(name, app=self.app)
lookup_response = self.client.indices.get_alias(
index=prefixed_index, ignore=[404])
if 'error' in lookup_response:
indices_to_delete = []
else:
indices_to_delete = list(lookup_response.keys())
if len(indices_to_delete) == 0:
pass
elif len(indices_to_delete) == 1:
yield name, self.client.indices.delete(
index=indices_to_delete[0],
ignore=ignore,
)
else:
warnings.warn((
'Multiple indices found during deletion of '
'{name}: {indices}. Deletion was skipped for them.'
).format(name=name, indices=indices_to_delete))
for result in _delete(self.active_aliases):
yield result
class InvenioSearch(object):
"""Invenio-Search extension."""
def __init__(self, app=None, **kwargs):
"""Extension initialization.
:param app: An instance of :class:`~flask.app.Flask`.
"""
self._clients = {}
if app:
self.init_app(app, **kwargs)
def init_app(self, app,
entry_point_group_mappings='invenio_search.mappings',
entry_point_group_templates='invenio_search.templates',
**kwargs):
"""Flask application initialization.
:param app: An instance of :class:`~flask.app.Flask`.
"""
self.init_config(app)
app.cli.add_command(index_cmd)
state = _SearchState(
app,
entry_point_group_mappings=entry_point_group_mappings,
entry_point_group_templates=entry_point_group_templates,
**kwargs
)
self._state = app.extensions['invenio-search'] = state
@staticmethod
def init_config(app):
"""Initialize configuration.
:param app: An instance of :class:`~flask.app.Flask`.
"""
for k in dir(config):
if k.startswith('SEARCH_'):
app.config.setdefault(k, getattr(config, k))
def __getattr__(self, name):
"""Proxy to state object."""
return getattr(self._state, name, None)
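# Illustrative usage sketch (not part of the module): wiring the extension into
# a Flask application. Attribute access on the extension object is proxied to
# the underlying ``_SearchState`` via ``__getattr__`` above.
def _example_init(app):
    """Initialize Invenio-Search on ``app`` and return its client (sketch)."""
    search = InvenioSearch(app)
    return search.client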
|
|
"""Real-time information about public transport departures in Norway."""
from datetime import datetime, timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
CONF_SHOW_ON_MAP,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
API_CLIENT_NAME = "homeassistant-homeassistant"
ATTRIBUTION = "Data provided by entur.org under NLOD"
CONF_STOP_IDS = "stop_ids"
CONF_EXPAND_PLATFORMS = "expand_platforms"
CONF_WHITELIST_LINES = "line_whitelist"
CONF_OMIT_NON_BOARDING = "omit_non_boarding"
CONF_NUMBER_OF_DEPARTURES = "number_of_departures"
DEFAULT_NAME = "Entur"
DEFAULT_ICON_KEY = "bus"
ICONS = {
"air": "mdi:airplane",
"bus": "mdi:bus",
"metro": "mdi:subway",
"rail": "mdi:train",
"tram": "mdi:tram",
"water": "mdi:ferry",
}
SCAN_INTERVAL = timedelta(seconds=45)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_STOP_IDS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_EXPAND_PLATFORMS, default=True): cv.boolean,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SHOW_ON_MAP, default=False): cv.boolean,
vol.Optional(CONF_WHITELIST_LINES, default=[]): cv.ensure_list,
vol.Optional(CONF_OMIT_NON_BOARDING, default=True): cv.boolean,
vol.Optional(CONF_NUMBER_OF_DEPARTURES, default=2): vol.All(
cv.positive_int, vol.Range(min=2, max=10)
),
}
)
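# Illustrative sketch (not part of the integration): the sensor-specific keys
# accepted by the schema above, using hypothetical NSR ids. In a real setup
# these options live under the `sensor:` platform entry in configuration.yaml.
_EXAMPLE_OPTIONS = {
    CONF_STOP_IDS: ["NSR:StopPlace:548", "NSR:Quay:48550"],
    CONF_EXPAND_PLATFORMS: True,
    CONF_NUMBER_OF_DEPARTURES: 3,
}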
ATTR_STOP_ID = "stop_id"
ATTR_ROUTE = "route"
ATTR_ROUTE_ID = "route_id"
ATTR_EXPECTED_AT = "due_at"
ATTR_DELAY = "delay"
ATTR_REALTIME = "real_time"
ATTR_NEXT_UP_IN = "next_due_in"
ATTR_NEXT_UP_ROUTE = "next_route"
ATTR_NEXT_UP_ROUTE_ID = "next_route_id"
ATTR_NEXT_UP_AT = "next_due_at"
ATTR_NEXT_UP_DELAY = "next_delay"
ATTR_NEXT_UP_REALTIME = "next_real_time"
ATTR_TRANSPORT_MODE = "transport_mode"
def due_in_minutes(timestamp: datetime) -> int:
"""Get the time in minutes from a timestamp."""
if timestamp is None:
return None
diff = timestamp - dt_util.now()
return int(diff.total_seconds() / 60)
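# Illustrative sketch (not part of the integration): int() truncates toward
# zero, so a departure 90 seconds away is reported as 1 minute.
def _example_due_in_minutes() -> int:
    """Return the minutes until a hypothetical departure 90 seconds from now."""
    departure = dt_util.now() + timedelta(seconds=90)
    return due_in_minutes(departure)  # -> 1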
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Entur public transport sensor."""
from enturclient import EnturPublicTransportData
expand = config.get(CONF_EXPAND_PLATFORMS)
line_whitelist = config.get(CONF_WHITELIST_LINES)
name = config.get(CONF_NAME)
show_on_map = config.get(CONF_SHOW_ON_MAP)
stop_ids = config.get(CONF_STOP_IDS)
omit_non_boarding = config.get(CONF_OMIT_NON_BOARDING)
number_of_departures = config.get(CONF_NUMBER_OF_DEPARTURES)
stops = [s for s in stop_ids if "StopPlace" in s]
quays = [s for s in stop_ids if "Quay" in s]
data = EnturPublicTransportData(
API_CLIENT_NAME,
stops=stops,
quays=quays,
line_whitelist=line_whitelist,
omit_non_boarding=omit_non_boarding,
number_of_departures=number_of_departures,
web_session=async_get_clientsession(hass),
)
if expand:
await data.expand_all_quays()
await data.update()
proxy = EnturProxy(data)
entities = []
for place in data.all_stop_places_quays():
try:
given_name = "{} {}".format(name, data.get_stop_info(place).name)
except KeyError:
given_name = "{} {}".format(name, place)
entities.append(
EnturPublicTransportSensor(proxy, given_name, place, show_on_map)
)
async_add_entities(entities, True)
class EnturProxy:
"""Proxy for the Entur client.
    Throttles updates so we do not hit the API rate limit.
"""
def __init__(self, api):
"""Initialize the proxy."""
self._api = api
@Throttle(timedelta(seconds=15))
async def async_update(self) -> None:
"""Update data in client."""
await self._api.update()
def get_stop_info(self, stop_id: str) -> dict:
"""Get info about specific stop place."""
return self._api.get_stop_info(stop_id)
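# Illustrative sketch (not part of the integration): all sensors share a single
# EnturProxy, so the @Throttle(timedelta(seconds=15)) decorator above collapses
# overlapping sensor updates into at most one API call per 15 seconds.
async def _example_shared_updates(proxy: EnturProxy) -> None:
    """Trigger two updates; the second is skipped if it falls inside 15s."""
    await proxy.async_update()
    await proxy.async_update()  # throttled call returns without hitting the API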
class EnturPublicTransportSensor(Entity):
"""Implementation of a Entur public transport sensor."""
def __init__(self, api: EnturProxy, name: str, stop: str, show_on_map: bool):
"""Initialize the sensor."""
self.api = api
self._stop = stop
self._show_on_map = show_on_map
self._name = name
self._state = None
self._icon = ICONS[DEFAULT_ICON_KEY]
self._attributes = {}
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self._name
@property
def state(self) -> str:
"""Return the state of the sensor."""
return self._state
@property
def device_state_attributes(self) -> dict:
"""Return the state attributes."""
self._attributes[ATTR_ATTRIBUTION] = ATTRIBUTION
self._attributes[ATTR_STOP_ID] = self._stop
return self._attributes
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return "min"
@property
def icon(self) -> str:
"""Icon to use in the frontend."""
return self._icon
async def async_update(self) -> None:
"""Get the latest data and update the states."""
await self.api.async_update()
self._attributes = {}
data = self.api.get_stop_info(self._stop)
if data is None:
self._state = None
return
if self._show_on_map and data.latitude and data.longitude:
self._attributes[CONF_LATITUDE] = data.latitude
self._attributes[CONF_LONGITUDE] = data.longitude
calls = data.estimated_calls
if not calls:
self._state = None
return
self._state = due_in_minutes(calls[0].expected_departure_time)
self._icon = ICONS.get(calls[0].transport_mode, ICONS[DEFAULT_ICON_KEY])
self._attributes[ATTR_ROUTE] = calls[0].front_display
self._attributes[ATTR_ROUTE_ID] = calls[0].line_id
self._attributes[ATTR_EXPECTED_AT] = calls[0].expected_departure_time.strftime(
"%H:%M"
)
self._attributes[ATTR_REALTIME] = calls[0].is_realtime
self._attributes[ATTR_DELAY] = calls[0].delay_in_min
number_of_calls = len(calls)
if number_of_calls < 2:
return
self._attributes[ATTR_NEXT_UP_ROUTE] = calls[1].front_display
self._attributes[ATTR_NEXT_UP_ROUTE_ID] = calls[1].line_id
self._attributes[ATTR_NEXT_UP_AT] = calls[1].expected_departure_time.strftime(
"%H:%M"
)
self._attributes[ATTR_NEXT_UP_IN] = "{} min".format(
due_in_minutes(calls[1].expected_departure_time)
)
self._attributes[ATTR_NEXT_UP_REALTIME] = calls[1].is_realtime
self._attributes[ATTR_NEXT_UP_DELAY] = calls[1].delay_in_min
if number_of_calls < 3:
return
for i, call in enumerate(calls[2:]):
key_name = "departure_#" + str(i + 3)
self._attributes[key_name] = "{}{} {}".format(
"" if bool(call.is_realtime) else "ca. ",
call.expected_departure_time.strftime("%H:%M"),
call.front_display,
)
|
|
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from fabric.api import local
from lib import base
from lib.gobgp import *
from lib.quagga import *
import sys
import os
import time
import nose
from noseplugin import OptionParser, parser_option
from itertools import chain
class GoBGPTestBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
gobgp_ctn_image_name = parser_option.gobgp_image
base.TEST_PREFIX = parser_option.test_prefix
g1 = GoBGPContainer(name='g1', asn=65000, router_id='192.168.0.1',
ctn_image_name=gobgp_ctn_image_name,
log_level=parser_option.gobgp_log_level)
q1 = QuaggaBGPContainer(name='q1', asn=65001, router_id='192.168.0.2')
q2 = QuaggaBGPContainer(name='q2', asn=65002, router_id='192.168.0.3')
q3 = QuaggaBGPContainer(name='q3', asn=65003, router_id='192.168.0.4')
qs = [q1, q2, q3]
ctns = [g1, q1, q2, q3]
# advertise a route from q1, q2, q3
for idx, q in enumerate(qs):
route = '10.0.{0}.0/24'.format(idx+1)
q.add_route(route)
initial_wait_time = max(ctn.run() for ctn in ctns)
time.sleep(initial_wait_time)
for q in qs:
g1.add_peer(q, reload_config=False, passwd='passwd')
q.add_peer(g1, passwd='passwd', passive=True)
g1.create_config()
g1.reload_config()
cls.gobgp = g1
cls.quaggas = {'q1': q1, 'q2': q2, 'q3': q3}
    # check that each neighbor state becomes established
def test_01_neighbor_established(self):
for q in self.quaggas.itervalues():
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q)
def test_02_check_gobgp_global_rib(self):
for q in self.quaggas.itervalues():
# paths expected to exist in gobgp's global rib
routes = q.routes.keys()
timeout = 120
interval = 1
count = 0
while True:
# gobgp's global rib
global_rib = [p['prefix'] for p in self.gobgp.get_global_rib()]
for p in global_rib:
if p in routes:
routes.remove(p)
if len(routes) == 0:
break
time.sleep(interval)
count += interval
if count >= timeout:
raise Exception('timeout')
    # check that gobgp properly adds its own asn to the aspath
def test_03_check_gobgp_adj_out_rib(self):
for q in self.quaggas.itervalues():
for path in self.gobgp.get_adj_rib_out(q):
asns = path['aspath']
self.assertTrue(self.gobgp.asn in asns)
    # check that routes are properly advertised to all BGP speakers
def test_04_check_quagga_global_rib(self):
interval = 1
timeout = int(120/interval)
for q in self.quaggas.itervalues():
done = False
for _ in range(timeout):
if done:
break
global_rib = q.get_global_rib()
global_rib = [p['prefix'] for p in global_rib]
if len(global_rib) < len(self.quaggas):
time.sleep(interval)
continue
self.assertTrue(len(global_rib) == len(self.quaggas))
for c in self.quaggas.itervalues():
for r in c.routes:
self.assertTrue(r in global_rib)
done = True
if done:
continue
# should not reach here
self.assertTrue(False)
def test_05_add_quagga(self):
q4 = QuaggaBGPContainer(name='q4', asn=65004, router_id='192.168.0.5')
self.quaggas['q4'] = q4
q4.add_route('10.0.4.0/24')
initial_wait_time = q4.run()
time.sleep(initial_wait_time)
self.gobgp.add_peer(q4)
q4.add_peer(self.gobgp)
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q4)
def test_06_check_global_rib(self):
self.test_02_check_gobgp_global_rib()
self.test_04_check_quagga_global_rib()
def test_07_stop_one_quagga(self):
q4 = self.quaggas['q4']
q4.stop()
self.gobgp.wait_for(expected_state=BGP_FSM_ACTIVE, peer=q4)
del self.quaggas['q4']
    # check that gobgp properly sends a withdrawal message for q4's route
def test_08_check_global_rib(self):
self.test_02_check_gobgp_global_rib()
self.test_04_check_quagga_global_rib()
def test_09_add_distant_relative(self):
q1 = self.quaggas['q1']
q2 = self.quaggas['q2']
q3 = self.quaggas['q3']
q5 = QuaggaBGPContainer(name='q5', asn=65005, router_id='192.168.0.6')
initial_wait_time = q5.run()
time.sleep(initial_wait_time)
for q in [q2, q3]:
q5.add_peer(q)
q.add_peer(q5)
med200 = {'name': 'med200',
'type': 'permit',
'match': '0.0.0.0/0',
'direction': 'out',
'med': 200}
q2.add_policy(med200, self.gobgp)
med100 = {'name': 'med100',
'type': 'permit',
'match': '0.0.0.0/0',
'direction': 'out',
'med': 100}
q3.add_policy(med100, self.gobgp)
q5.add_route('10.0.6.0/24')
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q2)
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q3)
q2.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q5)
q3.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q5)
timeout = 120
interval = 1
count = 0
while True:
paths = self.gobgp.get_adj_rib_out(q1, '10.0.6.0/24')
if len(paths) > 0:
path = paths[0]
print "{0}'s nexthop is {1}".format(path['nlri']['prefix'],
path['nexthop'])
n_addrs = [i[1].split('/')[0] for i in self.gobgp.ip_addrs]
if path['nexthop'] in n_addrs:
break
time.sleep(interval)
count += interval
if count >= timeout:
raise Exception('timeout')
def test_10_originate_path(self):
self.gobgp.add_route('10.10.0.0/24')
dst = self.gobgp.get_global_rib('10.10.0.0/24')
self.assertTrue(len(dst) == 1)
self.assertTrue(len(dst[0]['paths']) == 1)
path = dst[0]['paths'][0]
self.assertTrue(path['nexthop'] == '0.0.0.0')
self.assertTrue(len(path['aspath']) == 0)
def test_11_check_adj_rib_out(self):
for q in self.quaggas.itervalues():
paths = self.gobgp.get_adj_rib_out(q, '10.10.0.0/24')
self.assertTrue(len(paths) == 1)
path = paths[0]
peer_info = self.gobgp.peers[q]
local_addr = peer_info['local_addr'].split('/')[0]
self.assertTrue(path['nexthop'] == local_addr)
self.assertTrue(path['aspath'] == [self.gobgp.asn])
def test_12_disable_peer(self):
q1 = self.quaggas['q1']
self.gobgp.disable_peer(q1)
self.gobgp.wait_for(expected_state=BGP_FSM_IDLE, peer=q1)
time.sleep(3)
for route in q1.routes.iterkeys():
dst = self.gobgp.get_global_rib(route)
self.assertTrue(len(dst) == 0)
for q in self.quaggas.itervalues():
if q is q1:
continue
paths = self.gobgp.get_adj_rib_out(q, route)
self.assertTrue(len(paths) == 0)
def test_13_enable_peer(self):
q1 = self.quaggas['q1']
self.gobgp.enable_peer(q1)
self.gobgp.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=q1)
def test_14_check_adj_rib_out(self):
self.test_11_check_adj_rib_out()
def test_15_check_active_connection(self):
g1 = self.gobgp
g2 = GoBGPContainer(name='g2', asn=65000, router_id='192.168.0.5',
ctn_image_name=self.gobgp.image,
log_level=parser_option.gobgp_log_level)
time.sleep(g2.run())
self.quaggas['g2'] = g2
g2.add_peer(g1, passive=True)
g1.add_peer(g2)
g1.wait_for(expected_state=BGP_FSM_ESTABLISHED, peer=g2)
def test_16_check_local_pref_and_med_handling(self):
g1 = self.gobgp
g1.add_route('10.20.0.0/24', local_pref=1000, med=2000)
# iBGP peer
g2 = self.quaggas['g2']
paths = g2.get_global_rib('10.20.0.0/24')
self.assertTrue(len(paths) == 1)
self.assertTrue(len(paths[0]['paths']) == 1)
path = paths[0]['paths'][0]
local_pref = extract_path_attribute(path, BGP_ATTR_TYPE_LOCAL_PREF)
self.assertTrue(local_pref['value'] == 1000)
med = extract_path_attribute(path, BGP_ATTR_TYPE_MULTI_EXIT_DISC)
self.assertTrue(med['metric'] == 2000)
# eBGP peer
q1 = self.quaggas['q1']
paths = q1.get_global_rib('10.20.0.0/24')
self.assertTrue(len(paths) == 1)
path = paths[0]
local_pref = extract_path_attribute(path, BGP_ATTR_TYPE_LOCAL_PREF)
# local_pref's default value is 100
self.assertTrue(local_pref['value'] == 100)
med = extract_path_attribute(path, BGP_ATTR_TYPE_MULTI_EXIT_DISC)
self.assertTrue(med['metric'] == 2000)
if __name__ == '__main__':
    if os.geteuid() != 0:
print "you are not root."
sys.exit(1)
output = local("which docker 2>&1 > /dev/null ; echo $?", capture=True)
    if int(output) != 0:
print "docker not found"
sys.exit(1)
nose.main(argv=sys.argv, addplugins=[OptionParser()],
defaultTest=sys.argv[0])
|
|
#!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''A gatherer for the TotalRecall brand of HTML templates with replaceable
portions. We wanted to reuse extern.tclib.api.handlers.html.TCHTMLParser
but this proved impossible due to the fact that the TotalRecall HTML templates
are in general quite far from parseable HTML and the TCHTMLParser derives
from HTMLParser.HTMLParser which requires relatively well-formed HTML. Some
examples of "HTML" from the TotalRecall HTML templates that wouldn't be
parseable include things like:
<a [PARAMS]>blabla</a> (not parseable because attributes are invalid)
<table><tr><td>[LOTSOFSTUFF]</tr></table> (not parseable because closing
</td> is in the HTML [LOTSOFSTUFF]
is replaced by)
The other problem with using general parsers (such as TCHTMLParser) is that
we want to make sure we output the TotalRecall template with as few changes
as possible in terms of whitespace characters, layout etc. With any parser
that generates a parse tree, and generates output by dumping the parse tree,
we would always have little inconsistencies which could cause bugs (the
TotalRecall template stuff is quite brittle and can break if e.g. a tab
character is replaced with spaces).
The solution, which may be applicable to some other HTML-like template
languages floating around Google, is to create a parser with a simple state
machine that keeps track of what kind of tag it's inside, and whether it's in
a translateable section or not. Translateable sections are:
a) text (including [BINGO] replaceables) inside of tags that
can contain translateable text (which is all tags except
for a few)
b) text inside of an 'alt' attribute in an <image> element, or
the 'value' attribute of a <submit>, <button> or <text>
element.
The parser does not build up a parse tree but rather a "skeleton" which
is a list of nontranslateable strings intermingled with grit.clique.MessageClique
objects. This simplifies the parser considerably compared to a regular HTML
parser. To output a translated document, each item in the skeleton is
printed out, with the relevant Translation from each MessageClique being used
for the requested language.
This implementation borrows some code, constants and ideas from
extern.tclib.api.handlers.html.TCHTMLParser.
'''
import re
import types
from grit import clique
from grit import exception
from grit import util
from grit import tclib
from grit.gather import interface
# HTML tags which break (separate) chunks.
_BLOCK_TAGS = ['script', 'p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'br',
'body', 'style', 'head', 'title', 'table', 'tr', 'td', 'th',
'ul', 'ol', 'dl', 'nl', 'li', 'div', 'object', 'center',
'html', 'link', 'form', 'select', 'textarea',
'button', 'option', 'map', 'area', 'blockquote', 'pre',
'meta', 'xmp', 'noscript', 'label', 'tbody', 'thead',
'script', 'style', 'pre', 'iframe', 'img', 'input', 'nowrap']
# HTML tags which may appear within a chunk.
_INLINE_TAGS = ['b', 'i', 'u', 'tt', 'code', 'font', 'a', 'span', 'small',
'key', 'nobr', 'url', 'em', 's', 'sup', 'strike',
'strong']
# HTML tags within which linebreaks are significant.
_PREFORMATTED_TAGS = ['textarea', 'xmp', 'pre']
# An array mapping some of the inline HTML tags to more meaningful
# names for those tags. This will be used when generating placeholders
# representing these tags.
_HTML_PLACEHOLDER_NAMES = { 'a' : 'link', 'br' : 'break', 'b' : 'bold',
'i' : 'italic', 'li' : 'item', 'ol' : 'ordered_list', 'p' : 'paragraph',
'ul' : 'unordered_list', 'img' : 'image', 'em' : 'emphasis' }
# We append each of these characters in sequence to distinguish between
# different placeholders with basically the same name (e.g. BOLD1, BOLD2).
# Keep in mind that a placeholder name must not be a substring of any other
# placeholder name in the same message, so we can't simply count (BOLD_1
# would be a substring of BOLD_10).
_SUFFIXES = '123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Matches whitespace in an HTML document. Also matches HTML comments, which are
# treated as whitespace.
_WHITESPACE = re.compile(r'(\s|&nbsp;|\\n|\\r|<!--\s*desc\s*=.*?-->)+',
re.DOTALL)
# Finds a non-whitespace character
_NON_WHITESPACE = re.compile(r'\S')
# Matches two or more &nbsp; in a row (a single &nbsp; is not changed into
# placeholders because different languages require different numbers of spaces
# and placeholders must match exactly; more than one is probably a "special"
# whitespace sequence and should be turned into a placeholder).
_NBSP = re.compile(r'&nbsp;(&nbsp;)+')
# Matches nontranslateable chunks of the document
_NONTRANSLATEABLES = re.compile(r'''
<\s*script.+?<\s*/\s*script\s*>
|
<\s*style.+?<\s*/\s*style\s*>
|
<!--.+?-->
|
<\?IMPORT\s.+?> # import tag
|
<\s*[a-zA-Z_]+:.+?> # custom tag (open)
|
<\s*/\s*[a-zA-Z_]+:.+?> # custom tag (close)
|
<!\s*[A-Z]+\s*([^>]+|"[^"]+"|'[^']+')*?>
''', re.MULTILINE | re.DOTALL | re.VERBOSE | re.IGNORECASE)
# Matches a tag and its attributes
_ELEMENT = re.compile(r'''
# Optional closing /, element name
<\s*(?P<closing>/)?\s*(?P<element>[a-zA-Z0-9]+)\s*
# Attributes and/or replaceables inside the tag, if any
(?P<atts>(
\s*([a-zA-Z_][-:.a-zA-Z_0-9]*) # Attribute name
(\s*=\s*(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?
|
\s*\[(\$?\~)?([A-Z0-9-_]+?)(\~\$?)?\]
)*)
\s*(?P<empty>/)?\s*> # Optional empty-tag closing /, and tag close
''',
re.MULTILINE | re.DOTALL | re.VERBOSE)
# Matches elements that may have translateable attributes. The value of these
# special attributes is given by group 'value1' or 'value2'. Note that this
# regexp demands that the attribute value be quoted; this is necessary because
# the non-tree-building nature of the parser means we don't know when we're
# writing out attributes, so we wouldn't know to escape spaces.
_SPECIAL_ELEMENT = re.compile(r'''
<\s*(
input[^>]+?value\s*=\s*(\'(?P<value3>[^\']*)\'|"(?P<value4>[^"]*)")
[^>]+type\s*=\s*"?'?(button|reset|text|submit)'?"?
|
(
table[^>]+?title\s*=
|
img[^>]+?alt\s*=
|
input[^>]+?type\s*=\s*"?'?(button|reset|text|submit)'?"?[^>]+?value\s*=
)
\s*(\'(?P<value1>[^\']*)\'|"(?P<value2>[^"]*)")
)[^>]*?>
''', re.MULTILINE | re.DOTALL | re.VERBOSE | re.IGNORECASE)
# Matches stuff that is translateable if it occurs in the right context
# (between tags). This includes all characters and character entities.
# Note that this also matches &nbsp; which needs to be handled as whitespace
# before this regexp is applied.
_CHARACTERS = re.compile(r'''
(
\w
|
[\!\@\#\$\%\^\*\(\)\-\=\_\+\[\]\{\}\\\|\;\:\'\"\,\.\/\?\`\~]
|
&(\#[0-9]+|\#x[0-9a-fA-F]+|[A-Za-z0-9]+);
)+
''', re.MULTILINE | re.DOTALL | re.VERBOSE)
# Matches Total Recall's "replaceable" tags, which are just any text
# in capitals enclosed by delimiters like [] or [~~] or [$~~$] (e.g. [HELLO],
# [~HELLO~] and [$~HELLO~$]).
_REPLACEABLE = re.compile(r'\[(\$?\~)?(?P<name>[A-Z0-9-_]+?)(\~\$?)?\]',
re.MULTILINE)
# Matches the silly [!]-prefixed "header" that is used in some TotalRecall
# templates.
_SILLY_HEADER = re.compile(r'\[!\]\ntitle\t(?P<title>[^\n]+?)\n.+?\n\n',
re.MULTILINE | re.DOTALL)
# Matches a comment that provides a description for the message it occurs in.
_DESCRIPTION_COMMENT = re.compile(
r'<!--\s*desc\s*=\s*(?P<description>.+?)\s*-->', re.DOTALL)
_DEBUG = 0
def _DebugPrint(text):
if _DEBUG:
print text.encode('utf-8')
class HtmlChunks(object):
'''A parser that knows how to break an HTML-like document into a list of
chunks, where each chunk is either translateable or non-translateable.
The chunks are unmodified sections of the original document, so concatenating
the text of all chunks would result in the original document.'''
def InTranslateable(self):
return self.last_translateable != -1
def Rest(self):
return self.text_[self.current:]
def StartTranslateable(self):
assert not self.InTranslateable()
if self.current != 0:
# Append a nontranslateable chunk
chunk_text = self.text_[self.chunk_start : self.last_nontranslateable + 1]
# Needed in the case where document starts with a translateable.
if len(chunk_text) > 0:
self.AddChunk(False, chunk_text)
self.chunk_start = self.last_nontranslateable + 1
self.last_translateable = self.current
self.last_nontranslateable = -1
def EndTranslateable(self):
assert self.InTranslateable()
# Append a translateable chunk
self.AddChunk(True,
self.text_[self.chunk_start : self.last_translateable + 1])
self.chunk_start = self.last_translateable + 1
self.last_translateable = -1
self.last_nontranslateable = self.current
def AdvancePast(self, match):
self.current += match.end()
def AddChunk(self, translateable, text):
'''Adds a chunk to self, removing linebreaks and duplicate whitespace
if appropriate.
'''
if translateable and not self.last_element_ in _PREFORMATTED_TAGS:
text = text.replace('\n', ' ')
text = text.replace('\r', ' ')
      text = text.replace('   ', ' ')
      text = text.replace('  ', ' ')
m = _DESCRIPTION_COMMENT.search(text)
if m:
self.last_description = m.group('description')
# remove the description from the output text
text = _DESCRIPTION_COMMENT.sub('', text)
if translateable:
description = self.last_description
self.last_description = ''
else:
description = ''
if text != '':
self.chunks_.append((translateable, text, description))
def Parse(self, text):
'''Parses self.text_ into an intermediate format stored in self.chunks_
which is translateable and nontranslateable chunks. Also returns
self.chunks_
Return:
[chunk1, chunk2, chunk3, ...] (instances of class Chunk)
'''
#
# Chunker state
#
self.text_ = text
# A list of tuples (is_translateable, text) which represents the document
# after chunking.
self.chunks_ = []
# Start index of the last chunk, whether translateable or not
self.chunk_start = 0
# Index of the last for-sure translateable character if we are parsing
# a translateable chunk, -1 to indicate we are not in a translateable chunk.
# This is needed so that we don't include trailing whitespace in the
# translateable chunk (whitespace is neutral).
self.last_translateable = -1
# Index of the last for-sure nontranslateable character if we are parsing
# a nontranslateable chunk, -1 if we are not in a nontranslateable chunk.
# This is needed to make sure we can group e.g. "<b>Hello</b> there"
# together instead of just "Hello</b> there" which would be much worse
# for translation.
self.last_nontranslateable = -1
# Index of the character we're currently looking at.
self.current = 0
# The name of the last block element parsed.
self.last_element_ = ''
# The last explicit description we found.
self.last_description = ''
while self.current < len(self.text_):
_DebugPrint('REST: %s' % self.text_[self.current:self.current+60])
# First try to match whitespace
m = _WHITESPACE.match(self.Rest())
if m:
# Whitespace is neutral, it just advances 'current' and does not switch
# between translateable/nontranslateable. If we are in a
# nontranslateable section that extends to the current point, we extend
# it to include the whitespace. If we are in a translateable section,
# we do not extend it until we find
# more translateable parts, because we never want a translateable chunk
# to end with whitespace.
if (not self.InTranslateable() and
self.last_nontranslateable == self.current - 1):
self.last_nontranslateable = self.current + m.end() - 1
self.AdvancePast(m)
continue
# Then we try to match nontranslateables
m = _NONTRANSLATEABLES.match(self.Rest())
if m:
if self.InTranslateable():
self.EndTranslateable()
self.last_nontranslateable = self.current + m.end() - 1
self.AdvancePast(m)
continue
# Now match all other HTML element tags (opening, closing, or empty, we
# don't care).
m = _ELEMENT.match(self.Rest())
if m:
element_name = m.group('element').lower()
if element_name in _BLOCK_TAGS:
self.last_element_ = element_name
if self.InTranslateable():
self.EndTranslateable()
# Check for "special" elements, i.e. ones that have a translateable
# attribute, and handle them correctly. Note that all of the
# "special" elements are block tags, so no need to check for this
# if the tag is not a block tag.
sm = _SPECIAL_ELEMENT.match(self.Rest())
if sm:
# Get the appropriate group name
for group in sm.groupdict().keys():
if sm.groupdict()[group]:
break
# First make a nontranslateable chunk up to and including the
# quote before the translateable attribute value
self.AddChunk(False, self.text_[
self.chunk_start : self.current + sm.start(group)])
# Then a translateable for the translateable bit
self.AddChunk(True, self.Rest()[sm.start(group) : sm.end(group)])
# Finally correct the data invariant for the parser
self.chunk_start = self.current + sm.end(group)
self.last_nontranslateable = self.current + m.end() - 1
elif self.InTranslateable():
# We're in a translateable and the tag is an inline tag, so we
# need to include it in the translateable.
self.last_translateable = self.current + m.end() - 1
self.AdvancePast(m)
continue
# Anything else we find must be translateable, so we advance one character
# at a time until one of the above matches.
if not self.InTranslateable():
self.StartTranslateable()
else:
self.last_translateable = self.current
self.current += 1
# Close the final chunk
if self.InTranslateable():
self.AddChunk(True, self.text_[self.chunk_start : ])
else:
self.AddChunk(False, self.text_[self.chunk_start : ])
return self.chunks_
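# Illustrative sketch (not part of grit): what the chunker yields for a tiny
# document, given the whitespace and block/inline tag rules above.
def _ExampleChunking():
  '''Returns roughly [(False, '<p>', ''), (True, 'Hello <b>world</b>', ''),
  (False, '</p><!-- ignore -->', '')].'''
  return HtmlChunks().Parse('<p>Hello <b>world</b></p><!-- ignore -->')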
def HtmlToMessage(html, include_block_tags=False, description=''):
'''Takes a bit of HTML, which must contain only "inline" HTML elements,
and changes it into a tclib.Message. This involves escaping any entities and
replacing any HTML code with placeholders.
If include_block_tags is true, no error will be given if block tags (e.g.
<p> or <br>) are included in the HTML.
Args:
    html: 'Hello <b>[USERNAME]</b>, how&nbsp;<i>are</i> you?'
include_block_tags: False
Return:
tclib.Message('Hello START_BOLD1USERNAMEEND_BOLD, '
'howNBSPSTART_ITALICareEND_ITALIC you?',
[ Placeholder('START_BOLD', '<b>', ''),
Placeholder('USERNAME', '[USERNAME]', ''),
Placeholder('END_BOLD', '</b>', ''),
Placeholder('START_ITALIC', '<i>', ''),
Placeholder('END_ITALIC', '</i>', ''), ])
'''
# Approach is:
# - first placeholderize, finding <elements>, [REPLACEABLES] and
# - then escape all character entities in text in-between placeholders
parts = [] # List of strings (for text chunks) and tuples (ID, original)
# for placeholders
count_names = {} # Map of base names to number of times used
end_names = {} # Map of base names to stack of end tags (for correct nesting)
def MakeNameClosure(base, type = ''):
'''Returns a closure that can be called once all names have been allocated
to return the final name of the placeholder. This allows us to minimally
number placeholders for non-overlap.
Also ensures that END_XXX_Y placeholders have the same Y as the
corresponding BEGIN_XXX_Y placeholder when we have nested tags of the same
type.
Args:
base: 'phname'
type: '' | 'begin' | 'end'
Return:
Closure()
'''
name = base
if type != '':
name = ('%s_%s' % (type, base)).upper()
if name in count_names.keys():
count_names[name] += 1
else:
count_names[name] = 1
def MakeFinalName(name_ = name, index = count_names[name] - 1):
if (type.lower() == 'end' and
base in end_names.keys() and len(end_names[base])):
return end_names[base].pop(-1) # For correct nesting
if count_names[name_] != 1:
name_ = '%s_%s' % (name_, _SUFFIXES[index])
# We need to use a stack to ensure that the end-tag suffixes match
# the begin-tag suffixes. Only needed when more than one tag of the
# same type.
if type == 'begin':
end_name = ('END_%s_%s' % (base, _SUFFIXES[index])).upper()
if base in end_names.keys():
end_names[base].append(end_name)
else:
end_names[base] = [end_name]
return name_
return MakeFinalName
current = 0
while current < len(html):
m = _NBSP.match(html[current:])
if m:
parts.append((MakeNameClosure('SPACE'), m.group()))
current += m.end()
continue
m = _REPLACEABLE.match(html[current:])
if m:
# Replaceables allow - but placeholders don't, so replace - with _
ph_name = MakeNameClosure('X_%s_X' % m.group('name').replace('-', '_'))
parts.append((ph_name, m.group()))
current += m.end()
continue
m = _SPECIAL_ELEMENT.match(html[current:])
if m:
if not include_block_tags:
raise exception.BlockTagInTranslateableChunk(html)
element_name = 'block' # for simplification
# Get the appropriate group name
for group in m.groupdict().keys():
if m.groupdict()[group]:
break
parts.append((MakeNameClosure(element_name, 'begin'),
html[current : current + m.start(group)]))
parts.append(m.group(group))
parts.append((MakeNameClosure(element_name, 'end'),
html[current + m.end(group) : current + m.end()]))
current += m.end()
continue
m = _ELEMENT.match(html[current:])
if m:
element_name = m.group('element').lower()
if not include_block_tags and not element_name in _INLINE_TAGS:
raise exception.BlockTagInTranslateableChunk(html[current:])
if element_name in _HTML_PLACEHOLDER_NAMES: # use meaningful names
element_name = _HTML_PLACEHOLDER_NAMES[element_name]
# Make a name for the placeholder
type = ''
if not m.group('empty'):
if m.group('closing'):
type = 'end'
else:
type = 'begin'
parts.append((MakeNameClosure(element_name, type), m.group()))
current += m.end()
continue
if len(parts) and isinstance(parts[-1], types.StringTypes):
parts[-1] += html[current]
else:
parts.append(html[current])
current += 1
msg_text = ''
placeholders = []
for part in parts:
if isinstance(part, types.TupleType):
final_name = part[0]()
original = part[1]
msg_text += final_name
placeholders.append(tclib.Placeholder(final_name, original, '(HTML code)'))
else:
msg_text += part
msg = tclib.Message(text=msg_text, placeholders=placeholders,
description=description)
content = msg.GetContent()
for ix in range(len(content)):
if isinstance(content[ix], types.StringTypes):
content[ix] = util.UnescapeHtml(content[ix], replace_nbsp=False)
return msg
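# Illustrative sketch (not part of grit): placeholderization of a small
# inline-only snippet by HtmlToMessage(); the names follow MakeNameClosure.
def _ExampleHtmlToMessage():
  '''The resulting message text is roughly
  'Click BEGIN_BOLDhereEND_BOLD to X_CONTINUE_X.', with matching placeholders.'''
  return HtmlToMessage('Click <b>here</b> to [CONTINUE].')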
class TrHtml(interface.GathererBase):
'''Represents a document or message in the template format used by
Total Recall for HTML documents.'''
def __init__(self, text):
'''Creates a new object that represents 'text'.
Args:
text: '<html>...</html>'
'''
super(type(self), self).__init__()
self.text_ = text
self.have_parsed_ = False
self.skeleton_ = [] # list of strings and MessageClique objects
def GetText(self):
'''Returns the original text of the HTML document'''
return self.text_
def GetCliques(self):
'''Returns the message cliques for each translateable message in the
document.'''
return filter(lambda x: isinstance(x, clique.MessageClique), self.skeleton_)
def Translate(self, lang, pseudo_if_not_available=True,
skeleton_gatherer=None, fallback_to_english=False):
'''Returns this document with translateable messages filled with
the translation for language 'lang'.
Args:
lang: 'en'
pseudo_if_not_available: True
Return:
'ID_THIS_SECTION TYPE\n...BEGIN\n "Translated message"\n......\nEND
Raises:
grit.exception.NotReady() if used before Parse() has been successfully
called.
grit.exception.NoSuchTranslation() if 'pseudo_if_not_available' is false
and there is no translation for the requested language.
'''
if len(self.skeleton_) == 0:
raise exception.NotReady()
# TODO(joi) Implement support for skeleton gatherers here.
out = []
for item in self.skeleton_:
if isinstance(item, types.StringTypes):
out.append(item)
else:
msg = item.MessageForLanguage(lang,
pseudo_if_not_available,
fallback_to_english)
for content in msg.GetContent():
if isinstance(content, tclib.Placeholder):
out.append(content.GetOriginal())
else:
# We escape " characters to increase the chance that attributes
# will be properly escaped.
out.append(util.EscapeHtml(content, True))
return ''.join(out)
# Parsing is done in two phases: First, we break the document into
# translateable and nontranslateable chunks. Second, we run through each
# translateable chunk and insert placeholders for any HTML elements, unescape
# escaped characters, etc.
def Parse(self):
if self.have_parsed_:
return
self.have_parsed_ = True
text = self.text_
# First handle the silly little [!]-prefixed header because it's not
# handled by our HTML parsers.
m = _SILLY_HEADER.match(text)
if m:
self.skeleton_.append(text[:m.start('title')])
self.skeleton_.append(self.uberclique.MakeClique(
tclib.Message(text=text[m.start('title'):m.end('title')])))
self.skeleton_.append(text[m.end('title') : m.end()])
text = text[m.end():]
chunks = HtmlChunks().Parse(text)
for chunk in chunks:
if chunk[0]: # Chunk is translateable
self.skeleton_.append(self.uberclique.MakeClique(
HtmlToMessage(chunk[1], description=chunk[2])))
else:
self.skeleton_.append(chunk[1])
# Go through the skeleton and change any messages that consist solely of
# placeholders and whitespace into nontranslateable strings.
for ix in range(len(self.skeleton_)):
got_text = False
if isinstance(self.skeleton_[ix], clique.MessageClique):
msg = self.skeleton_[ix].GetMessage()
for item in msg.GetContent():
if (isinstance(item, types.StringTypes) and _NON_WHITESPACE.search(item)
              and item != '&nbsp;'):
got_text = True
break
if not got_text:
self.skeleton_[ix] = msg.GetRealContent()
# Static method
def FromFile(html, extkey=None, encoding = 'utf-8'):
'''Creates a TrHtml object from the contents of 'html' which are decoded
using 'encoding'. Returns a new TrHtml object, upon which Parse() has not
been called.
Args:
html: file('') | 'filename.html'
extkey: ignored
encoding: 'utf-8' (note that encoding is ignored if 'html' is not a file
name but instead an open file or file-like object)
Return:
TrHtml(text_of_file)
'''
if isinstance(html, types.StringTypes):
html = util.WrapInputStream(file(html, 'r'), encoding)
doc = html.read()
# Ignore the BOM character if the document starts with one.
if len(doc) and doc[0] == u'\ufeff':
doc = doc[1:]
return TrHtml(doc)
FromFile = staticmethod(FromFile)
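# Illustrative sketch (hypothetical file name): FromFile() only reads and
# decodes the template; Parse() is deferred until a caller needs the cliques.
def _ExampleLoad(path='example_template.html'):
  '''Load a TotalRecall template from disk and return its raw text.'''
  tr_html = TrHtml.FromFile(path, encoding='utf-8')
  return tr_html.GetText()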
|
|
import base64
import collections
import datetime
import logging
import re
import sys
import time
from urllib import urlencode
import urlparse
import configparser
import dateutil.parser
import requests
import rethinkdb as r
from termcolor import colored
from db.github_repos import PluginGithubRepos, DotfilesGithubRepos
import db.plugins
import db.util
import util
r_conn = db.util.r_conn
try:
import secrets
_GITHUB_API_TOKEN = getattr(secrets, 'GITHUB_PERSONAL_ACCESS_TOKEN', None)
except ImportError:
_GITHUB_API_TOKEN = None
_NO_GITHUB_API_TOKEN_MESSAGE = """
*******************************************************************************
* Warning: GitHub API token not found in secrets.py. Scraping will be severely
* rate-limited. See secrets.py.example to obtain a GitHub personal access token
*******************************************************************************
"""
if not _GITHUB_API_TOKEN:
logging.warn(colored(_NO_GITHUB_API_TOKEN_MESSAGE, 'red'))
ReposByManager = collections.namedtuple('ReposByManager', ['vundle', 'neobundle', 'vimplug'])
###############################################################################
# General utilities for interacting with the GitHub API.
class ApiRateLimitExceededError(Exception):
def __init__(self, headers):
self.headers = headers
def __str__(self):
return repr(self.headers)
def get_api_page(url_or_path, query_params=None, page=1, per_page=100):
"""Get a page from GitHub's v3 API.
Arguments:
url_or_path: The API method to call or the full URL.
query_params: A dict of additional query parameters
page: Page number
per_page: How many results to return per page. Max is 100.
Returns:
A tuple: (Response object, JSON-decoded dict of the response)
Raises: ApiRateLimitExceededError
"""
split_url = urlparse.urlsplit(url_or_path)
query = {
'page': page,
'per_page': per_page,
}
if _GITHUB_API_TOKEN:
query['access_token'] = _GITHUB_API_TOKEN
query.update(dict(urlparse.parse_qsl(split_url.query)))
query.update(query_params or {})
url = urlparse.SplitResult(scheme='https', netloc='api.github.com',
path=split_url.path, query=urlencode(query),
fragment=split_url.fragment).geturl()
res = requests.get(url)
if res.status_code == 403 and res.headers['X-RateLimit-Remaining'] == '0':
raise ApiRateLimitExceededError(res.headers)
return res, res.json()
def get_requests_left():
"""Retrieve how many API requests are remaining"""
_, data = get_api_page('rate_limit')
return data['rate']['remaining']
def maybe_wait_until_api_limit_resets(response_headers):
"""If we're about to exceed our API limit, sleeps until our API limit is
reset.
"""
if response_headers['X-RateLimit-Remaining'] == '0':
reset_timestamp = response_headers['X-RateLimit-Reset']
reset_date = datetime.datetime.fromtimestamp(int(reset_timestamp))
now = datetime.datetime.now()
seconds_to_wait = (reset_date - now).seconds + 1
print "Sleeping %s seconds for API limit to reset." % seconds_to_wait
time.sleep(seconds_to_wait)
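# Illustrative sketch (not part of the scraper): how the helpers above are
# typically combined -- fetch a page and, if the rate limit trips, wait for
# the limit to reset before retrying once.
def _example_fetch_repo(owner='gmarik', repo_name='vundle'):
    """Fetch one repo's API data, waiting out the rate limit if needed."""
    try:
        _, repo_data = get_api_page('repos/%s/%s' % (owner, repo_name))
    except ApiRateLimitExceededError as e:
        maybe_wait_until_api_limit_resets(e.headers)
        _, repo_data = get_api_page('repos/%s/%s' % (owner, repo_name))
    return repo_data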
###############################################################################
# Routines for scraping Vim plugin repos from GitHub.
def get_plugin_data(owner, repo_name, repo_data, readme_data=None):
"""Populate info relevant to a plugin from a GitHub repo.
This should not be used to fetch info from the vim-scripts user's repos.
Arguments:
owner: The repo's owner's login, eg. "gmarik"
repo_name: The repo name, eg. "vundle"
repo_data: GitHub API /repos response for this repo
readme_data: (optional) GitHub API /readme response for this repo
Returns:
A dict of properties that can be inserted as a row in the plugins table
"""
assert owner != 'vim-scripts'
if not readme_data:
_, readme_data = get_api_page('repos/%s/%s/readme' % (
owner, repo_name))
readme_base64_decoded = base64.b64decode(readme_data.get('content', ''))
readme = unicode(readme_base64_decoded, 'utf-8', errors='ignore')
readme_filename = readme_data.get('name', '')
# TODO(david): We used to extract the vim.org ID from the homepage if it
# were a vim.org URL, but that became too unreliable as many different
# repos would all claim to have the same vim.org homepage, when
# sometimes those repos were of different plugins. But it's still
# useful information in heuristic matching, just can't be used as
# a key.
homepage = repo_data['homepage']
repo_created_date = dateutil.parser.parse(repo_data['created_at'])
# Fetch commits so we can get the update/create dates.
_, commits_data = get_api_page('repos/%s/%s/commits' % (owner, repo_name),
per_page=100)
if commits_data and isinstance(commits_data, list) and len(commits_data):
# Unfortunately repo_data['updated_at'] and repo_data['pushed_at'] are
        # wildly misrepresentative of the last time someone made a commit to the
# repo.
updated_date_text = commits_data[0]['commit']['author']['date']
updated_date = dateutil.parser.parse(updated_date_text)
# To get the creation date, we use the heuristic of min(repo creation
# date, 100th latest commit date). We do this because repo creation
# date can be later than the date of the first commit, which is
# particularly pervasive for vim-scripts repos. Fortunately, most
# vim-scripts repos don't have more than 100 commits, and also we get
# creation_date for vim-scripts repos when scraping vim.org.
early_commit_date_text = commits_data[-1]['commit']['author']['date']
early_commit_date = dateutil.parser.parse(early_commit_date_text)
created_date = min(repo_created_date, early_commit_date)
else:
updated_date = dateutil.parser.parse(repo_data['updated_at'])
created_date = repo_created_date
# Fetch owner info to get author name.
owner_login = repo_data['owner']['login']
_, owner_data = get_api_page('users/%s' % owner_login)
author = owner_data.get('name') or owner_data.get('login')
return {
'created_at': util.to_timestamp(created_date),
'updated_at': util.to_timestamp(updated_date),
'vimorg_id': None,
'github_repo_id': str(repo_data['id']),
'github_owner': owner,
'github_repo_name': repo_name,
'github_author': author,
'github_stars': repo_data['watchers'],
'github_homepage': homepage,
'github_short_desc': repo_data['description'],
'github_readme': readme,
'github_readme_filename': readme_filename,
}
def _add_submission_data(plugin, submission):
"""Updates a plugin with info from a user submission."""
if (plugin.get('category', 'uncategorized') == 'uncategorized' and
submission.get('category', 'uncategorized') != 'uncategorized'):
plugin['category'] = submission['category']
if not plugin.get('tags') and submission.get('tags'):
db.plugins.update_tags(plugin, submission['tags'])
# TODO(david): Simplify/break-up this function.
def scrape_plugin_repos(num):
"""Scrapes the num plugin repos that have been least recently scraped."""
MIN_FORK_USERS = 3
query = r.table('plugin_github_repos').filter({'is_blacklisted': False})
# We don't want to scrape forks that not many people use.
query = query.filter(r.not_((r.row['is_fork'] == True) & ( # NOQA
r.row['plugin_manager_users'] < MIN_FORK_USERS)),
default=True)
# Only scrape repos that don't redirect to other ones (probably renamed).
query = query.filter(r.row['redirects_to'] == '')
# We scrape vim-scripts separately using the batch /users/:user/repos call
query = query.filter(r.row['owner'] != 'vim-scripts')
query = query.order_by('last_scraped_at').limit(num)
repos = query.run(r_conn())
# TODO(david): Print stats at the end: # successfully scraped, # not found,
# # redirects, etc.
for repo in repos:
repo_name = repo['repo_name']
repo_owner = repo['owner']
# Print w/o newline.
print " scraping %s/%s ..." % (repo_owner, repo_name),
sys.stdout.flush()
# Attempt to fetch data about the plugin.
res, repo_data = get_api_page('repos/%s/%s' % (repo_owner, repo_name))
# If the API call 404s, then see if the repo has been renamed by
# checking for a redirect in a non-API call.
if res.status_code == 404:
res = requests.head('https://github.com/%s/%s' % (
repo_owner, repo_name))
if res.status_code == 301:
location = res.headers.get('location')
valid_repo_url = re.compile("^https://github.com/[^/]+/[^/]+")
if not valid_repo_url.match(location):
print 'redirects to invalid GitHub repo URL: %s' % location
continue
_, redirect_owner, redirect_repo_name = location.rsplit('/', 2)
repo['redirects_to'] = '%s/%s' % (redirect_owner,
redirect_repo_name)
# Make sure we insert the new location of the repo, which will
# be scraped in a future run.
PluginGithubRepos.upsert_with_owner_repo({
'owner': redirect_owner,
'repo_name': redirect_repo_name,
# TODO(david): Should append to a list
'redirects_from': ('%s/%s' % (repo_owner, repo_name)),
})
# And now change the GitHub repo location of the plugin that
# the old repo location pointed to
query = r.table('plugins').get_all(
[repo_owner, repo_name], index='github_owner_repo')
db_plugin = db.util.get_first(query)
if db_plugin:
db_plugin['github_owner'] = redirect_owner
db_plugin['github_repo_name'] = redirect_repo_name
db.plugins.insert(db_plugin, conflict='replace')
print 'redirects to %s/%s.' % (redirect_owner,
redirect_repo_name)
else:
# TODO(david): Insert some metadata in the github repo that
# this is not found
print 'not found.'
plugin_data = None
else:
plugin_data = get_plugin_data(repo_owner, repo_name, repo_data)
repo['repo_data'] = repo_data
repo['repo_id'] = str(repo_data.get('id', repo['repo_id']))
PluginGithubRepos.log_scrape(repo)
# If this is a fork, note it and ensure we know about original repo.
if repo_data.get('fork'):
repo['is_fork'] = True
PluginGithubRepos.upsert_with_owner_repo({
'owner': repo_data['parent']['owner']['login'],
'repo_name': repo_data['parent']['name'],
})
PluginGithubRepos.upsert_with_owner_repo(repo)
# For most cases we don't care about forked repos, unless the forked
# repo is used by others.
if repo_data.get('fork') and (
repo.get('plugin_manager_users', 0) < MIN_FORK_USERS):
print 'skipping fork of %s' % repo_data['parent']['full_name']
continue
if plugin_data:
# Insert the number of plugin manager users across all names/owners
# of this repo.
# TODO(david): Try to also use repo_id for this (but not all repos
# have it), or look at multiple levels of redirects.
plugin_manager_users = repo.get('plugin_manager_users', 0)
other_repos = r.table('plugin_github_repos').get_all(
'%s/%s' % (repo_owner, repo_name),
index='redirects_to').run(r_conn())
for other_repo in other_repos:
if other_repo['id'] == repo['id']:
continue
plugin_manager_users += other_repo.get(
'plugin_manager_users', 0)
plugin_data['github_bundles'] = plugin_manager_users
if repo.get('from_submission'):
_add_submission_data(plugin_data, repo['from_submission'])
db.plugins.add_scraped_data(plugin_data, repo,
submission=repo.get('from_submission'))
print 'done.'
def scrape_vim_scripts_repos(num):
"""Scrape at least num repos from the vim-scripts GitHub user."""
_, user_data = get_api_page('users/vim-scripts')
# Calculate how many pages of repositories there are.
num_repos = user_data['public_repos']
num_pages = (num_repos + 99) / 100 # ceil(num_repos / 100.0)
num_inserted = 0
num_scraped = 0
for page in range(1, num_pages + 1):
if num_scraped >= num:
break
_, repos_data = get_api_page('users/vim-scripts/repos', page=page)
for repo_data in repos_data:
# Scrape plugin-relevant data. We don't need much info from
# vim-scripts because it's a mirror of vim.org.
# vimorg_id is required for associating with the corresponding
# vim.org-scraped plugin.
vimorg_id = util.get_vimorg_id_from_url(repo_data['homepage'])
assert vimorg_id
repo_name = repo_data['name']
repo = PluginGithubRepos.get_with_owner_repo('vim-scripts',
repo_name)
num_bundles = repo['plugin_manager_users'] if repo else 0
db.plugins.add_scraped_data({
'vimorg_id': vimorg_id,
'github_vim_scripts_repo_name': repo_name,
'github_vim_scripts_stars': repo_data['watchers'],
'github_vim_scripts_bundles': num_bundles,
})
# Also add to our index of known GitHub plugins.
inserted = PluginGithubRepos.upsert_with_owner_repo({
'owner': 'vim-scripts',
'repo_name': repo_name,
'repo_data': repo_data,
})
num_inserted += int(inserted)
num_scraped += 1
print ' scraped %s repos' % num_scraped
print "\nScraped %s vim-scripts GitHub repos; inserted %s new ones." % (
num_scraped, num_inserted)
###############################################################################
# Code to scrape GitHub dotfiles repos to extract plugins used.
# TODO(david): Write up a blurb on how all of this works.
# The following are names of repos and locations where we search for
# Vundle/Pathogen plugin references. They were found by manually going through
# search results of
# github.com/search?q=scrooloose%2Fsyntastic&ref=searchresults&type=Code
# TODO(david): It would be good to add "vim", "settings", and "config", but
# there are too many false positives that need to be filtered out.
_DOTFILE_REPO_NAMES = ['vimrc', 'vimfile', 'vim-file', 'vimconf',
'vim-conf', 'dotvim', 'vim-setting', 'myvim', 'dotfile',
'config-files', 'plug']
_VIMRC_FILENAMES = ['vimrc', 'bundle', 'vundle.vim', 'vundles.vim',
'vim.config', 'plugins.vim', 'plug.vim']
_VIM_DIRECTORIES = ['vim', 'config', 'home']
# Regexes for extracting plugin references from dotfile repos. See
# github_test.py for examples of what they match and don't.
# Matches eg. "Bundle 'gmarik/vundle'" or "Bundle 'taglist'"
# [^\S\n] means whitespace except newline: stackoverflow.com/a/3469155/392426
_PLUGIN_REGEX_TEMPLATE = r'^[^\S\n]*%s[^\S\n]*[\'"]([^\'"\n\r]+)[\'"]'
_VUNDLE_PLUGIN_REGEX = re.compile(_PLUGIN_REGEX_TEMPLATE %
'(?:Bundle|Plugin)', re.MULTILINE)
_NEOBUNDLE_PLUGIN_REGEX = re.compile(_PLUGIN_REGEX_TEMPLATE %
'(?:NeoBundle|NeoBundleFetch|NeoBundleLazy)', re.MULTILINE)
_VIMPLUG_PLUGIN_REGEX = re.compile(_PLUGIN_REGEX_TEMPLATE %
'(?:Plug)', re.MULTILINE)
# Extracts owner and repo name from a bundle spec -- a git repo URI, implicitly
# github.com if host not given.
# eg. ('gmarik', 'vundle') or (None, 'taglist')
_BUNDLE_OWNER_REPO_REGEX = re.compile(
r'(?:([^:\'"/]+)/)?([^\'"\n\r/]+?)(?:\.git|/)?$')
# Matches a .gitmodules section heading that's likely of a Pathogen bundle.
_SUBMODULE_IS_BUNDLE_REGEX = re.compile(r'submodule.+(bundles?)|(vim.plugins).+',
re.IGNORECASE)
def _extract_bundles_with_regex(file_contents, bundle_plugin_regex):
"""Extracts plugin repos from contents of a file using a given regex.
Arguments:
file_contents: A string of the contents of the file to search through.
bundle_plugin_regex: A regex to use to match all lines referencing
plugin repos.
Returns:
A list of tuples (owner, repo_name) referencing GitHub repos.
"""
bundles = bundle_plugin_regex.findall(file_contents)
if not bundles:
return []
plugin_repos = []
for bundle in bundles:
match = _BUNDLE_OWNER_REPO_REGEX.search(bundle)
if match and len(match.groups()) == 2:
owner, repo = match.groups()
owner = 'vim-scripts' if owner is None else owner
plugin_repos.append((owner, repo))
else:
logging.error(colored(
'Failed to extract owner/repo from "%s"' % bundle, 'red'))
return plugin_repos
def _extract_bundle_repos_from_file(file_contents):
"""Extracts Vundle Neobundle and Plug plugins from contents of a vimrc-like
file.
Arguments:
file_contents: A string of the contents of the file to search through.
Returns:
A named tuple with a key for each plugin manager. Each value is a list
of tuples of the form (owner, repo_name) referencing a GitHub repo.
"""
vundle_repos = _extract_bundles_with_regex(file_contents,
_VUNDLE_PLUGIN_REGEX)
neobundle_repos = _extract_bundles_with_regex(file_contents,
_NEOBUNDLE_PLUGIN_REGEX)
vimplug_repos = _extract_bundles_with_regex(file_contents,
_VIMPLUG_PLUGIN_REGEX)
return ReposByManager(vundle_repos, neobundle_repos, vimplug_repos)
def _extract_bundle_repos_from_dir(dir_data, depth=0):
"""Extracts vim plugin bundles from a GitHub dotfiles directory.
Will recursively search through directories likely to contain vim config
files (lots of people seem to like putting their vim config in a "vim"
subdirectory).
Arguments:
dir_data: API response from GitHub of a directory or repo's contents.
depth: Current recursion depth (0 = top-level repo).
Returns:
        A ReposByManager named tuple with one list per plugin manager. Each
        list contains tuples of the form (owner, repo_name) referencing a
        GitHub repo.
"""
# First, look for top-level files that are likely to contain references to
# vim plugins.
files = [f for f in dir_data if f['type'] == 'file']
for file_data in files:
filename = file_data['name'].lower()
if 'gvimrc' in filename:
continue
if not any((f in filename) for f in _VIMRC_FILENAMES):
continue
# Ok, there could potentially be references to vim plugins here.
_, file_contents = get_api_page(file_data['url'])
contents_decoded = base64.b64decode(file_contents.get('content', ''))
        repos_by_manager = _extract_bundle_repos_from_file(contents_decoded)
        if any(repos_by_manager):
            return repos_by_manager
if depth >= 3:
return ReposByManager([], [], [])
# No plugins were found, so look in subdirectories that could potentially
# have vim config files.
dirs = [f for f in dir_data if f['type'] == 'dir']
for dir_data in dirs:
filename = dir_data['name'].lower()
if not any((f in filename) for f in _VIM_DIRECTORIES):
continue
# Ok, there could potentially be vim config files in here.
_, subdir_data = get_api_page(dir_data['url'])
        repos_by_manager = _extract_bundle_repos_from_dir(subdir_data, depth + 1)
        if any(repos_by_manager):
            return repos_by_manager
return ReposByManager([], [], [])
def _extract_pathogen_repos(repo_contents):
"""Extracts Pathogen plugin repos from a GitHub dotfiles repository.
This currently just extracts plugins if they are checked in as submodules,
because it's easy to extract repo URLs from the .gitmodules file but
difficult to determine the repo URL of a plugin that's just cloned in.
Arguments:
repo_contents: API response from GitHub of a directory or repo's
contents.
Returns:
A list of tuples (owner, repo_name) referencing GitHub repos.
"""
gitmodules = filter(lambda f: f['type'] == 'file' and
f['name'].lower() == '.gitmodules', repo_contents)
if not gitmodules:
return []
_, file_contents = get_api_page(gitmodules[0]['url'])
contents_decoded = base64.b64decode(file_contents.get('content', ''))
contents_unicode = unicode(contents_decoded, 'utf-8', errors='ignore')
parser = configparser.ConfigParser(interpolation=None)
try:
        parser.read_string(contents_unicode)
except configparser.Error:
logging.exception(colored(
'Could not parse the .gitmodules file of %s.' %
file_contents['url'], 'red'))
return []
plugin_repos = []
for section, config in parser.items():
if not _SUBMODULE_IS_BUNDLE_REGEX.search(section):
continue
if not config.get('url'):
continue
# The parser sometimes over-parses the value
url = config['url'].split('\n')[0]
match = _BUNDLE_OWNER_REPO_REGEX.search(url)
if match and len(match.groups()) == 2 and match.group(1):
owner, repo = match.groups()
plugin_repos.append((owner, repo))
else:
logging.error(colored(
'Failed to extract owner/repo from "%s"' % url, 'red'))
return plugin_repos
def _get_plugin_repos_from_dotfiles(repo_data, search_keyword):
"""Search for references to vim plugin repos from a dotfiles repository,
and insert them into DB.
Arguments:
repo_data: API response from GitHub of a repository.
search_keyword: The keyword used that found this repo.
"""
owner_repo = repo_data['full_name']
# Print w/o newline.
print " scraping %s ..." % owner_repo,
sys.stdout.flush()
res, contents_data = get_api_page('repos/%s/contents' % owner_repo)
if res.status_code == 404 or not isinstance(contents_data, list):
print "contents not found"
return
repos_by_manager = _extract_bundle_repos_from_dir(contents_data)
vundle_repos = repos_by_manager.vundle
neobundle_repos = repos_by_manager.neobundle
vimplug_repos = repos_by_manager.vimplug
pathogen_repos = _extract_pathogen_repos(contents_data)
owner, repo_name = owner_repo.split('/')
db_repo = DotfilesGithubRepos.get_with_owner_repo(owner, repo_name)
pushed_date = dateutil.parser.parse(repo_data['pushed_at'])
def stringify_repo(owner_repo_tuple):
return '/'.join(owner_repo_tuple)
repo = dict(db_repo or {}, **{
'owner': owner,
'pushed_at': util.to_timestamp(pushed_date),
'repo_name': repo_name,
'search_keyword': search_keyword,
'vundle_repos': map(stringify_repo, vundle_repos),
'neobundle_repos': map(stringify_repo, neobundle_repos),
'vimplug_repos': map(stringify_repo, vimplug_repos),
'pathogen_repos': map(stringify_repo, pathogen_repos),
})
DotfilesGithubRepos.log_scrape(repo)
DotfilesGithubRepos.upsert_with_owner_repo(repo)
print 'found %s Vundles, %s NeoBundles, %s VimPlugs, %s Pathogens' % (
len(vundle_repos), len(neobundle_repos),
len(vimplug_repos), len(pathogen_repos))
return {
'vundle_repos_count': len(vundle_repos),
'neobundle_repos_count': len(neobundle_repos),
'vimplug_repos_count': len(vimplug_repos),
'pathogen_repos_count': len(pathogen_repos),
}
def scrape_dotfiles_repos(num):
"""Scrape at most num dotfiles repos from GitHub for references to Vim
plugin repos.
We perform a search on GitHub repositories that are likely to contain
Vundle and Pathogen bundles instead of a code search matching
Vundle/Pathogen commands (which has higher precision and recall), because
GitHub's API requires code search to be limited to
a user/repo/organization. :(
"""
# Earliest allowable updated date to start scraping from (so we won't be
# scraping repos that were last pushed before this date).
EARLIEST_PUSHED_DATE = datetime.datetime(2013, 1, 1)
repos_scraped = 0
scraped_counter = collections.Counter()
for repo_name in _DOTFILE_REPO_NAMES:
latest_repo = DotfilesGithubRepos.get_latest_with_keyword(repo_name)
if latest_repo and latest_repo.get('pushed_at'):
last_pushed_date = max(datetime.datetime.utcfromtimestamp(
latest_repo['pushed_at']), EARLIEST_PUSHED_DATE)
else:
last_pushed_date = EARLIEST_PUSHED_DATE
# We're going to scrape all repos updated after the latest updated repo
# in our DB, starting with the least recently updated. This maintains
# the invariant that we have scraped all repos pushed before the latest
# push date (and after EARLIEST_PUSHED_DATE).
while True:
start_date_iso = last_pushed_date.isoformat()
search_params = {
'q': '%s in:name pushed:>%s' % (repo_name, start_date_iso),
'sort': 'updated',
'order': 'asc',
}
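            # For example, with repo_name 'vimrc' this issues a query such as
            # "vimrc in:name pushed:>2013-01-01T00:00:00", sorted by update
            # time ascending (illustrative; the exact date comes from the DB).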
per_page = 100
response, search_data = get_api_page('search/repositories',
query_params=search_params, page=1, per_page=per_page)
items = search_data.get('items', [])
for item in items:
try:
stats = _get_plugin_repos_from_dotfiles(item, repo_name)
except ApiRateLimitExceededError:
logging.exception('API rate limit exceeded.')
return repos_scraped, scraped_counter
except Exception:
logging.exception('Error scraping dotfiles repo %s' %
item['full_name'])
stats = {}
scraped_counter.update(stats)
                # If we've scraped the desired number of repos, we can quit.
repos_scraped += 1
if repos_scraped >= num:
return repos_scraped, scraped_counter
# If we're about to exceed the rate limit (20 requests / min),
# sleep until the limit resets.
maybe_wait_until_api_limit_resets(response.headers)
# If we've scraped all repos with this name, move on to the next
# repo name.
if len(items) < per_page:
break
else:
last_pushed_date = dateutil.parser.parse(
items[-1]['pushed_at'])
return repos_scraped, scraped_counter
|
|
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test methods in twisted.internet.threads and reactor thread APIs.
"""
import sys, os, time
from twisted.trial import unittest
from twisted.internet import reactor, defer, interfaces, threads, protocol, error
from twisted.python import failure, threadable, log, threadpool
class ReactorThreadsTestCase(unittest.TestCase):
"""
Tests for the reactor threading API.
"""
def test_suggestThreadPoolSize(self):
"""
Try to change maximum number of threads.
"""
reactor.suggestThreadPoolSize(34)
self.assertEquals(reactor.threadpool.max, 34)
reactor.suggestThreadPoolSize(4)
self.assertEquals(reactor.threadpool.max, 4)
def _waitForThread(self):
"""
The reactor's threadpool is only available when the reactor is running,
so to have a sane behavior during the tests we make a dummy
L{threads.deferToThread} call.
"""
return threads.deferToThread(time.sleep, 0)
def test_callInThread(self):
"""
Test callInThread functionality: set a C{threading.Event}, and check
that it's not in the main thread.
"""
def cb(ign):
waiter = threading.Event()
result = []
def threadedFunc():
result.append(threadable.isInIOThread())
waiter.set()
reactor.callInThread(threadedFunc)
waiter.wait(120)
if not waiter.isSet():
self.fail("Timed out waiting for event.")
else:
self.assertEquals(result, [False])
return self._waitForThread().addCallback(cb)
def test_callFromThread(self):
"""
Test callFromThread functionality: from the main thread, and from
another thread.
"""
def cb(ign):
firedByReactorThread = defer.Deferred()
firedByOtherThread = defer.Deferred()
def threadedFunc():
reactor.callFromThread(firedByOtherThread.callback, None)
reactor.callInThread(threadedFunc)
reactor.callFromThread(firedByReactorThread.callback, None)
return defer.DeferredList(
[firedByReactorThread, firedByOtherThread],
fireOnOneErrback=True)
return self._waitForThread().addCallback(cb)
def test_wakerOverflow(self):
"""
Try to make an overflow on the reactor waker using callFromThread.
"""
def cb(ign):
self.failure = None
waiter = threading.Event()
def threadedFunction():
# Hopefully a hundred thousand queued calls is enough to
# trigger the error condition
for i in xrange(100000):
try:
reactor.callFromThread(lambda: None)
except:
self.failure = failure.Failure()
break
waiter.set()
reactor.callInThread(threadedFunction)
waiter.wait(120)
if not waiter.isSet():
self.fail("Timed out waiting for event")
if self.failure is not None:
return defer.fail(self.failure)
return self._waitForThread().addCallback(cb)
def _testBlockingCallFromThread(self, reactorFunc):
"""
Utility method to test L{threads.blockingCallFromThread}.
"""
waiter = threading.Event()
results = []
errors = []
def cb1(ign):
def threadedFunc():
try:
r = threads.blockingCallFromThread(reactor, reactorFunc)
except Exception, e:
errors.append(e)
else:
results.append(r)
waiter.set()
reactor.callInThread(threadedFunc)
return threads.deferToThread(waiter.wait, self.getTimeout())
def cb2(ign):
if not waiter.isSet():
self.fail("Timed out waiting for event")
return results, errors
return self._waitForThread().addCallback(cb1).addBoth(cb2)
def test_blockingCallFromThread(self):
"""
Test blockingCallFromThread facility: create a thread, call a function
in the reactor using L{threads.blockingCallFromThread}, and verify the
result returned.
"""
def reactorFunc():
return defer.succeed("foo")
def cb(res):
self.assertEquals(res[0][0], "foo")
return self._testBlockingCallFromThread(reactorFunc).addCallback(cb)
def test_asyncBlockingCallFromThread(self):
"""
Test blockingCallFromThread as above, but be sure the resulting
Deferred is not already fired.
"""
def reactorFunc():
d = defer.Deferred()
reactor.callLater(0.1, d.callback, "egg")
return d
def cb(res):
self.assertEquals(res[0][0], "egg")
return self._testBlockingCallFromThread(reactorFunc).addCallback(cb)
def test_errorBlockingCallFromThread(self):
"""
Test error report for blockingCallFromThread.
"""
def reactorFunc():
return defer.fail(RuntimeError("bar"))
def cb(res):
self.assert_(isinstance(res[1][0], RuntimeError))
self.assertEquals(res[1][0].args[0], "bar")
return self._testBlockingCallFromThread(reactorFunc).addCallback(cb)
def test_asyncErrorBlockingCallFromThread(self):
"""
Test error report for blockingCallFromThread as above, but be sure the
resulting Deferred is not already fired.
"""
def reactorFunc():
d = defer.Deferred()
reactor.callLater(0.1, d.errback, RuntimeError("spam"))
return d
def cb(res):
self.assert_(isinstance(res[1][0], RuntimeError))
self.assertEquals(res[1][0].args[0], "spam")
return self._testBlockingCallFromThread(reactorFunc).addCallback(cb)
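# A minimal sketch of how blockingCallFromThread is meant to be used outside
# of tests (assuming the reactor is already running, and `someAsyncCall` is a
# hypothetical function that returns a Deferred):
#
#   from twisted.internet import reactor, threads
#
#   def workerThreadFunc():
#       # Blocks this (non-reactor) thread until the Deferred returned by
#       # someAsyncCall fires, then returns its result or raises its error.
#       result = threads.blockingCallFromThread(reactor, someAsyncCall)
#       return result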
class Counter:
index = 0
problem = 0
def add(self):
"""A non thread-safe method."""
next = self.index + 1
# another thread could jump in here and increment self.index on us
if next != self.index + 1:
self.problem = 1
raise ValueError
# or here, same issue but we wouldn't catch it. We'd overwrite
# their results, and the index will have lost a count. If
# several threads get in here, we will actually make the count
# go backwards when we overwrite it.
self.index = next
class DeferredResultTestCase(unittest.TestCase):
"""
Test twisted.internet.threads.
"""
def setUp(self):
reactor.suggestThreadPoolSize(8)
def tearDown(self):
reactor.suggestThreadPoolSize(0)
def testCallMultiple(self):
L = []
N = 10
d = defer.Deferred()
def finished():
self.assertEquals(L, range(N))
d.callback(None)
threads.callMultipleInThread([
(L.append, (i,), {}) for i in xrange(N)
] + [(reactor.callFromThread, (finished,), {})])
return d
def test_deferredResult(self):
"""
L{threads.deferToThread} executes the function passed, and correctly
handles the positional and keyword arguments given.
"""
d = threads.deferToThread(lambda x, y=5: x + y, 3, y=4)
d.addCallback(self.assertEquals, 7)
return d
def test_deferredFailure(self):
"""
        Check that L{threads.deferToThread} returns a failure object
with an appropriate exception instance when the called
function raises an exception.
"""
class NewError(Exception):
pass
def raiseError():
raise NewError()
d = threads.deferToThread(raiseError)
return self.assertFailure(d, NewError)
def test_deferredFailureAfterSuccess(self):
"""
        Check that a successful L{threads.deferToThread} followed by one
        that raises an exception correctly results in a failure.
"""
# set up a condition that causes cReactor to hang. These conditions
# can also be set by other tests when the full test suite is run in
# alphabetical order (test_flow.FlowTest.testThreaded followed by
# test_internet.ReactorCoreTestCase.testStop, to be precise). By
# setting them up explicitly here, we can reproduce the hang in a
# single precise test case instead of depending upon side effects of
# other tests.
#
# alas, this test appears to flunk the default reactor too
d = threads.deferToThread(lambda: None)
d.addCallback(lambda ign: threads.deferToThread(lambda: 1/0))
return self.assertFailure(d, ZeroDivisionError)
class DeferToThreadPoolTestCase(unittest.TestCase):
"""
Test L{twisted.internet.threads.deferToThreadPool}.
"""
def setUp(self):
self.tp = threadpool.ThreadPool(0, 8)
self.tp.start()
def tearDown(self):
self.tp.stop()
def test_deferredResult(self):
"""
L{threads.deferToThreadPool} executes the function passed, and
correctly handles the positional and keyword arguments given.
"""
d = threads.deferToThreadPool(reactor, self.tp,
lambda x, y=5: x + y, 3, y=4)
d.addCallback(self.assertEqual, 7)
return d
def test_deferredFailure(self):
"""
        Check that L{threads.deferToThreadPool} returns a failure object with an
appropriate exception instance when the called function raises an
exception.
"""
class NewError(Exception):
pass
def raiseError():
raise NewError()
d = threads.deferToThreadPool(reactor, self.tp, raiseError)
return self.assertFailure(d, NewError)
_callBeforeStartupProgram = """
import time
import %(reactor)s
%(reactor)s.install()
from twisted.internet import reactor
def threadedCall():
print 'threaded call'
reactor.callInThread(threadedCall)
# Spin very briefly to try to give the thread a chance to run, if it
# is going to. Is there a better way to achieve this behavior?
for i in xrange(100):
time.sleep(0.0)
"""
class ThreadStartupProcessProtocol(protocol.ProcessProtocol):
def __init__(self, finished):
self.finished = finished
self.out = []
self.err = []
def outReceived(self, out):
self.out.append(out)
def errReceived(self, err):
self.err.append(err)
def processEnded(self, reason):
self.finished.callback((self.out, self.err, reason))
class StartupBehaviorTestCase(unittest.TestCase):
"""
Test cases for the behavior of the reactor threadpool near startup
boundary conditions.
In particular, this asserts that no threaded calls are attempted
until the reactor starts up, that calls attempted before it starts
are in fact executed once it has started, and that in both cases,
the reactor properly cleans itself up (which is tested for
somewhat implicitly, by requiring a child process be able to exit,
something it cannot do unless the threadpool has been properly
torn down).
"""
def testCallBeforeStartupUnexecuted(self):
progname = self.mktemp()
progfile = file(progname, 'w')
progfile.write(_callBeforeStartupProgram % {'reactor': reactor.__module__})
progfile.close()
def programFinished((out, err, reason)):
if reason.check(error.ProcessTerminated):
self.fail("Process did not exit cleanly (out: %s err: %s)" % (out, err))
if err:
log.msg("Unexpected output on standard error: %s" % (err,))
self.failIf(out, "Expected no output, instead received:\n%s" % (out,))
def programTimeout(err):
err.trap(error.TimeoutError)
proto.signalProcess('KILL')
return err
env = os.environ.copy()
env['PYTHONPATH'] = os.pathsep.join(sys.path)
d = defer.Deferred().addCallbacks(programFinished, programTimeout)
proto = ThreadStartupProcessProtocol(d)
reactor.spawnProcess(proto, sys.executable, ('python', progname), env)
return d
if interfaces.IReactorThreads(reactor, None) is None:
for cls in (ReactorThreadsTestCase,
DeferredResultTestCase,
StartupBehaviorTestCase):
cls.skip = "No thread support, nothing to test here."
else:
import threading
if interfaces.IReactorProcess(reactor, None) is None:
for cls in (StartupBehaviorTestCase,):
cls.skip = "No process support, cannot run subprocess thread tests."
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UFlow augmentation.
This library contains various augmentation functions.
"""
# pylint:disable=g-importing-member
from functools import partial
from math import pi
import gin
import gin.tf
import tensorflow as tf
from tensorflow_addons import image as tfa_image
from uflow import uflow_utils
def apply_augmentation(images, flow=None, mask=None,
crop_height=640, crop_width=640):
"""Applies photometric and geometric augmentations to images and flow."""
# ensure sequence length of two, to be able to unstack images
images = tf.ensure_shape(images, (2, None, None, None))
# apply geometric augmentation functions
images, flow, mask = geometric_augmentation(
images, flow, mask, crop_height, crop_width)
# apply photometric augmentation functions
images_aug = photometric_augmentation(images)
# return flow and mask if available
if flow is not None:
return images_aug, images, flow, mask
return images_aug, images
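# A minimal usage sketch (the shapes are assumptions based on how the tensors
# are used below: an image pair stacked along axis 0, a 2-channel flow field
# and a single-channel validity mask):
#
#   images = tf.random.uniform((2, 480, 640, 3))   # image pair in [0, 1]
#   flow = tf.zeros((480, 640, 2))                  # forward flow
#   mask = tf.ones((480, 640, 1))                   # flow validity mask
#   images_aug, images_orig, flow, mask = apply_augmentation(
#       images, flow, mask, crop_height=320, crop_width=320)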
@gin.configurable
def photometric_augmentation(images,
augment_color_swap=True,
augment_hue_shift=True,
augment_saturation=False,
augment_brightness=False,
augment_contrast=False,
augment_gaussian_noise=False,
augment_brightness_individual=False,
augment_contrast_individual=False,
max_delta_hue=0.5,
min_bound_saturation=0.8,
max_bound_saturation=1.2,
max_delta_brightness=0.1,
min_bound_contrast=0.8,
max_bound_contrast=1.2,
min_bound_gaussian_noise=0.0,
max_bound_gaussian_noise=0.02,
max_delta_brightness_individual=0.02,
min_bound_contrast_individual=0.95,
max_bound_contrast_individual=1.05):
"""Applies photometric augmentations to an image pair."""
# Randomly permute colors by rolling and reversing.
# This covers all permutations.
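  # Three cyclic shifts of the channel axis times an optional reversal give
  # all 3! = 6 orderings of the three color channels.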
if augment_color_swap:
r = tf.random.uniform([], maxval=3, dtype=tf.int32)
images = tf.roll(images, r, axis=-1)
r = tf.equal(tf.random.uniform([], maxval=2, dtype=tf.int32), 1)
images = tf.cond(pred=r,
true_fn=lambda: tf.reverse(images, axis=[-1]),
false_fn=lambda: images)
if augment_hue_shift:
images = tf.image.random_hue(images, max_delta_hue)
if augment_saturation:
images = tf.image.random_saturation(
images, min_bound_saturation, max_bound_saturation)
if augment_brightness:
images = tf.image.random_brightness(images, max_delta_brightness)
if augment_contrast:
images = tf.image.random_contrast(
images, min_bound_contrast, max_bound_contrast)
if augment_gaussian_noise:
sigma = tf.random.uniform([],
minval=min_bound_gaussian_noise,
maxval=max_bound_gaussian_noise,
dtype=tf.float32)
noise = tf.random.normal(
tf.shape(input=images), stddev=sigma, dtype=tf.float32)
images = images + noise
# perform relative photometric augmentation (individually per image)
image_1, image_2 = tf.unstack(images)
  if augment_brightness_individual:
    image_1 = tf.image.random_brightness(
        image_1, max_delta_brightness_individual)
    image_2 = tf.image.random_brightness(
        image_2, max_delta_brightness_individual)
  if augment_contrast_individual:
    image_1 = tf.image.random_contrast(
        image_1, min_bound_contrast_individual, max_bound_contrast_individual)
    image_2 = tf.image.random_contrast(
        image_2, min_bound_contrast_individual, max_bound_contrast_individual)
  # clip values to ensure they stay in [0, 1] (some augmentations can violate this)
image_1 = tf.clip_by_value(image_1, 0.0, 1.0)
image_2 = tf.clip_by_value(image_2, 0.0, 1.0)
return tf.stack([image_1, image_2])
@gin.configurable
def geometric_augmentation(images,
flow=None,
mask=None,
crop_height=640,
crop_width=640,
augment_flip_left_right=False,
augment_flip_up_down=False,
augment_scale=False,
augment_relative_scale=False,
augment_rotation=False,
augment_relative_rotation=False,
augment_crop_offset=False,
min_bound_scale=0.9,
max_bound_scale=1.5,
min_bound_relative_scale=0.95,
max_bound_relative_scale=1.05,
max_rotation_deg=15,
max_relative_rotation_deg=3,
max_relative_crop_offset=5):
"""Apply geometric augmentations to an image pair and corresponding flow."""
# apply geometric augmentation
if augment_flip_left_right:
images, flow, mask = random_flip_left_right(images, flow, mask)
if augment_flip_up_down:
images, flow, mask = random_flip_up_down(images, flow, mask)
if augment_scale:
images, flow, mask = random_scale(
images,
flow,
mask,
min_scale=min_bound_scale,
max_scale=max_bound_scale)
if augment_relative_scale:
images, flow, mask = random_scale_second(
images, flow, mask,
min_scale=min_bound_relative_scale, max_scale=max_bound_relative_scale)
if augment_rotation:
images, flow, mask = random_rotation(
images, flow, mask,
max_rotation=max_rotation_deg, not_empty_crop=True)
if augment_relative_rotation:
images, flow, mask = random_rotation_second(
images, flow, mask,
max_rotation=max_relative_rotation_deg, not_empty_crop=True)
# always perform random cropping
if not augment_crop_offset:
max_relative_crop_offset = 0
images, flow, mask = random_crop(
images, flow, mask, crop_height, crop_width,
relative_offset=max_relative_crop_offset)
# return flow and mask if available
return images, flow, mask
def _center_crop(images, height, width):
"""Performs a center crop with the given heights and width."""
# ensure height, width to be int
height = tf.cast(height, tf.int32)
width = tf.cast(width, tf.int32)
# get current size
images_shape = tf.shape(images)
current_height = images_shape[-3]
current_width = images_shape[-2]
# compute required offset
offset_height = tf.cast((current_height - height) / 2, tf.int32)
offset_width = tf.cast((current_width - width) / 2, tf.int32)
# perform the crop
images = tf.image.crop_to_bounding_box(
images, offset_height, offset_width, height, width)
return images
def _positions_center_origin(height, width):
"""Returns image coordinates where the origin at the image center."""
h = tf.range(0.0, height, 1)
w = tf.range(0.0, width, 1)
center_h = tf.cast(height, tf.float32) / 2.0 - 0.5
center_w = tf.cast(width, tf.float32) / 2.0 - 0.5
return tf.stack(tf.meshgrid(h - center_h, w - center_w, indexing='ij'), -1)
def rotate(img, angle_radian, is_flow, mask=None):
"""Rotate an image or flow field."""
def _rotate(img, mask=None):
if angle_radian == 0.0:
      # early return if no rotation is required
if mask is not None:
return img, mask
else:
return img
if mask is not None:
# multiply with mask, to ensure non-valid locations are zero
img = tf.math.multiply(img, mask)
# rotate img
img_rotated = tfa_image.rotate(
img, angle_radian, interpolation='BILINEAR')
# rotate mask (will serve as normalization weights)
mask_rotated = tfa_image.rotate(
mask, angle_radian, interpolation='BILINEAR')
# normalize sparse flow field and mask
img_rotated = tf.math.multiply(
img_rotated, tf.math.reciprocal_no_nan(mask_rotated))
mask_rotated = tf.math.multiply(
mask_rotated, tf.math.reciprocal_no_nan(mask_rotated))
else:
img_rotated = tfa_image.rotate(
img, angle_radian, interpolation='BILINEAR')
if is_flow:
      # If the image is a flow field, rotate the flow vectors so they stay
      # consistent with the image rotation.
cos = tf.math.cos(angle_radian)
sin = tf.math.sin(angle_radian)
rotation_matrix = tf.reshape([cos, sin, -sin, cos], [2, 2])
img_rotated = tf.linalg.matmul(img_rotated, rotation_matrix)
if mask is not None:
return img_rotated, mask_rotated
return img_rotated
  # Apply the rotation, adding a batch dimension to 3D inputs if needed.
shape = img.shape.as_list()
if len(shape) == 3:
if mask is not None:
img_rotated, mask_rotated = _rotate(img[None], mask[None])
return img_rotated[0], mask_rotated[0]
else:
return _rotate(img[None])[0]
elif len(shape) == 4:
# Input at the right shape.
return _rotate(img, mask)
else:
raise ValueError('Cannot rotate an image of shape', shape)
def random_flip_left_right(images, flow=None, mask=None):
"""Performs a random left/right flip."""
# 50/50 chance
perform_flip = tf.equal(tf.random.uniform([], maxval=2, dtype=tf.int32), 1)
# apply flip
images = tf.cond(pred=perform_flip,
true_fn=lambda: tf.reverse(images, axis=[-2]),
false_fn=lambda: images)
if flow is not None:
flow = tf.cond(pred=perform_flip,
true_fn=lambda: tf.reverse(flow, axis=[-2]),
false_fn=lambda: flow)
mask = tf.cond(pred=perform_flip,
true_fn=lambda: tf.reverse(mask, axis=[-2]),
false_fn=lambda: mask)
# correct sign of flow
sign_correction = tf.reshape([1.0, -1.0], [1, 1, 2])
flow = tf.cond(pred=perform_flip,
true_fn=lambda: flow * sign_correction,
false_fn=lambda: flow)
return images, flow, mask
def random_flip_up_down(images, flow=None, mask=None):
"""Performs a random up/down flip."""
# 50/50 chance
perform_flip = tf.equal(tf.random.uniform([], maxval=2, dtype=tf.int32), 1)
# apply flip
images = tf.cond(pred=perform_flip,
true_fn=lambda: tf.reverse(images, axis=[-3]),
false_fn=lambda: images)
if flow is not None:
flow = tf.cond(pred=perform_flip,
true_fn=lambda: tf.reverse(flow, axis=[-3]),
false_fn=lambda: flow)
mask = tf.cond(pred=perform_flip,
true_fn=lambda: tf.reverse(mask, axis=[-3]),
false_fn=lambda: mask)
# correct sign of flow
sign_correction = tf.reshape([-1.0, 1.0], [1, 1, 2])
flow = tf.cond(pred=perform_flip,
true_fn=lambda: flow * sign_correction,
false_fn=lambda: flow)
return images, flow, mask
def random_scale(images, flow=None, mask=None, min_scale=1.0, max_scale=1.0):
"""Performs a random scaling in the given range."""
# choose a random scale factor and compute new resolution
orig_height = tf.shape(images)[-3]
orig_width = tf.shape(images)[-2]
scale = tf.random.uniform([],
minval=min_scale,
maxval=max_scale,
dtype=tf.float32)
new_height = tf.cast(
tf.math.ceil(tf.cast(orig_height, tf.float32) * scale), tf.int32)
new_width = tf.cast(
tf.math.ceil(tf.cast(orig_width, tf.float32) * scale), tf.int32)
# rescale the images (and flow)
images = uflow_utils.resize(images, new_height, new_width, is_flow=False)
if flow is not None:
flow, mask = uflow_utils.resize(
flow, new_height, new_width, is_flow=True, mask=mask)
return images, flow, mask
def random_scale_second(
images, flow=None, mask=None, min_scale=1.0, max_scale=1.0):
"""Performs a random scaling on the second image in the given range."""
# choose a random scale factor and compute new resolution
orig_height = tf.shape(images)[-3]
orig_width = tf.shape(images)[-2]
scale = tf.random.uniform(
[], minval=min_scale, maxval=max_scale, dtype=tf.float32)
new_height = tf.cast(
tf.math.ceil(tf.cast(orig_height, tf.float32) * scale), tf.int32)
new_width = tf.cast(
tf.math.ceil(tf.cast(orig_width, tf.float32) * scale), tf.int32)
# rescale only the second image
image_1, image_2 = tf.unstack(images)
image_2 = uflow_utils.resize(image_2, new_height, new_width, is_flow=False)
# crop either first or second image to have matching dimensions
if scale < 1.0:
image_1 = _center_crop(image_1, new_height, new_width)
else:
image_2 = _center_crop(image_2, orig_height, orig_width)
images = tf.stack([image_1, image_2])
if flow is not None:
# get current locations (with the origin in the image center)
positions = _positions_center_origin(orig_height, orig_width)
# compute scale factor of the actual new image resolution
scale_flow_h = tf.cast(new_height, tf.float32) / tf.cast(
orig_height, tf.float32)
scale_flow_w = tf.cast(new_width, tf.float32) / tf.cast(
orig_width, tf.float32)
scale_flow = tf.stack([scale_flow_h, scale_flow_w])
# compute augmented flow (multiply by mask to zero invalid flow locations)
flow = ((positions + flow) * scale_flow - positions) * mask
if scale < 1.0:
# in case we downsample the image we crop the reference image to keep the
# same shape
flow = _center_crop(flow, new_height, new_width)
mask = _center_crop(mask, new_height, new_width)
return images, flow, mask
def random_crop(images, flow=None, mask=None, crop_height=None, crop_width=None,
relative_offset=0):
"""Performs a random crop with the given height and width."""
# early return if crop_height or crop_width is not specified
if crop_height is None or crop_width is None:
return images, flow, mask
orig_height = tf.shape(images)[-3]
orig_width = tf.shape(images)[-2]
# check if crop size fits the image size
scale = 1.0
ratio = tf.cast(crop_height, tf.float32) / tf.cast(orig_height, tf.float32)
scale = tf.math.maximum(scale, ratio)
ratio = tf.cast(crop_width, tf.float32) / tf.cast(orig_width, tf.float32)
scale = tf.math.maximum(scale, ratio)
  # compute minimum required height
new_height = tf.cast(
tf.math.ceil(tf.cast(orig_height, tf.float32) * scale), tf.int32)
new_width = tf.cast(
tf.math.ceil(tf.cast(orig_width, tf.float32) * scale), tf.int32)
# perform resize (scales with 1 if not required)
images = uflow_utils.resize(images, new_height, new_width, is_flow=False)
# compute joint offset
max_offset_h = new_height - tf.cast(crop_height, dtype=tf.int32)
max_offset_w = new_width - tf.cast(crop_width, dtype=tf.int32)
joint_offset_h = tf.random.uniform([], maxval=max_offset_h+1, dtype=tf.int32)
joint_offset_w = tf.random.uniform([], maxval=max_offset_w+1, dtype=tf.int32)
# compute relative offset
min_relative_offset_h = tf.math.maximum(
joint_offset_h - relative_offset, 0)
max_relative_offset_h = tf.math.minimum(
joint_offset_h + relative_offset, max_offset_h)
min_relative_offset_w = tf.math.maximum(
joint_offset_w - relative_offset, 0)
max_relative_offset_w = tf.math.minimum(
joint_offset_w + relative_offset, max_offset_w)
relative_offset_h = tf.random.uniform(
[], minval=min_relative_offset_h, maxval=max_relative_offset_h+1,
dtype=tf.int32)
relative_offset_w = tf.random.uniform(
[], minval=min_relative_offset_w, maxval=max_relative_offset_w+1,
dtype=tf.int32)
# crop both images
image_1, image_2 = tf.unstack(images)
image_1 = tf.image.crop_to_bounding_box(
image_1, offset_height=joint_offset_h, offset_width=joint_offset_w,
target_height=crop_height, target_width=crop_width)
image_2 = tf.image.crop_to_bounding_box(
image_2, offset_height=relative_offset_h, offset_width=relative_offset_w,
target_height=crop_height, target_width=crop_width)
images = tf.stack([image_1, image_2])
if flow is not None:
# perform resize (scales with 1 if not required)
flow, mask = uflow_utils.resize(
flow, new_height, new_width, is_flow=True, mask=mask)
# crop flow and mask
flow = tf.image.crop_to_bounding_box(
flow,
offset_height=joint_offset_h,
offset_width=joint_offset_w,
target_height=crop_height,
target_width=crop_width)
mask = tf.image.crop_to_bounding_box(
mask,
offset_height=joint_offset_h,
offset_width=joint_offset_w,
target_height=crop_height,
target_width=crop_width)
# correct flow for relative shift (/crop)
flow_delta = tf.stack(
[tf.cast(relative_offset_h - joint_offset_h, tf.float32),
tf.cast(relative_offset_w - joint_offset_w, tf.float32)])
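    # A pixel at p in the crop of image 1 sits at p + joint_offset in the
    # original frame; its match lies at p + joint_offset + flow, which is
    # p + flow - (relative_offset - joint_offset) in the crop of image 2.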
flow = (flow - flow_delta) * mask
return images, flow, mask
def random_rotation(
images, flow=None, mask=None, max_rotation=10, not_empty_crop=True):
"""Performs a random rotation with the specified maximum rotation."""
angle_radian = tf.random.uniform(
[], minval=-max_rotation, maxval=max_rotation,
dtype=tf.float32) * pi / 180.0
images = rotate(images, angle_radian, is_flow=False, mask=None)
if not_empty_crop:
orig_height = tf.shape(images)[-3]
orig_width = tf.shape(images)[-2]
# introduce abbreviations for shorter notation
cos = tf.math.cos(angle_radian % pi)
sin = tf.math.sin(angle_radian % pi)
h = tf.cast(orig_height, tf.float32)
w = tf.cast(orig_width, tf.float32)
# compute required scale factor
scale = tf.cond(tf.math.less(angle_radian % pi, pi/2.0),
lambda: tf.math.maximum((w/h)*sin+cos, (h/w)*sin+cos),
lambda: tf.math.maximum((w/h)*sin-cos, (h/w)*sin-cos))
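    # The scale above is the smallest factor such that an axis-aligned
    # (h/scale) x (w/scale) rectangle still fits inside the rotated image:
    # for angles below 90 degrees this requires scale >= cos + (w/h)*sin and
    # scale >= cos + (h/w)*sin.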
new_height = tf.math.floor(h / scale)
new_width = tf.math.floor(w / scale)
# crop image again to original size
offset_height = tf.cast((h - new_height) / 2, tf.int32)
offset_width = tf.cast((w - new_width) / 2, tf.int32)
images = tf.image.crop_to_bounding_box(
images,
offset_height=offset_height,
offset_width=offset_width,
target_height=tf.cast(new_height, tf.int32),
target_width=tf.cast(new_width, tf.int32))
if flow is not None:
flow, mask = rotate(flow, angle_radian, is_flow=True, mask=mask)
if not_empty_crop:
# crop flow and mask again to original size
flow = tf.image.crop_to_bounding_box(
flow,
offset_height=offset_height,
offset_width=offset_width,
target_height=tf.cast(new_height, tf.int32),
target_width=tf.cast(new_width, tf.int32))
mask = tf.image.crop_to_bounding_box(
mask,
offset_height=offset_height,
offset_width=offset_width,
target_height=tf.cast(new_height, tf.int32),
target_width=tf.cast(new_width, tf.int32))
return images, flow, mask
def random_rotation_second(
images, flow=None, mask=None, max_rotation=10, not_empty_crop=True):
"""Performs a random rotation on only the second image."""
angle_radian = tf.random.uniform(
[], minval=-max_rotation, maxval=max_rotation, dtype=tf.float32)*pi/180.0
image_1, image_2 = tf.unstack(images)
image_2 = rotate(image_2, angle_radian, is_flow=False, mask=None)
images = tf.stack([image_1, image_2])
if not_empty_crop:
orig_height = tf.shape(images)[-3]
orig_width = tf.shape(images)[-2]
# introduce abbreviations for shorter notation
cos = tf.math.cos(angle_radian % pi)
sin = tf.math.sin(angle_radian % pi)
h = tf.cast(orig_height, tf.float32)
w = tf.cast(orig_width, tf.float32)
# compute required scale factor
scale = tf.cond(tf.math.less(angle_radian % pi, pi/2.0),
lambda: tf.math.maximum((w/h)*sin+cos, (h/w)*sin+cos),
lambda: tf.math.maximum((w/h)*sin-cos, (h/w)*sin-cos))
new_height = tf.math.floor(h / scale)
new_width = tf.math.floor(w / scale)
# crop image again to original size
offset_height = tf.cast((h-new_height)/2, tf.int32)
offset_width = tf.cast((w-new_width)/2, tf.int32)
images = tf.image.crop_to_bounding_box(
images,
offset_height=offset_height,
offset_width=offset_width,
target_height=tf.cast(new_height, tf.int32),
target_width=tf.cast(new_width, tf.int32))
if flow is not None:
# get current locations (with the origin in the image center)
positions = _positions_center_origin(orig_height, orig_width)
# compute augmented flow (multiply by mask to zero invalid flow locations)
cos = tf.math.cos(angle_radian)
sin = tf.math.sin(angle_radian)
rotation_matrix = tf.reshape([cos, sin, -sin, cos], [2, 2])
flow = (tf.linalg.matmul((positions+flow), rotation_matrix)-positions)*mask
if not_empty_crop:
# crop flow and mask again to original size
flow = tf.image.crop_to_bounding_box(
flow,
offset_height=offset_height,
offset_width=offset_width,
target_height=tf.cast(new_height, tf.int32),
target_width=tf.cast(new_width, tf.int32))
mask = tf.image.crop_to_bounding_box(
mask,
offset_height=offset_height,
offset_width=offset_width,
target_height=tf.cast(new_height, tf.int32),
target_width=tf.cast(new_width, tf.int32))
return images, flow, mask
def build_selfsup_transformations(num_flow_levels=3,
seq_len=2,
crop_height=0,
crop_width=0,
max_shift_height=0,
max_shift_width=0,
resize=True):
"""Apply augmentations to a list of student images."""
def transform(images, i_or_ij, is_flow, crop_height, crop_width,
shift_heights, shift_widths, resize):
# Expect (i, j) for flows and masks and i for images.
if isinstance(i_or_ij, int):
i = i_or_ij
# Flow needs i and j.
assert not is_flow
else:
i, j = i_or_ij
if is_flow:
shifts = tf.stack([shift_heights, shift_widths], axis=-1)
flow_offset = shifts[i] - shifts[j]
images = images + tf.cast(flow_offset, tf.float32)
shift_height = shift_heights[i]
shift_width = shift_widths[i]
height = images.shape[-3]
width = images.shape[-2]
# Assert that the cropped bounding box does not go out of the image frame.
op1 = tf.compat.v1.assert_greater_equal(crop_height + shift_height, 0)
op2 = tf.compat.v1.assert_greater_equal(crop_width + shift_width, 0)
op3 = tf.compat.v1.assert_less_equal(height - crop_height + shift_height,
height)
op4 = tf.compat.v1.assert_less_equal(width - crop_width + shift_width,
width)
op5 = tf.compat.v1.assert_greater(
height,
2 * crop_height,
message='Image height is too small for cropping.')
op6 = tf.compat.v1.assert_greater(
width, 2 * crop_width, message='Image width is too small for cropping.')
with tf.control_dependencies([op1, op2, op3, op4, op5, op6]):
images = images[:, crop_height + shift_height:height - crop_height +
shift_height, crop_width + shift_width:width -
crop_width + shift_width, :]
if resize:
images = uflow_utils.resize(images, height, width, is_flow=is_flow)
images.set_shape((images.shape[0], height, width, images.shape[3]))
else:
images.set_shape((images.shape[0], height - 2 * crop_height,
width - 2 * crop_width, images.shape[3]))
return images
max_divisor = 2**(num_flow_levels - 1)
assert crop_height % max_divisor == 0
assert crop_width % max_divisor == 0
assert max_shift_height <= crop_height
assert max_shift_width <= crop_width
# Compute random shifts for different images in a sequence.
if max_shift_height > 0 or max_shift_width > 0:
max_rand = max_shift_height // max_divisor
shift_height_at_highest_level = tf.random.uniform([seq_len],
minval=-max_rand,
maxval=max_rand + 1,
dtype=tf.int32)
shift_heights = shift_height_at_highest_level * max_divisor
    max_rand = max_shift_width // max_divisor
shift_width_at_highest_level = tf.random.uniform([seq_len],
minval=-max_rand,
maxval=max_rand + 1,
dtype=tf.int32)
shift_widths = shift_width_at_highest_level * max_divisor
transform_fns = []
  for level in range(num_flow_levels):
    if max_shift_height == 0 and max_shift_width == 0:
      level_shift_heights = [0, 0]
      level_shift_widths = [0, 0]
    else:
      # Each level uses the full-resolution shifts scaled down by 2**level.
      level_shift_heights = shift_heights // (2**level)
      level_shift_widths = shift_widths // (2**level)
    fn = partial(
        transform,
        crop_height=crop_height // (2**level),
        crop_width=crop_width // (2**level),
        shift_heights=level_shift_heights,
        shift_widths=level_shift_widths,
        resize=resize)
    transform_fns.append(fn)
assert len(transform_fns) == num_flow_levels
return transform_fns
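# A minimal sketch of how the returned per-level functions might be applied
# (illustrative; `student_images` is a hypothetical (seq_len, H, W, C) batch):
#
#   transform_fns = build_selfsup_transformations(
#       num_flow_levels=3, crop_height=16, crop_width=16,
#       max_shift_height=16, max_shift_width=16)
#   # Crop/shift the full-resolution images for the student network.
#   student_images = transform_fns[0](student_images, i_or_ij=0, is_flow=False)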
|
|
#!/usr/bin/python3
## Autocompletion plugin
# @package hive_autocomplete_plugin
# @author Vincent Yahna
#
# Plugin that autocompletes
# parameters, object names, elements
# attributes, and enum values for HIVE files.
# Assumption: Each tag has less than 9000 characters
# (for optimization purposes)
import os
import sublime, sublime_plugin
from .Module_DataDictionary import *
from .Module_XMLTagIterator import *
## Dictionary containing mappings of objects to parameters and
# mapping of elements to subelements and attributes.
# Autocompletion and help info plugins store a reference to this object
DATA_DICTIONARY = None #cannot initialize dictionary at plugin load time
#Option names and default values
settings_file = 'hive.sublime-settings'
##the sublime selector that autocompletion should
#occur in. text.xml is for xml files
AUTOCOMPLETION_SELECTOR = "text.xml"
#enums
## context for object types
OBJECT_TYPE_CONTEXT = 1 #context for object type autocompletion
## context for object types with no quotes typed
OBJECT_TYPE_CONTEXT_NO_QUOTES = 2
## context for param names
PARAM_NAME_CONTEXT = 3
## context for param names with no quotes typed
PARAM_NAME_CONTEXT_NO_QUOTES = 4
## context for param values
PARAM_VALUE_CONTEXT = 5
## context for param values with no quotes typed
PARAM_VALUE_CONTEXT_NO_QUOTES = 6
## context for an element
ELEMENT_CONTEXT = 7
## context for an attribute
ATTRIBUTE_CONTEXT = 8
## context for an attribute value other
# than object type, param name, and param value
ATTRIBUTE_VALUE_CONTEXT = 9
## context for an attribute value with no quotes typed
ATTRIBUTE_VALUE_CONTEXT_NO_QUOTES = 10
## context for an object type where colons
# are not used as a word separator
# but are present in the prefix
OBJECT_TYPE_COLON_CONTEXT = 11
## context for an object type where colons
# are not used as a word separator
# but are present in the prefix
# and no quotes have been typed
OBJECT_TYPE_COLON_CONTEXT_NO_QUOTES = 12
CONTEXT_NAMES = [
"None",
"OBJECT_TYPE_CONTEXT",
"OBJECT_TYPE_CONTEXT_NO_QUOTES",
"PARAM_NAME_CONTEXT",
"PARAM_NAME_CONTEXT_NO_QUOTES",
"PARAM_VALUE_CONTEXT",
"PARAM_VALUE_CONTEXT_NO_QUOTES",
"ELEMENT_CONTEXT",
"ATTRIBUTE_CONTEXT",
"ATTRIBUTE_VALUE_CONTEXT",
"ATTRIBUTE_VALUE_CONTEXT_NO_QUOTES",
"OBJECT_TYPE_COLON_CONTEXT",
"OBJECT_TYPE_COLON_CONTEXT_NO_QUOTES"
]
# Check if we are running on a Windows operating system
os_is_windows = os.name == 'nt'
# The default name of the HiveAPIQuery executable
default_binary = 'HiveAPIQuery.exe' if os_is_windows else 'HiveAPIQuery'
def loadSettings():
global queryBinary
global inhibitComp
settings = sublime.load_settings(settings_file)
inhibitComp = settings.get("inhibit_other_completions", True)
queryBinary = settings.get("hive_api_query", default_binary)
# This function taken from Stack Overflow response:
# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
# Display input panel to update the path.
def updateQueryPath():
loadSettings()
w = sublime.active_window()
w.show_input_panel("Path to HiveAPIQuery: ", queryBinary, setQueryPath, None, None)
def setQueryPath(path):
settings = sublime.load_settings(settings_file)
settings.set("hive_api_query", path)
sublime.save_settings(settings_file)
loadSettings()
def checkQueryBinary():
# Make sure we could find the API Binary
if which(queryBinary) == None:
# Ask if they want to set it
if sublime.ok_cancel_dialog("HiveAPIQuery binary was not found. Do you want to set a new path?"):
updateQueryPath()
## Once sublime has finished loading, the dictionary can be initialized
# with the information in the settings file.
# Sublime executes plugin_loaded once the api is ready to use
def plugin_loaded():
global DATA_DICTIONARY
loadSettings()
checkQueryBinary()
DATA_DICTIONARY = DataDictionary(queryBinary)
#///////////////////////////////////////////////GLOBAL METHODS/////////////////////////////////////////////////////////////////////
## Method for determining whether the given tokens form the beginning of a parameter tag.
# @param tokens - a list of strings
# @returns True if the first two tokens are '<' and 'param'
# and False otherwise
def isParamTag(tokens):
return len(tokens) >= 2 and tokens[0] == '<' and tokens[1] == 'param'
## Method for determining whether the given tokens form the beginning of an object tag.
# @param tokens - a list of strings
# @returns True if the first two tokens are '<' and 'object'
# and False otherwise
def isObjectTag(tokens):
return len(tokens) >=2 and tokens[0] == '<' and tokens[1] == 'object'
## Method that gives the element of the tag
# at the location.
# @param view - a view object
# @param location - an integer for indexing into the view
# @returns a string or None if no element is found
def getCurrentElementType(view, location):
index = location -1 #initialize index to location left of cursor
#get range from current cursor to either the beginning of file or a tag <
while(index >= 0 and view.substr(index) != '<' and view.substr(index) != '>'):
index -= 1
if(index < 0 or view.substr(index) != '<'):
return None #the cursor is not in an XML tag
currentRegion = sublime.Region(index, location)
currentTag = view.substr(currentRegion)
tokens = currentTag.replace('=', ' = ').replace('<', ' < ').replace('\"', ' \" ').split()
if(len(tokens) <= 1):
return None
return tokens[1]
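# For reference, the tokenization used above and below turns a (possibly
# incomplete) tag such as '<param name="foo" value="bar' into
# ['<', 'param', 'name', '=', '"', 'foo', '"', 'value', '=', '"', 'bar'],
# so tokens[1] is the element name and an attribute's value appears three
# tokens after the attribute name (past the '=' and opening '"').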
## method that gets the value of the current param element's name attribute.
# This method does not check whether the current tag is a param tag;
# It just gets the value of a name attribute if available.
# @param view - a sublime view object
# @param location - an integer index into the view
# @returns a string or None
def getCurrentParamName(view, location):
index = location -1 #initialize index to location left of cursor
#get range from current cursor to either the beginning of file or a tag <
while(index >= 0 and view.substr(index) != '<' and view.substr(index) != '>'):
index -= 1
if(index < 0 or view.substr(index) != '<'):
return None #the cursor is not in an XML tag
currentRegion = sublime.Region(index, location)
currentTag = view.substr(currentRegion)
tokens = currentTag.replace('=', ' = ').replace('<', ' < ').replace('\"', ' \" ').split()
index = 0
while(index < len(tokens) and tokens[index] != 'name'):
index = index + 1
if(index + 3 < len(tokens)):
return tokens[index + 3]
else:
return None
## Method that determines the context of the current cursor
# @param view - a sublime view object
# @param location - index for cursor location in the view
# @param prefix - the current word being typed, which should
# be removed from the list of tokens
# @returns an enum value
def getContext(view, location, prefix=""):
index = location -1 #initialize index to location left of cursor
charCount = 0
#get range from current cursor to either the beginning of file or a tag <
while(index >= 0 and view.substr(index) != '<' and view.substr(index) != '>' and charCount < 9000):
index -= 1
#for optimization, stop looking for < and > after charCount reaches 9000
charCount += 1
if(view.substr(index) != '<'):
return 0 #the cursor is not in an XML tag
currentRegion = sublime.Region(index, location)
#currentTag may be an incomplete tag (no '>' )
currentTag = view.substr(currentRegion)
tokens = currentTag.replace('=', ' = ').replace('<', ' < ').replace('\"', ' \" ').split()
# print("Prefix [%s] CurrTag [%s], Tokens[%s]" % (prefix, currentTag, tokens))
context = 0
colonsNotWordSeparator = False
#user might have begun typing the word to be autocompleted, so remove word if so
if(len(tokens) != 0 and tokens[-1] == prefix):
tokens.pop()
#if the prefix is not found in tokens because of colons not being a word separator
#then mark the colonsNotWordSeparator flag and pop the last token which ends with prefix
elif(len(tokens) != 0 and view.substr(location - len(prefix)-1) == ':' and tokens[-1].endswith(prefix)):
tokens.pop()
colonsNotWordSeparator = True
if(len(tokens) >= 5 and tokens[-1] == '\"'
and tokens[-2] == '=' and tokens[-3] == 'type'
and isObjectTag(tokens)):
if(colonsNotWordSeparator):
context = OBJECT_TYPE_COLON_CONTEXT
else:
context = OBJECT_TYPE_CONTEXT
elif(len(tokens) >=4 and tokens[-1] == '='
and tokens[-2] == 'type' and isObjectTag(tokens)):
if(colonsNotWordSeparator):
context = OBJECT_TYPE_COLON_CONTEXT_NO_QUOTES
else:
context = OBJECT_TYPE_CONTEXT_NO_QUOTES
elif(len(tokens) >= 5 and tokens[-1] == '\"'
and tokens[-2] == '=' and tokens[-3] == 'name'
and isParamTag(tokens)):
context = PARAM_NAME_CONTEXT
elif(len(tokens) >=4 and tokens[-1] == '='
and tokens[-2] == 'name' and isParamTag(tokens)):
context = PARAM_NAME_CONTEXT_NO_QUOTES
elif(len(tokens) >= 3 and tokens[-1] == '"'
and tokens[-2] == '=' and tokens[-3] == 'value'
and isParamTag(tokens)):
context = PARAM_VALUE_CONTEXT
elif(len(tokens) >= 2 and tokens[-1] == '=' and tokens[-2] == 'value' and isParamTag(tokens)):
context = PARAM_VALUE_CONTEXT_NO_QUOTES
elif(tokens.count('\"') % 2 == 1):
#odd number means inside of a quote
#This is here to protect against lists of values
#for attributes.
context = ATTRIBUTE_VALUE_CONTEXT
elif(len(tokens) >= 1 and tokens[-1] == '<'):
context = ELEMENT_CONTEXT
elif(len(tokens) >= 3 and tokens[0] == '<' and tokens[-1] == '\"' and tokens[-2] == '='):
context = ATTRIBUTE_VALUE_CONTEXT
elif(len(tokens) >= 2 and tokens[0] == '<'
and tokens[-1] != '='):
context = ATTRIBUTE_CONTEXT
elif(len(tokens) >= 2 and tokens[0] == '<' and tokens[-1] == '='):
context = ATTRIBUTE_VALUE_CONTEXT_NO_QUOTES
return context
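# Examples of the contexts getContext derives from a partially typed tag
# (with an empty prefix in each case):
#   '<'              -> ELEMENT_CONTEXT
#   '<object type='  -> OBJECT_TYPE_CONTEXT_NO_QUOTES
#   '<object type="' -> OBJECT_TYPE_CONTEXT
#   '<param name="'  -> PARAM_NAME_CONTEXT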
## returns the object type as a string
# that a parameter belongs to
# or none if no object tag is found.
# Does not check if the tag is an object tag,
# it simply looks for an attribute called type.
# @param view - a sublime view object
# @param location -integer index into view
# @returns a string
def getParentObjectName(view, location):
tags = XMLTagIterator(view, location)
parentTag = tags.getParent()
if(parentTag is not None):
tokens = view.substr(parentTag).replace('=', ' = ').replace('<', ' < ').replace('\"', ' \" ').replace('>', ' > ').split()
else:
return None #no parent found
#get the object type
i = 0
while(i < len(tokens) and tokens[i] != 'type'):
i = i + 1
if(i + 3 < len(tokens)):
return tokens[i + 3] #+ 3 to account for = and " after type
else:
return None #object type is not contained in the tag
## Get the element type of the tag governing
# the current tag.
# Works similarly to getParentObjectName.
# Each tag before the current one is assessed
# to determine where the parent tag is.
# @returns a string of the element name
# or 'root' if there is no governing tag (i.e. the root tag)
# or None if the tag found has less than 2 tokens (which shouldn't be possible)
def getParentTagType(view, location):
tags = XMLTagIterator(view, location)
parentTag = tags.getParent()
if(parentTag is not None):
tokens = view.substr(parentTag).replace('=', ' = ').replace('<', ' < ').replace('\"', ' \" ').replace('>', ' > ').split()
else:
return 'root' #no parent found
if(len(tokens) >= 2) :
return tokens[1] #return element (might return > for an incomplete tag)
else:
return None #tokens has less than two items (this shouldn't be possible)
## A method that finds the prefix for an object type
# when colons are word separators.
# @param view - a sublime view object
# @param location - the location in the view where the
# object type ends
# @param suffix - the current part of the object type that
# is being typed (the prefix for autocompletion)
# @returns a string
def getObjectTypePrefix(view, location, suffix):
index = location - 1
while(index >= 0 and view.substr(index) != '=' and view.substr(index) != '\"'):
index -=1
    text = view.substr(sublime.Region(index + 1, location))
    # rstrip() strips a set of characters, not a suffix, so drop the suffix
    # explicitly instead.
    return text[:-len(suffix)] if suffix and text.endswith(suffix) else text
## Filters the object completions list based on
# a prefix and trims the words based on the prefix.
# @param completions - a list of trigger-completions pairs.
# Should not include quotation marks.
# @param prefix - a string
def filterObjectTypeCompletions(completions, prefix):
i = len(completions) - 1
while(i >= 0):
if(completions[i][0].startswith(prefix)):
completions[i][0] = completions[i][0].replace(prefix,"",1)
completions[i][1] = completions[i][1].replace(prefix,"",1)
else:
completions.pop(i)
i -= 1
#//////////////////////////END GLOBAL METHODS/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
## An auto complete plugin
# that gives context-specific completions for hive files.
# All other completions are inhibited while using this plugin
class HiveAutoComplete(sublime_plugin.EventListener):
## Constructor
def __init__(self):
## reference to global DATA_DICTIONARY
self.DD = DATA_DICTIONARY #DATA_DICTIONARY will be None at initialization time
## Method that feeds Sublime's autocomplete lists
# returns a list of trigger-completion pairs
# and possibly a flag preventing sublime from
# adding its own completions.
# Only the first cursor is checked
def on_query_completions(self, view, prefix, locations):
# we delay loading the dictionary until here because
# we need to make sure sublime is done loading
# and has executed plugin_loaded
if(self.DD is None):
self.DD = DATA_DICTIONARY
items = []
settings = view.settings()
inXML = view.score_selector(locations[0], AUTOCOMPLETION_SELECTOR)
#impede plugin if not in an xml file
if not inXML:
return items
context = getContext(view, locations[0], prefix)
# print("Context = %s(%d)" % (CONTEXT_NAMES[context], context))
if self.DD is None:
return items
if(context == OBJECT_TYPE_CONTEXT):
items = self.DD.getObjectCompletions()
elif(context == OBJECT_TYPE_CONTEXT_NO_QUOTES):
items = self.DD.getObjectCompletions(addQuotes=True)
elif(context == PARAM_NAME_CONTEXT):
items = self.DD.getParamCompletions(getParentObjectName(view, locations[0]))
elif(context == PARAM_NAME_CONTEXT_NO_QUOTES):
items = self.DD.getParamCompletions(getParentObjectName(view, locations[0]), addQuotes=True)
elif(context == ELEMENT_CONTEXT):
items = self.DD.getElementCompletions(getParentTagType(view, locations[0]))
elif(context == ATTRIBUTE_CONTEXT):
#get element type of current tag
element = getCurrentElementType(view, locations[0])
items = self.DD.getAttributeCompletions(element)
elif(context == PARAM_VALUE_CONTEXT):
valPrefix = getObjectTypePrefix(view, locations[0], prefix)
paramName = getCurrentParamName(view, locations[0])
parent = getParentObjectName(view, locations[0])
items = self.DD.getParamValueCompletions(paramName, parent, valPrefix)
elif(context == PARAM_VALUE_CONTEXT_NO_QUOTES):
valPrefix = getObjectTypePrefix(view, locations[0], prefix)
# Force add the quotes
view.run_command("add_quotes", {"end": locations[0] +1, "start": locations[0] - (len(valPrefix) + len(prefix)) } )
#move the cursor back before the last quote
view.run_command("move", {"by": "characters", "forward": False})
paramName = getCurrentParamName(view, locations[0])
parent = getParentObjectName(view, locations[0])
items = self.DD.getParamValueCompletions(paramName, parent, valPrefix, addQuotes=True)
elif(context == ATTRIBUTE_VALUE_CONTEXT):
pass
elif(context ==ATTRIBUTE_VALUE_CONTEXT_NO_QUOTES):
pass
elif(context == OBJECT_TYPE_COLON_CONTEXT or context == OBJECT_TYPE_COLON_CONTEXT_NO_QUOTES):
objPrefix = getObjectTypePrefix(view, locations[0], prefix)
if(context == OBJECT_TYPE_COLON_CONTEXT_NO_QUOTES):
#quotes must manually be inserted
view.run_command("add_quotes", {"end": locations[0] +1, "start": locations[0] - (len(objPrefix) + len(prefix)) } )
#move the cursor back before the last quote
view.run_command("move", {"by": "characters", "forward": False})
if(objPrefix.endswith("::")):
items = self.DD.getObjectCompletions(prefix=objPrefix)
# filterObjectTypeCompletions(items, objPrefix)
items.sort()
        # If there are no items, or we are not preventing other completions
        # from being shown, then just return the items.
        if len(items) == 0 or not inhibitComp:
            return items
        return (items, sublime.INHIBIT_WORD_COMPLETIONS)
## Plugin that adds quotes at two points
class AddQuotesCommand(sublime_plugin.TextCommand):
## method executed when the plugin runs.
# @param edit - a sublime edit object
# @param start - the first place to add quotes
# @param end - the second place to add quotes
def run(self, edit, start, end):
self.view.insert(edit, start, '\"')
self.view.insert(edit, end, '\"')
## Command called to set the path to the HiveAPIQuery binary
class HiveApiQuerySetPathCommand(sublime_plugin.WindowCommand):
def run(self):
updateQueryPath()
|
|
import happyforms
from django import forms
from django.db.models import Q
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator
from django.utils.timezone import make_naive, now
from product_details import product_details
from django_statsd.clients import statsd
from pytz import common_timezones, timezone
from remo.base.datetimewidgets import SplitSelectDateTimeWidget
from remo.base.templatetags.helpers import get_full_name
from remo.base.utils import get_date, get_object_or_none, validate_datetime
from remo.events.models import EventMetric
from remo.events.templatetags.helpers import get_event_link
from remo.profiles.models import FunctionalArea
from remo.reports import ACTIVITY_POST_EVENT_METRICS
from remo.reports.models import Activity, Campaign, NGReport
from remo.remozilla.models import Bug
from models import Event, EventComment, EventMetricOutcome
class MinBaseInlineFormSet(forms.models.BaseInlineFormSet):
"""Inline form-set support for minimum number of filled forms."""
def __init__(self, *args, **kwargs):
"""Init formset with minimum number of 2 forms."""
self.min_forms = kwargs.pop('min_forms', 2)
super(MinBaseInlineFormSet, self).__init__(*args, **kwargs)
def _count_filled_forms(self):
"""Count valid, filled forms, with delete == False."""
valid_forms = 0
for form in self.forms:
if (form.is_valid() and len(form.cleaned_data)):
if form.cleaned_data['DELETE'] is False:
valid_forms += 1
return valid_forms
def clean(self):
"""Make sure that we have at least min_forms filled."""
if (self.min_forms > self._count_filled_forms()):
raise ValidationError('You must fill at least %d forms' % self.min_forms)
return super(MinBaseInlineFormSet, self).clean()
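# Hedged usage sketch (not part of the original module): MinBaseInlineFormSet is
# meant to be handed to Django's inlineformset_factory via ``formset``; the
# ``min_forms`` kwarg is then supplied when the formset is instantiated. The
# model pairing below is illustrative only.
def _example_min_forms_formset(data, event):
    from django.forms.models import inlineformset_factory

    ExampleFormSet = inlineformset_factory(
        Event, EventMetricOutcome, form=EventMetricsForm,
        formset=MinBaseInlineFormSet, extra=2)
    # min_forms is popped by MinBaseInlineFormSet.__init__ before the parent
    # BaseInlineFormSet sees the remaining kwargs.
    return ExampleFormSet(data, instance=event, min_forms=2)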
class BaseEventMetricsFormset(MinBaseInlineFormSet):
"""Inline form-set support for event metrics."""
def __init__(self, *args, **kwargs):
self.clone = kwargs.pop('clone', None)
super(BaseEventMetricsFormset, self).__init__(*args, **kwargs)
def clean(self):
"""Check for unique metrics inside formset."""
super(BaseEventMetricsFormset, self).clean()
if any(self.errors):
            # Do not check unless all fields are valid.
return
# Disable adding new forms in post event form.
if self.instance.is_past_event and self.instance.has_new_metrics and not self.clone:
if self.extra_forms and len(self.extra_forms) > 2:
error_msg = 'You cannot add new metrics in a past event.'
raise ValidationError(error_msg)
if [key for key in self.cleaned_data if key.get('DELETE')]:
error_msg = 'You cannot delete metrics in a past event.'
raise ValidationError(error_msg)
metrics = []
field_error_msg = 'This metric has already been selected.'
for i, form in enumerate(self.forms):
if 'metric' in form.cleaned_data:
metric = form.cleaned_data['metric']
if metric in metrics:
self.errors[i]['metric'] = field_error_msg
metrics.append(metric)
def save_existing(self, form, instance, commit=True):
"""Override save_existing on cloned event to save metrics"""
if self.clone:
form.instance.id = None
return self.save_new(form)
return super(BaseEventMetricsFormset, self).save_existing(form, instance, commit)
def save(self, *args, **kwargs):
"""Override save on cloned events."""
if self.clone:
for form in self.initial_forms:
form.changed_data.append('id')
return super(BaseEventMetricsFormset, self).save()
class EventMetricsForm(happyforms.ModelForm):
"""EventMetrics form."""
metric = forms.ModelChoiceField(queryset=EventMetric.active_objects.all(),
empty_label='Please select an event metric.')
expected_outcome = forms.IntegerField(error_messages={'invalid': 'Please enter a number.'})
class Meta:
model = EventMetricOutcome
fields = ('metric', 'expected_outcome')
def __init__(self, *args, **kwargs):
"""Dynamically initialize form."""
self.clone = kwargs.pop('clone', None)
super(EventMetricsForm, self).__init__(*args, **kwargs)
if self.instance.id:
# Dynamic queryset for active metrics in saved events
current_metrics = self.instance.event.metrics.all()
metrics_query = Q(active=True) | Q(pk__in=current_metrics)
qs = EventMetric.objects.filter(metrics_query)
self.fields['metric'].queryset = qs
def save(self, *args, **kwargs):
"""Override save method to handle metrics cloning."""
if self.clone:
self.instance.pk = None
self.instance.outcome = None
self.instance.details = ''
return super(EventMetricsForm, self).save(*args, **kwargs)
class PostEventMetricsForm(EventMetricsForm):
"""PostEventMetrics form."""
outcome = forms.IntegerField(error_messages={'invalid': 'Please enter a number.'})
class Meta(EventMetricsForm.Meta):
fields = ('metric', 'expected_outcome', 'outcome', 'details')
def __init__(self, *args, **kwargs):
"""Make expected outcome readonly."""
super(PostEventMetricsForm, self).__init__(*args, **kwargs)
if self.instance.expected_outcome:
self.fields['expected_outcome'].widget.attrs['readonly'] = True
class EventForm(happyforms.ModelForm):
"""Form of an event."""
categories = forms.ChoiceField(choices=[])
country = forms.ChoiceField(
choices=[],
error_messages={'required': 'Please select one option from the list.'})
swag_bug_form = forms.CharField(required=False)
budget_bug_form = forms.CharField(required=False)
estimated_attendance = forms.IntegerField(
validators=[MinValueValidator(1)],
error_messages={'invalid': 'Please enter a number.'})
owner = forms.IntegerField(required=False)
timezone = forms.ChoiceField(choices=zip(common_timezones, common_timezones))
start = forms.DateTimeField(required=False)
end = forms.DateTimeField(required=False)
campaign = forms.ModelChoiceField(queryset=Campaign.active_objects.all())
def __init__(self, *args, **kwargs):
"""Initialize form.
Dynamically set choices for country field.
"""
if 'editable_owner' in kwargs:
self.editable_owner = kwargs['editable_owner']
del(kwargs['editable_owner'])
self.clone = kwargs.pop('clone', None)
self.user = kwargs.pop('user', None)
super(EventForm, self).__init__(*args, **kwargs)
# Dynamic categories field.
categories_query = FunctionalArea.objects.filter(Q(active=True))
if self.instance.id and self.instance.categories.all():
categories_query |= categories_query.filter(Q(id__in=self.instance.categories.all()))
initial_category = self.instance.categories.all()[0]
self.fields['categories'].initial = initial_category.id
categories = ([('', 'Please select a functional area')]
+ list(categories_query.values_list('id', 'name')))
self.fields['categories'].choices = categories
        # Initiatives/Campaign field
self.fields['campaign'].empty_label = 'Please select an initiative.'
# Dynamic countries field.
countries = product_details.get_regions('en').values()
countries.sort()
country_choices = ([('', 'Country or Region')]
+ [(country, country) for country in countries])
self.fields['country'].choices = country_choices
# Dynamic owner field.
initial_user = self.instance.owner
if self.clone:
initial_user = self.user
if self.editable_owner:
self.fields['owner_form'] = forms.ModelChoiceField(
queryset=User.objects.filter(userprofile__registration_complete=True,
groups__name='Rep').order_by('first_name'),
empty_label='Owner', initial=initial_user.id)
else:
self.fields['owner_form'] = forms.CharField(required=False,
initial=get_full_name(initial_user),
widget=forms.TextInput(
attrs={'readonly': 'readonly',
'class': 'input-text big'}))
instance = self.instance
# Dynamically set the year portion of the datetime widget
start_year = min(getattr(self.instance.start, 'year', now().year), now().year - 1)
end_year = min(getattr(self.instance.end, 'year', now().year), now().year - 1)
self.fields['start_form'] = forms.DateTimeField(widget=SplitSelectDateTimeWidget(
years=range(start_year, start_year + 10), minute_step=5),
validators=[validate_datetime])
self.fields['end_form'] = forms.DateTimeField(widget=SplitSelectDateTimeWidget(
years=range(end_year, end_year + 10), minute_step=5),
validators=[validate_datetime])
# Make times local to venue
if self.instance.start:
start = make_naive(instance.local_start, timezone(instance.timezone))
self.fields['start_form'].initial = start
if self.instance.end:
end = make_naive(instance.local_end, timezone(instance.timezone))
self.fields['end_form'].initial = end
# Use of intermediate fields to translate between bug.id and
# bug.bug_id
if instance.budget_bug:
self.fields['budget_bug_form'].initial = instance.budget_bug.bug_id
if instance.swag_bug:
self.fields['swag_bug_form'].initial = instance.swag_bug.bug_id
def clean(self):
"""Clean form."""
super(EventForm, self).clean()
cdata = self.cleaned_data
cdata['budget_bug'] = cdata.get('budget_bug_form', None)
cdata['swag_bug'] = cdata.get('swag_bug_form', None)
if self.editable_owner:
cdata['owner'] = cdata.get('owner_form', None)
else:
cdata['owner'] = self.instance.owner
        # Check if keys exist in cleaned data.
if not all(k in cdata for k in ('start_form', 'end_form')):
raise ValidationError('Please correct the form errors.')
# Set timezone
t = timezone(cdata['timezone'])
start = make_naive(cdata['start_form'], timezone(settings.TIME_ZONE))
cdata['start'] = t.localize(start)
end = make_naive(cdata['end_form'], timezone(settings.TIME_ZONE))
cdata['end'] = t.localize(end)
# Do not allow cloning with a past date
if cdata['start'] < now() and self.clone:
msg = 'Start date in a cloned event cannot be in the past.'
self._errors['start_form'] = self.error_class([msg])
if cdata['start'] >= cdata['end']:
msg = 'Start date should come before end date.'
self._errors['start_form'] = self.error_class([msg])
        # Check that there is a category selected
if not cdata.get('categories'):
msg = 'You need to select one functional area for this event.'
self._errors['categories'] = self.error_class([msg])
return cdata
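    # Hedged illustration (not part of the form): t.localize above attaches the
    # venue's timezone to the naive widget value, e.g. with pytz
    #   timezone('America/Anchorage').localize(datetime(2015, 6, 1, 18, 0))
    # yields an aware datetime of 2015-06-01 18:00:00-08:00 (AKDT).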
def _clean_bug(self, bug_id):
"""Get or create Bug with bug_id and component. """
if bug_id == '':
return None
try:
bug_id = int(bug_id)
except ValueError:
raise ValidationError('Please provide a number')
bug, created = Bug.objects.get_or_create(bug_id=bug_id)
return bug
def clean_categories(self):
category_id = self.cleaned_data['categories']
return get_object_or_none(FunctionalArea, id=category_id)
def clean_swag_bug_form(self):
"""Clean swag_bug_form field."""
data = self.cleaned_data['swag_bug_form']
return self._clean_bug(data)
def clean_budget_bug_form(self):
"""Clean budget_bug_form field."""
data = self.cleaned_data['budget_bug_form']
return self._clean_bug(data)
def save(self, commit=True):
"""Override save method for custom functionality."""
event = super(EventForm, self).save(commit=False)
if self.clone:
event.pk = None
event.has_new_metrics = True
event.actual_attendance = None
event.times_edited = 0
event.owner = self.user
elif self.instance.pk:
            # It's neither a cloned event nor a new one,
            # so increment the number of event edits.
event.times_edited += 1
event.save()
# Clear all relations in order to force only one field
event.categories.clear()
event.categories.add(self.cleaned_data['categories'])
return event
class Meta:
model = Event
fields = ['name', 'start', 'end', 'venue', 'region', 'owner',
'country', 'city', 'lat', 'lon', 'external_link',
'planning_pad_url', 'timezone', 'estimated_attendance',
'description', 'extra_content', 'hashtag', 'mozilla_event',
'swag_bug', 'budget_bug', 'campaign']
widgets = {'lat': forms.HiddenInput(attrs={'id': 'lat'}),
'lon': forms.HiddenInput(attrs={'id': 'lon'}),
'start': SplitSelectDateTimeWidget(),
'end': SplitSelectDateTimeWidget()}
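# Hedged usage sketch (not part of the original module): EventForm expects a few
# extra kwargs that the calling views are assumed to supply, e.g.
#
#   form = EventForm(request.POST or None, instance=event,
#                    editable_owner=True, clone=False, user=request.user)
#   if form.is_valid():
#       event = form.save()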
class PostEventForm(EventForm):
"""Post event form."""
actual_attendance = forms.IntegerField(validators=[MinValueValidator(1)],
error_messages={'invalid': 'Please enter a number.'})
def save(self, *args, **kwargs):
"""Create post event data report."""
event = super(PostEventForm, self).save()
activity = Activity.objects.get(name=ACTIVITY_POST_EVENT_METRICS)
reports = NGReport.objects.filter(event=event, activity=activity)
if not reports:
up = event.owner.userprofile
attrs = {
'activity': activity,
'report_date': get_date(),
'longitude': up.lon,
'latitude': up.lat,
'location': '%s, %s, %s' % (up.city, up.region, up.country),
'link': get_event_link(event),
'is_passive': True,
'event': event,
'user': event.owner
}
report = NGReport.objects.create(**attrs)
report.functional_areas.add(*event.categories.all())
statsd.incr('reports.create_passive_post_event_metrics')
return event
class Meta(EventForm.Meta):
fields = EventForm.Meta.fields + ['actual_attendance']
def __init__(self, *args, **kwargs):
"""Make estimated attendance readonly."""
super(PostEventForm, self).__init__(*args, **kwargs)
self.fields['estimated_attendance'].widget.attrs['readonly'] = True
class EventCommentForm(happyforms.ModelForm):
"""Form of an event comment."""
class Meta:
model = EventComment
fields = ['comment']
class EventMetricForm(happyforms.ModelForm):
"""Form for EventMetric Model."""
class Meta:
model = EventMetric
fields = ['name', 'active']
|
|
# -*- coding: utf-8 -*-
"""Writer for setup configuration and script files."""
from __future__ import unicode_literals
import glob
import io
import os
import textwrap
from l2tdevtools.dependency_writers import interface
class SetupCfgWriter(interface.DependencyFileWriter):
"""Setup configuration file writer."""
PATH = 'setup.cfg'
_DOC_FILES = ('ACKNOWLEDGEMENTS', 'AUTHORS', 'LICENSE', 'README')
_PROJECTS_WITH_SDIST_TEST_DATA = (
'dfvfs', 'dfwinreg', 'plaso')
_TEMPLATE_DIRECTORY = os.path.join('data', 'templates', 'setup.cfg')
def _GenerateFromTemplate(self, template_filename, template_mappings):
"""Generates file context based on a template file.
Args:
template_filename (str): path of the template file.
template_mappings (dict[str, str]): template mappings, where the key
maps to the name of a template variable.
Returns:
str: output based on the template string.
Raises:
RuntimeError: if the template cannot be formatted.
"""
template_filename = os.path.join(
self._l2tdevtools_path, self._TEMPLATE_DIRECTORY, template_filename)
return super(SetupCfgWriter, self)._GenerateFromTemplate(
template_filename, template_mappings)
def Write(self):
"""Writes a setup.cfg file."""
doc_files = [
doc_file for doc_file in self._DOC_FILES if os.path.isfile(doc_file)]
formatted_doc_files = []
for index, doc_file in enumerate(sorted(doc_files)):
if index == 0:
line = 'doc_files = {0:s}'.format(doc_file)
else:
line = ' {0:s}'.format(doc_file)
formatted_doc_files.append(line)
python3_dependencies = self._dependency_helper.GetRPMRequires(
python_version=3)
formatted_requires = []
for index, dependency in enumerate(python3_dependencies):
if index == 0:
line = 'requires = {0:s}'.format(dependency)
else:
line = ' {0:s}'.format(dependency)
formatted_requires.append(line)
formatted_requires.append('')
template_mappings = {
'doc_files': '\n'.join(formatted_doc_files),
'maintainer': self._project_definition.maintainer,
'requires': '\n'.join(formatted_requires)}
file_content = []
template_data = self._GenerateFromTemplate('metadata', template_mappings)
file_content.append(template_data)
if self._project_definition.name in self._PROJECTS_WITH_SDIST_TEST_DATA:
template_data = self._GenerateFromTemplate(
'sdist_test_data', template_mappings)
file_content.append(template_data)
template_data = self._GenerateFromTemplate('bdist_rpm', template_mappings)
file_content.append(template_data)
template_data = self._GenerateFromTemplate('bdist_wheel', template_mappings)
file_content.append(template_data)
file_content = ''.join(file_content)
with io.open(self.PATH, 'w', encoding='utf-8') as file_object:
file_object.write(file_content)
class SetupPyWriter(interface.DependencyFileWriter):
"""Setup script file writer."""
PATH = os.path.join('setup.py')
_DOC_FILES = ('ACKNOWLEDGEMENTS', 'AUTHORS', 'LICENSE', 'README')
_PROJECTS_WITH_PACKAGE_DATA = (
'dfvfs', 'dfwinreg', 'dtformats', 'plaso', 'winregrc')
_PROJECTS_WITH_PYTHON3_AS_DEFAULT = ('plaso', )
_PROJECTS_WITH_SDIST_TEST_DATA = (
'dfvfs', 'dfwinreg', 'plaso')
_TEMPLATE_DIRECTORY = os.path.join('data', 'templates', 'setup.py')
def _DetermineSubmoduleLevels(self, project_name):
"""Determines the number of submodule levels.
Args:
project_name (str): name of the project.
    Returns:
int: number of submodule levels.
"""
submodule_glob = project_name
submodule_levels = 0
while submodule_levels < 10:
submodule_glob = '{0:s}/*'.format(submodule_glob)
submodule_paths = [
path for path in glob.glob(submodule_glob)
if os.path.isdir(path) and os.path.basename(path) != '__pycache__']
if not submodule_paths:
break
submodule_levels += 1
return submodule_levels
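  # Hedged illustration (not part of l2tdevtools): for a project whose deepest
  # package directory is project_name/parsers/plugins/, this returns 2 --
  # 'project_name/*' and 'project_name/*/*' both match directories, while
  # 'project_name/*/*/*' matches none.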
def _GenerateFromTemplate(self, template_filename, template_mappings):
"""Generates file context based on a template file.
Args:
template_filename (str): path of the template file.
template_mappings (dict[str, str]): template mappings, where the key
maps to the name of a template variable.
Returns:
str: output based on the template string.
Raises:
RuntimeError: if the template cannot be formatted.
"""
template_filename = os.path.join(
self._l2tdevtools_path, self._TEMPLATE_DIRECTORY, template_filename)
return super(SetupPyWriter, self)._GenerateFromTemplate(
template_filename, template_mappings)
def Write(self):
"""Writes a setup.py file."""
# Width is 80 characters minus 4 spaces, 2 single quotes and 1 comma.
text_wrapper = textwrap.TextWrapper(drop_whitespace=False, width=73)
description_short = text_wrapper.wrap(
self._project_definition.description_short)
description_short = '\n'.join([
' \'{0:s}\''.format(line) for line in description_short])
description_long = text_wrapper.wrap(
self._project_definition.description_long)
description_long = '\n'.join([
' \'{0:s}\''.format(line) for line in description_long])
if self._project_definition.name in self._PROJECTS_WITH_PYTHON3_AS_DEFAULT:
shebang = '#!/usr/bin/env python3'
else:
shebang = '#!/usr/bin/env python'
if self._project_definition.name in ('artifacts', 'plaso'):
data_files_path = 'share/{0:s}'.format(self._project_definition.name)
else:
data_files_path = 'share/{0:s}/data'.format(self._project_definition.name)
doc_files = [
doc_file for doc_file in self._DOC_FILES if os.path.isfile(doc_file)]
maintainer = self._project_definition.maintainer
maintainer, _, maintainer_email = maintainer.rpartition('<')
maintainer_email, _, _ = maintainer_email.rpartition('>')
if self._project_definition.status == 'experimental':
development_status = 'Development Status :: 2 - Pre-Alpha'
elif self._project_definition.status == 'alpha':
development_status = 'Development Status :: 3 - Alpha'
elif self._project_definition.status == 'beta':
development_status = 'Development Status :: 4 - Beta'
elif self._project_definition.status == 'stable':
development_status = 'Development Status :: 5 - Production/Stable'
else:
development_status = ''
packages_exclude = ['tests', 'tests.*', 'utils']
if os.path.isdir('docs'):
packages_exclude.append('docs')
data_directory = None
if os.path.isdir('data'):
data_directory = 'data'
scripts_directory = None
if os.path.isdir('scripts'):
scripts_directory = 'scripts'
elif os.path.isdir('tools'):
scripts_directory = 'tools'
if scripts_directory:
packages_exclude.append(scripts_directory)
packages_exclude = ', '.join([
'\'{0:s}\''.format(exclude) for exclude in sorted(packages_exclude)])
submodule_levels = self._DetermineSubmoduleLevels(
self._project_definition.name)
python3_package_module_prefix = '%{{{{python3_sitelib}}}}/{0:s}'.format(
self._project_definition.name)
python3_package_files = [
'{0:s}/*.py'.format(python3_package_module_prefix)]
yaml_glob = os.path.join(python3_package_module_prefix[21:], '*.yaml')
if glob.glob(yaml_glob):
python3_package_files.append(
'{0:s}/*.yaml'.format(python3_package_module_prefix))
for _ in range(submodule_levels):
python3_package_module_prefix = '{0:s}/*'.format(
python3_package_module_prefix)
python3_package_files.append(
'{0:s}/*.py'.format(python3_package_module_prefix))
yaml_glob = os.path.join(python3_package_module_prefix[21:], '*.yaml')
if glob.glob(yaml_glob):
python3_package_files.append(
'{0:s}/*.yaml'.format(python3_package_module_prefix))
python3_package_files.extend([
'%{{python3_sitelib}}/{0:s}*.egg-info/*',
'',
'%exclude %{{_prefix}}/share/doc/*'])
python3_package_module_prefix = '%{{{{python3_sitelib}}}}/{0:s}'.format(
self._project_definition.name)
python3_package_files.append(
'%exclude {0:s}/__pycache__/*'.format(python3_package_module_prefix))
for _ in range(submodule_levels):
python3_package_module_prefix = '{0:s}/*'.format(
python3_package_module_prefix)
python3_package_files.append(
'%exclude {0:s}/__pycache__/*'.format(python3_package_module_prefix))
if not data_directory and scripts_directory:
python3_package_files.append('%exclude %{{_bindir}}/*.py')
python3_package_files = ',\n'.join([
' \'{0:s}\''.format(package_file)
for package_file in python3_package_files])
python3_package_files = python3_package_files.format(
self._project_definition.name)
rpm_doc_files = [
doc_file for doc_file in doc_files if doc_file != 'LICENSE']
rpm_license_file = 'LICENSE'
template_mappings = {
'data_files_path': data_files_path,
'doc_files': ', '.join([
'\'{0:s}\''.format(doc_file) for doc_file in doc_files]),
'description_long': description_long,
'description_short': description_short,
'development_status': development_status,
'homepage_url': self._project_definition.homepage_url,
'maintainer': maintainer.strip(),
'maintainer_email': maintainer_email.strip(),
'packages_exclude': packages_exclude,
'project_name_description': self._project_definition.name_description,
'project_name': self._project_definition.name,
'python3_package_files': python3_package_files,
'rpm_doc_files': ' '.join(rpm_doc_files),
'rpm_license_file': rpm_license_file,
'shebang': shebang,
'scripts_directory': scripts_directory,
}
if self._project_definition.name in self._PROJECTS_WITH_PACKAGE_DATA:
if self._project_definition.name == 'dfvfs':
package_data_paths = ['dfvfs.lib']
elif self._project_definition.name == 'plaso':
package_data_paths = [
'plaso.parsers', 'plaso.parsers.esedb_plugins',
'plaso.parsers.olecf_plugins', 'plaso.parsers.plist_plugins',
'plaso.parsers.winreg_plugins']
elif self._project_definition.name == 'winreg-kb':
package_data_paths = ['winregrc']
else:
package_data_paths = [self._project_definition.name]
template_mappings['package_data_paths'] = ',\n'.join([
' \'{0:s}\': [\'*.yaml\']'.format(path)
for path in package_data_paths])
file_content = []
if scripts_directory:
template_data = self._GenerateFromTemplate(
'header_scripts', template_mappings)
else:
template_data = self._GenerateFromTemplate(
'header', template_mappings)
file_content.append(template_data)
template_data = self._GenerateFromTemplate(
'header_setuptools', template_mappings)
file_content.append(template_data)
if self._project_definition.name in self._PROJECTS_WITH_SDIST_TEST_DATA:
template_data = self._GenerateFromTemplate(
'import_sdist', template_mappings)
file_content.append(template_data)
for template_file in ('import_module', 'bdist_msi', 'bdist_rpm-start'):
template_data = self._GenerateFromTemplate(
template_file, template_mappings)
file_content.append(template_data)
if data_directory and scripts_directory:
template_file = 'bdist_rpm-with_data_and_tools'
elif data_directory:
template_file = 'bdist_rpm-with_data'
else:
template_file = 'bdist_rpm'
template_data = self._GenerateFromTemplate(template_file, template_mappings)
file_content.append(template_data)
template_data = self._GenerateFromTemplate(
'setup_header', template_mappings)
file_content.append(template_data)
if self._project_definition.name in self._PROJECTS_WITH_SDIST_TEST_DATA:
template_file = 'setup_cmdclass_sdist'
else:
template_file = 'setup_cmdclass'
template_data = self._GenerateFromTemplate(template_file, template_mappings)
file_content.append(template_data)
template_data = self._GenerateFromTemplate(
'setup_classifiers', template_mappings)
file_content.append(template_data)
if self._project_definition.name in self._PROJECTS_WITH_PACKAGE_DATA:
template_data = self._GenerateFromTemplate(
'setup_package_data', template_mappings)
file_content.append(template_data)
if scripts_directory:
template_data = self._GenerateFromTemplate(
'setup_scripts', template_mappings)
file_content.append(template_data)
template_data = self._GenerateFromTemplate(
'setup_data_files', template_mappings)
file_content.append(template_data)
if data_directory:
template_data = self._GenerateFromTemplate(
'setup_data_files-with_data', template_mappings)
file_content.append(template_data)
template_data = self._GenerateFromTemplate(
'setup_footer', template_mappings)
file_content.append(template_data)
file_content = ''.join(file_content)
with io.open(self.PATH, 'w', encoding='utf-8') as file_object:
file_object.write(file_content)
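# Hedged usage sketch (not part of l2tdevtools): both writers are driven the same
# way -- construct them with the project metadata consumed above (the
# DependencyFileWriter base class is assumed to take the l2tdevtools path, the
# project definition and a dependency helper) and call Write(), e.g.
#
#   SetupCfgWriter(l2tdevtools_path, project_definition, dependency_helper).Write()
#   SetupPyWriter(l2tdevtools_path, project_definition, dependency_helper).Write()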
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import warnings
from typing import Any, Callable, Iterable, Optional, overload
import prestodb
from prestodb.exceptions import DatabaseError
from prestodb.transaction import IsolationLevel
from airflow import AirflowException
from airflow.configuration import conf
from airflow.hooks.dbapi import DbApiHook
from airflow.models import Connection
class PrestoException(Exception):
"""Presto exception"""
def _boolify(value):
if isinstance(value, bool):
return value
if isinstance(value, str):
if value.lower() == 'false':
return False
elif value.lower() == 'true':
return True
return value
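# Hedged illustration (not part of the provider): _boolify normalizes string
# flags coming from the connection extras, e.g. _boolify('false') -> False,
# _boolify('True') -> True, _boolify(True) -> True; any other value is passed
# through unchanged.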
class PrestoHook(DbApiHook):
"""
Interact with Presto through prestodb.
>>> ph = PrestoHook()
>>> sql = "SELECT count(1) AS num FROM airflow.static_babynames"
>>> ph.get_records(sql)
[[340698]]
"""
conn_name_attr = 'presto_conn_id'
default_conn_name = 'presto_default'
conn_type = 'presto'
hook_name = 'Presto'
def get_conn(self) -> Connection:
"""Returns a connection object"""
db = self.get_connection(self.presto_conn_id) # type: ignore[attr-defined]
extra = db.extra_dejson
auth = None
if db.password and extra.get('auth') == 'kerberos':
raise AirflowException("Kerberos authorization doesn't support password.")
elif db.password:
auth = prestodb.auth.BasicAuthentication(db.login, db.password)
elif extra.get('auth') == 'kerberos':
auth = prestodb.auth.KerberosAuthentication(
config=extra.get('kerberos__config', os.environ.get('KRB5_CONFIG')),
service_name=extra.get('kerberos__service_name'),
mutual_authentication=_boolify(extra.get('kerberos__mutual_authentication', False)),
force_preemptive=_boolify(extra.get('kerberos__force_preemptive', False)),
hostname_override=extra.get('kerberos__hostname_override'),
sanitize_mutual_error_response=_boolify(
extra.get('kerberos__sanitize_mutual_error_response', True)
),
principal=extra.get('kerberos__principal', conf.get('kerberos', 'principal')),
delegate=_boolify(extra.get('kerberos__delegate', False)),
ca_bundle=extra.get('kerberos__ca_bundle'),
)
presto_conn = prestodb.dbapi.connect(
host=db.host,
port=db.port,
user=db.login,
source=db.extra_dejson.get('source', 'airflow'),
http_scheme=db.extra_dejson.get('protocol', 'http'),
catalog=db.extra_dejson.get('catalog', 'hive'),
schema=db.schema,
auth=auth,
isolation_level=self.get_isolation_level(), # type: ignore[func-returns-value]
)
if extra.get('verify') is not None:
            # Unfortunately the verify parameter is not available via the public API.
# The PR is merged in the presto library, but has not been released.
# See: https://github.com/prestosql/presto-python-client/pull/31
presto_conn._http_session.verify = _boolify(extra['verify'])
return presto_conn
def get_isolation_level(self) -> Any:
"""Returns an isolation level"""
db = self.get_connection(self.presto_conn_id) # type: ignore[attr-defined]
isolation_level = db.extra_dejson.get('isolation_level', 'AUTOCOMMIT').upper()
return getattr(IsolationLevel, isolation_level, IsolationLevel.AUTOCOMMIT)
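    # Hedged illustration (not part of the provider): get_conn() and
    # get_isolation_level() above read their settings from the connection's
    # ``extra`` JSON, e.g.
    #   {"protocol": "https", "catalog": "hive", "source": "airflow",
    #    "isolation_level": "READ_COMMITTED", "verify": "false"}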
@staticmethod
def _strip_sql(sql: str) -> str:
return sql.strip().rstrip(';')
@overload
def get_records(self, sql: str = "", parameters: Optional[dict] = None):
"""Get a set of records from Presto
:param sql: SQL statement to be executed.
:param parameters: The parameters to render the SQL query with.
"""
@overload
def get_records(self, sql: str = "", parameters: Optional[dict] = None, hql: str = ""):
""":sphinx-autoapi-skip:"""
def get_records(self, sql: str = "", parameters: Optional[dict] = None, hql: str = ""):
""":sphinx-autoapi-skip:"""
if hql:
warnings.warn(
"The hql parameter has been deprecated. You should pass the sql parameter.",
DeprecationWarning,
stacklevel=2,
)
sql = hql
try:
return super().get_records(self._strip_sql(sql), parameters)
except DatabaseError as e:
raise PrestoException(e)
@overload
def get_first(self, sql: str = "", parameters: Optional[dict] = None) -> Any:
"""Returns only the first row, regardless of how many rows the query returns.
:param sql: SQL statement to be executed.
:param parameters: The parameters to render the SQL query with.
"""
@overload
def get_first(self, sql: str = "", parameters: Optional[dict] = None, hql: str = "") -> Any:
""":sphinx-autoapi-skip:"""
def get_first(self, sql: str = "", parameters: Optional[dict] = None, hql: str = "") -> Any:
""":sphinx-autoapi-skip:"""
if hql:
warnings.warn(
"The hql parameter has been deprecated. You should pass the sql parameter.",
DeprecationWarning,
stacklevel=2,
)
sql = hql
try:
return super().get_first(self._strip_sql(sql), parameters)
except DatabaseError as e:
raise PrestoException(e)
@overload
def get_pandas_df(self, sql: str = "", parameters=None, **kwargs):
"""Get a pandas dataframe from a sql query.
:param sql: SQL statement to be executed.
:param parameters: The parameters to render the SQL query with.
"""
@overload
def get_pandas_df(self, sql: str = "", parameters=None, hql: str = "", **kwargs):
""":sphinx-autoapi-skip:"""
def get_pandas_df(self, sql: str = "", parameters=None, hql: str = "", **kwargs):
""":sphinx-autoapi-skip:"""
if hql:
warnings.warn(
"The hql parameter has been deprecated. You should pass the sql parameter.",
DeprecationWarning,
stacklevel=2,
)
sql = hql
import pandas
cursor = self.get_cursor()
try:
cursor.execute(self._strip_sql(sql), parameters)
data = cursor.fetchall()
except DatabaseError as e:
raise PrestoException(e)
column_descriptions = cursor.description
if data:
df = pandas.DataFrame(data, **kwargs)
df.columns = [c[0] for c in column_descriptions]
else:
df = pandas.DataFrame(**kwargs)
return df
@overload
def run(
self,
sql: str = "",
autocommit: bool = False,
parameters: Optional[dict] = None,
handler: Optional[Callable] = None,
) -> None:
"""Execute the statement against Presto. Can be used to create views."""
@overload
def run(
self,
sql: str = "",
autocommit: bool = False,
parameters: Optional[dict] = None,
handler: Optional[Callable] = None,
hql: str = "",
) -> None:
""":sphinx-autoapi-skip:"""
def run(
self,
sql: str = "",
autocommit: bool = False,
parameters: Optional[dict] = None,
handler: Optional[Callable] = None,
hql: str = "",
) -> None:
""":sphinx-autoapi-skip:"""
if hql:
warnings.warn(
"The hql parameter has been deprecated. You should pass the sql parameter.",
DeprecationWarning,
stacklevel=2,
)
sql = hql
return super().run(sql=self._strip_sql(sql), parameters=parameters, handler=handler)
def insert_rows(
self,
table: str,
rows: Iterable[tuple],
target_fields: Optional[Iterable[str]] = None,
commit_every: int = 0,
replace: bool = False,
**kwargs,
) -> None:
"""
A generic way to insert a set of tuples into a table.
:param table: Name of the target table
:param rows: The rows to insert into the table
:param target_fields: The names of the columns to fill in the table
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:param replace: Whether to replace instead of insert
"""
if self.get_isolation_level() == IsolationLevel.AUTOCOMMIT:
self.log.info(
                'Transactions are not enabled in the Presto connection. '
'Please use the isolation_level property to enable it. '
'Falling back to insert all rows in one transaction.'
)
commit_every = 0
super().insert_rows(table, rows, target_fields, commit_every)
|
|
#!/usr/bin/env python2.7
import h5py, os, sys, csv
import argparse
# General
from glob import glob
import string
from cStringIO import StringIO
from collections import defaultdict
from Bio import SeqIO
from math import log10, log
import numpy as np
# Plotting
import matplotlib.pyplot as plt
# Fast5Tools
from fast5tools.f5class import *
from fast5tools.f5ops import *
from fast5tools.helperops import *
from fast5tools.fileListClass import *
from fast5tools.plotops import *
from fast5tools.edgeRops import *
#################################################
## Argument Parser
#################################################
parser = argparse.ArgumentParser(description = """
Given path(s) to fast5 file(s) and/or directories of fast5s,
- get kmer count tables
- see differentially-represented kmers (given reference set)
Note:
IF you have the right rpy2 setup,
you can calculate differentially represented kmers using EdgeR from bioconductor.
- http://bioconductor.org/packages/2.5/bioc/html/edgeR.html
The EdgeR analysis is derived from Brad Chapman's example at:
https://github.com/chapmanb/bcbb/blob/master/stats/count_diffexp.py
You can always do the median-normalization analysis.
Instead of TMM, the median is subtracted from each count,
followed by normalizing it to the MAD.
John Urban (2015, 2016, 2017, 2018)
""", formatter_class = argparse.RawTextHelpFormatter)
parser.add_argument('fast5', metavar='fast5', nargs='+',
type= str,
help='''Paths to as many fast5 files and/or directories filled with fast5 files as you want.
Assumes all fast5 files have '.fast5' extension.
If inside dir of dirs with .fast5 files, then can just do "*" to get all files from all dirs.''')
parser.add_argument('-k', '--k', type=int, default=6,
help = '''Kmer-size. Default: 6.''')
parser.add_argument('--revcomp', default=False, action='store_true',
help = '''Also count kmers from reverse complement of each sequence.''')
parser_not_fast5 = parser.add_mutually_exclusive_group()
parser_not_fast5.add_argument('--fasta', '-fa', action='store_true', default=False,
help='''Looking at a FASTA file or list of FASTA files, not FAST5s''')
parser_not_fast5.add_argument('--fastq', '-fq', action='store_true', default=False,
help='''Looking at a FASTQ file or list of FASTQ files, not FAST5s''')
#nargs='+',
parser.add_argument('--reference',
type=str, default=None, help='''All files after this flag and before the next, are interpreted as Reference fastA/Q/5 files.
NOTE: Unlike the default datasets that can contain as many files/dirs/fofns as you'd like,
this can only be pointed at 1 object UNLESS you put everything between quotation marks, which allows you to
specify as many reference files/dirs/FOFNs as you'd like.
E.g. --reference "fast5dir1/ fast5dir2/" ''')
parser_ref_not_fast5 = parser.add_mutually_exclusive_group()
parser_ref_not_fast5.add_argument('--reffasta', '-rfa', action='store_true', default=False,
help='''The reference dataset is a FASTA file or list of FASTA files, not FAST5s''')
parser_ref_not_fast5.add_argument('--reffastq', '-rfq', action='store_true', default=False,
help='''The reference dataset is a FASTQ file or list of FASTQ files, not FAST5s''')
parser.add_argument('-r', '--readtype', default='template',
type= str,
help='''Choose type of fasta to get.
Choices: 'template', 'complement', '2d', 'molecule', 'all', 'MoleQual'.
Default: template.
Molecule returns single fasta for each fast5 by following rules:
if 2d present, return 2d.
elif complement present with no 2d, return longer of template or complement.
elif only template present, return template.
'MoleQual' is similar to molecule.
It differs only in choosing between template and complement when a 2D is not present.
Instead of choosing the longer one, it chooses the one with a higher quality mean quality score.''')
parser.add_argument('-bcv', '--square-root-dispersion', dest='bcv', type=float, default=0.2,
help='''When there are no replicates in edgeR, dispersion must be determined by the user.
The default is 0.2. Other values to try could be 0.01-0.4 (or any).
p-values will be sensitive to choice of bcv. Fold change will not.''')
parser.add_argument('--minlen', type=int, default=0, help='''Only report reads >= minlen. Default: 0 bp.''')
parser.add_argument('--maxlen', type=int, default=int(3e9), help='''Only report reads <= maxlen. Default: 3 billion bp.''')
parser.add_argument('--minq', type=float, default=0, help='''Only report reads with mean quality scores >= Q. Default: 0.''')
parser.add_argument('--maxq', type=float, default=int(10e3), help='''Only report reads with mean quality scores <= Q.
Default: 10000 (this is orders of magnitude higher than normal max which are always < 20)''')
parser.add_argument('-n', '--nfiles', type=int, default=1000000000000,
help = '''This defaults to 1000000000000 in order to use all files (will you ever need to look at more than that?).
However, you can downsample with this option by adjusting this number to get info from the first N files.
Use --random to select N at random from the list.
Aim this script at a specific file for that file's contents.''')
parser.add_argument('-R', '--random', action='store_true', default=False,
help = '''Randomize what files are looked at.''')
parser.add_argument('-S', '--randomseed', type=int, default=False,
help = '''Randomize what files are looked at, but use given seed for reproducibility.''')
parser.add_argument('--filesused', type=str, default='qual_v_pos', help='''
''')
parser.add_argument('-o', '--outdir', type=str, default="./",
help = '''....''')
parser.add_argument('--filename', type=str, default='kmer_counts.txt', help='''
For output. Default: kmer_counts.txt. (Formerly defaulted to None).
If a filename is given, filesused will be reported in a similarly named file ending with .filesused.fofn
When --reference used, files will have similar name with reference_ prepended.''')
parser.add_argument('--plotfilesuffix', type=str, default=None, help='''
Suffix and extension for output plots. Default None (PDFs output in outdir using hard-coded prefixes).
Plots will be in specified outdir.
The minimum information to give is the extension (no dot needed) -- e.g. png, jpg, pdf.
Example1: myexperiment.png ((plots will be named plotprefix_myexperiment.png))
Example2: .jpg ((plots will be named plotprefix_.jpg))
Example3: jpg ((plots will be named plotprefix.jpg))
Example4: when None (default), plots will be named plotprefix.pdf''')
parser.add_argument('--notarlite', action='store_true', default=False, help=''' The default method (called tarlite) extracts 1 file from a given tarchive at a time, processes it, and deletes it.
This option turns tarlite off, resulting in extracting the entire tarchive before proceeding (and finally deleting it).
It is possible that --notarlite is faster, but at the expense of exceeding file number limits or disk storage quotas.
Nonetheless, the difference in speed is a lot smaller than the difference in space needed.
For example, not using tarlite will require >2*tarchive amount of disk space (i.e. the tar.gz and its extracted contents).
The tarlite method only requires the disk space already taken by the tarchive and enough for 1 additional file at a time.
A corollary is that tarlite just needs to be allowed to form 1 (or a few) files compared to what could be thousands to millions.
''')
parser.add_argument('--tarlite', action='store_true', default=False, help='''This legacy option is outdated.
However, it is kept here to avoid breaking pipelines that make use of it.
The tarlite approach is now default. Specifying this will not change that default behavior.
It will just prevent pipelines from breaking.
However, not specifying this will still also result in the tarlite approach.
Use --notarlite to turn it off.''')
args = parser.parse_args()
## run_kmer_counting() used to be defined here. I moved it to helperops in the kmer section.
## That required adding imports including fast5tools imports.
## If that interferes w/ other scripts down the road, then I will change back or make a specific kmerops file.
if __name__ == "__main__":
# Process Args
args.outdir = process_outdir(args.outdir)
outfile = args.outdir + args.filename if (args.filename is not None) else None
## Execute
kmerdict, filesused = run_kmer_counting(initial_list=args.fast5, \
k=args.k, \
readtype=args.readtype, \
revcomp=args.revcomp, \
nfiles=args.nfiles, \
random=args.random, \
randomseed=args.randomseed, \
notarlite=args.notarlite, \
fasta=args.fasta, \
fastq=args.fastq, \
minlen=args.minlen, \
maxlen=args.maxlen, \
minq=args.minq, \
maxq=args.maxq)
## Write
writekmer(kmerdict, outfile)
## Files used
process_filesused(trigger=args.filename, filesused=filesused, outdir=args.outdir)
## Reference?
do_comparison = False
if args.reference is not None:
do_comparison = True
## Out
refoutfile = args.outdir + 'reference_' + args.filename if (args.filename is not None) else None
## Convert into list:
args.reference = args.reference.strip().split()
refdict, refsused = run_kmer_counting(initial_list=args.reference, \
k=args.k, \
readtype=args.readtype, \
revcomp=args.revcomp, \
nfiles=args.nfiles, \
random=args.random, \
randomseed=args.randomseed, \
notarlite=args.notarlite, \
fasta=args.fasta, \
fastq=args.fastq, \
minlen=args.minlen, \
maxlen=args.maxlen, \
minq=args.minq, \
maxq=args.maxq)
## Write
writekmer(refdict, refoutfile)
## Files used
trigger = 'reference_'+args.filename if args.filename is not None else None
process_filesused(trigger=trigger, filesused=refsused, outdir=args.outdir)
## TODO:
## Text file with both kmers, raw counts, medNorm counts, TMM counts, other EdgeR table stuff
## Fix up EdgeR class to not necessarily make the dict -- can access all values from the R objects directly (have started to implement this)
##
    ## COMPARATIVE ANALYSES
if do_comparison:
## Median Normalization Approach
results = MedNormAnalysis(kmerdict, refdict)
if has_R:
#EdgeR
edgeR_results = EdgeR(kmerdict, refdict)
## DETERMINE PLOT SUFFIX
if args.plotfilesuffix is None:
sfx = '.pdf'
else:
if '.' in args.plotfilesuffix:
## assumes format words.ext, makes sfx = _words.ext
sfx = '_' + args.plotfilesuffix
else:
## assumes image type specified (e.g. pdf, jpg, png)
sfx = '.' + args.plotfilesuffix
make_name = make_name_function(pfx=args.outdir, sfx=sfx)
## PLOTTING
singleTableKmerPlot(kmerdict, saveas=make_name('test_kmer_plot'))
singleTableKmerHist(kmerdict, saveas=make_name('test_kmer_hist'))
if args.reference is not None:
singleTableKmerPlot(refdict, saveas=make_name('ref_kmer_plot'))
singleTableKmerHist(refdict, saveas=make_name('ref_kmer_hist'))
twoTableKmerScatterPlot(kmerdict, refdict, saveas=make_name('raw_scatter_test_v_ref'))
twoTableKmer_MA_Plot(results, saveas=make_name('medNorm_MA_plot'))
general_scatter(x=results.get_norm_count_avg(), y=results.get_norm_count_diff(), words=results.get_genes(), saveas=make_name('medNorm_scaledToRef_avgCount_v_CountDiffs'), xlab='Average Normalized Counts', ylab='Difference: norm_test - norm_ref')
general_scatter(x=results.get_ref_zscores(), y=results.get_test_zscores(), words=results.get_genes(), saveas=make_name('robust_zscore_scatter_test_v_ref'), xlab='Reference Z-scores', ylab='Test Z-scores')
alphabeticalPlot(results.get_logfc(), results.get_genes(), saveas=make_name('alpabeticalKmers_v_mednormlogFC'))
gcPlot(results.get_logfc(), results.get_genes(), saveas=make_name('kmerGCcontent_v_mednormlogFC'))
compressionPlot(results.get_logfc(), results.get_genes(), saveas=make_name('kmerCompressionLength_v_mednormlogFC'))
if has_R:
twoTableKmerScatterPlotEdgeR(edgeR_results, saveas=make_name('TMM_scatter_test_v_ref'))
volcanoPlot(edgeR_results.get_logfc(), edgeR_results.get_pvalues(), edgeR_results.get_k(), saveas=make_name('TMM_volcano'))
smearPlot(edgeR_results.get_logfc(), edgeR_results.get_logcpm(), edgeR_results.get_k(), saveas=make_name('TMM_MA'))
alphabeticalPlot(edgeR_results.get_logfc(), edgeR_results.get_k(), saveas=make_name('alpabeticalKmers_v_TMMlogFC'))
gcPlot(edgeR_results.get_logfc(), edgeR_results.get_k(), saveas=make_name('kmerGCcontent_v_TMMlogFC'))
compressionPlot(edgeR_results.get_logfc(), edgeR_results.get_k(), saveas=make_name('kmerCompressionLength_v_TMMlogFC'))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET
import pprint
import re
import codecs
import json
"""
Clean and format the OSM data into JSON for import into MongoDB.
"""
# REGEX to check for all lower case characters in a string
lower = re.compile(r'^([a-z]|_)*$')
# REGEX to check for colon values
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
# REGEX to check for mongodb specific characters
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
'''
street_type_re
Regex that scans a street name (using the components explained below) to determine
whether it ends in a shortened, abbreviated suffix; only the last word at the end
of the string is compared.
https://docs.python.org/2/library/re.html
\b assert position at a word boundary (^\w|\w$|\W\w|\w\W)
\S+ match any non-white space character [^\r\n\t\f ]
Quantifier: + Between one and unlimited times, as many times as possible, giving back as needed [greedy]
\.? matches the character . literally
Quantifier: ? Between zero and one time, as many times as possible, giving back as needed [greedy]
$ assert position at end of the string
https://www.regex101.com/#python
'''
street_type_re = re.compile(r'\b\S+\.?$', re.IGNORECASE)
# The expected street names
expected_street_names = ["Avenue", "Boulevard", "Circle", "Commons", "Court", "Drive", "Highway", "Lane", "Loop", "Parkway", "Place", "Road",
"Sqaure", "Street", "Trail"]
'''
map_old_to_new
Create a dictionary to map abbreviations to the full street suffix
'''
map_old_to_new = {
"Ave" : "Avenue",
"Ave." : "Avenue",
"Blvd" : "Boulevard",
"Blvd." : "Boulevard",
"Cir" : "Circle",
"Cmn" : "Commons",
"Crt" : "Court",
"Crt." : "Court",
"Dr" : "Drive",
"Dr." : "Drive",
"Hwy" : "Highway",
"Ln" : "Lane",
"Ln." : "Lane",
"LN" : "Lane",
"Lp" : "Loop",
"PARK":"Park",
"Pk" : "Parkway",
"Pk." : "Parkway",
"Pl" : "Place",
"Pl." : "Place",
"Rd.": "Road",
"Rd" : "Road",
"Sq": "Sqaure",
"Sq.": "Sqaure",
"St": "Street",
"St.": "Street",
"Tr": "Trail",
"Tr.": "Trail",
"Ashwood": "Ashwood Street"
}
'''
Create the CREATED dictionary to store a node's metadata
'''
CREATED = [ "version", "changeset", "timestamp", "user", "uid"]
'''
Create the POSITION dictionary, which contains the latitude and
the longitude. Lat is in the 0 position, Lon is in the 1 position.
This will be used as a lookup dictionary to determine if a key
exists in an element.
'''
POSITION = ["lat","lon"]
def shape_element(element):
'''
    shape_element will perform the following tasks:
- if second level tag "k" value contains problematic characters, it should be ignored
- if second level tag "k" value starts with "addr:", it should be added to a dictionary "address"
- if second level tag "k" value does not start with "addr:", but contains ":", you can process it
same as any other tag.
- if there is a second ":" that separates the type/direction of a street,
      the tag should be ignored (for example, a key such as "addr:street:name").
'''
# Create the node dictionary
node = {}
# Add the created object to the node dictionary
node['created'] = {}
    # For Lat and Lon we will store these in a 'pos' (position) array;
    # we need lat and lon in a specific order (LAT, LON)
node['pos'] =[0 for i in range(2)]
# Search only through the node and way types
if element.tag == "node" or element.tag == "way" :
# add the type to the node, the tag of the element
node['type'] = element.tag
# Search through the node and way types
# to build the CREATED and POSITION dictionaries
for k,v in element.attrib.iteritems():
            # CREATED values: {"version", "changeset", "timestamp", "user", "uid"}
if k in CREATED:
node['created'][k] = v
                #TODO: make sure time is formatted from string to date
# Lat is in first position, Lon second position
# In JSON and mongodb we need to represent the Lat and Lon as floats
elif k in POSITION:
if k=="lat":
node['pos'][0]=(float(v))
else: # Lon
node['pos'][1]=(float(v))
# Key was not in the CREATED or POSITION dictionary
# Add a new key value pair
else:
node[k] = v
'''
Setup processing for the TAGS - Addresses and other meta data for the
node and way objects
'''
# Instantiate the address dictionary
address = {}
'''
Search all the subelements and prepare valid tags for processing
Any ignored data will be emitted to the console
'''
for tag in element.iter("tag"):
if is_valid_tag(tag) == True:
# address attributes - create the dictionary object to hold
# the attributes.
                # use a slice of the first 5 characters of the key
if tag.attrib['k'][:5] == "addr:":
# Set the keyName to the text to the RIGHT of the colon, dropping "addr:"
newKey = tag.attrib['k'][5:]
# if there is a second ":" that separates the
# type/direction of a street ignore it - Per Assignment
if newKey.count(":")> 0:
print "found colon, and it's not address - ignoring it", newKey
else:
# Add new key to the address object, and assign the
# value to the key
address[newKey] = tag.attrib['v']
# Clean the Address
if newKey == "street":
clean_name = update_streetname(tag.attrib['v'], map_old_to_new)
address[newKey] = clean_name
# Clean the postcode
if newKey == "postcode":
clean_zip = update_zipcode(tag.attrib['v'] )
address[newKey] = clean_zip
# Clean the state, assume all states should be AK
if newKey == "state":
if tag.attrib['v'] != "AK":
address[newKey] = "AK"
# clean the city name
if newKey == "city":
clean_city = update_city(tag.attrib['v'])
address[newKey] = clean_city
                # we have a generic tag item with no colon, to be added at the root of the node/way object
elif tag.attrib['k'].count(":") < 1:
plainKey = tag.attrib['k']
#print "Plain KEY", tag.attrib['k'], tag.attrib['v']
node[plainKey] = tag.attrib['v']
# For keys similar to the "addr:" key process these keys like the generic keys
elif tag.attrib['k'].count(":") == 1 and tag.attrib['k'][:5] != "addr:" and tag.attrib['k'][5:] != "created" :
# Get the length to the colon, and get the text from the
# right of colon to the end for the key.
# We are going to strip off the first text to the left of
# the colon, for readability and mongodb
keyIndex = tag.attrib['k'].find(":")
# increment by one so we start at the new key name
keyIndex += 1
# Get the key name and create a dictionary for this key and value
oddKey = tag.attrib['k'][keyIndex:]
node[oddKey] = tag.attrib['v']
else:
print "Ingnore tag - tag is invalid" , tag.attrib['k'], tag.attrib['v']
# Search for any node_refs in the sub arrays - just for the way tag, per instructions
node_refs = []
if element.tag =="way":
for ndref in element.iter("nd"):
node_refs.append(ndref.attrib['ref'])
# Check to see if we have any node_refs, if we do add the node_refs to the node
if len(node_refs) > 0:
node['node_refs'] = node_refs
# Check to see if we have any addresses, if we have addresses add the addresses to the node
if len(address)>0:
node['address'] = address
return node
else:
return None
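# Hedged illustration (not part of the assignment code): an input element such as
#   <node id="123" lat="61.19" lon="-149.90" user="x" uid="1" version="2"
#         changeset="9" timestamp="2013-01-01T00:00:00Z">
#     <tag k="addr:street" v="Spenard Rd"/>
#   </node>
# is shaped into roughly:
#   {'type': 'node', 'id': '123', 'pos': [61.19, -149.9],
#    'created': {'user': 'x', 'uid': '1', 'version': '2',
#                'changeset': '9', 'timestamp': '2013-01-01T00:00:00Z'},
#    'address': {'street': 'Spenard Road'}}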
def is_valid_tag(element):
'''
Check for Valid Tags and return true for valid tags false for invalid
'''
isValid = True
if problemchars.search(element.attrib['k']):
isValid = False
else: # Count all the others as valid
isValid = True
return isValid
def process_map(file_in, pretty = False):
'''
Process map reads in the OpenStreet Map file
and writes out to file the JSON data structure
file_in is the path and filename, pretty parameter formats the json
'''
# Keep the same filename and just append .json to the filename
file_out = "{0}.2.json".format(file_in)
data = []
with codecs.open(file_out, "w") as fo:
# Go element by element to read the file
for _, element in ET.iterparse(file_in):
el = shape_element(element)
# If we have an element add it to the dictionary
# and write the data to a file
if el:
data.append(el)
if pretty:
fo.write(json.dumps(el, indent=2)+"\n")
else:
fo.write(json.dumps(el) + "\n")
return data
def update_streetname(name, map_old_to_new):
'''
Update name compares current name to the map of bad values to good values
and provides the updated name back to the method
'''
for iName in map_old_to_new.keys():
#Check to see if we find a match for a bad value in our map
match = re.search(iName, name)
#if match is found then remap the old value with new value
if match:
name = re.sub(iName+'$', map_old_to_new[iName], name)
return name
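# Hedged illustration (not part of the assignment code):
#   update_streetname("W 5th Ave.", map_old_to_new)  -> "W 5th Avenue"
#   update_streetname("Spenard Rd", map_old_to_new)  -> "Spenard Road"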
def update_zipcode(zipcode):
'''
Clean the zip code
These are a few of the errors one might encounter
{ "_id" : "Homer, AK 99603", "count" : 2 }
{ "_id" : "AK", "count" : 1 }
{ "_id" : "Alaska", "count" : 1 }
{ "_id" : "AK 99501-2129", "count" : 1 }
{ "_id" : "AK 99501-2118", "count" : 1 }
'''
    # use regex to remove all non-digit characters from the zipcode; this
    # will leave us with a numeric string which should be
    # 5 or 9 digits long
zipcode_clean = re.sub(r"\D", "", zipcode)
return zipcode_clean
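# Hedged illustration (not part of the assignment code):
#   update_zipcode("AK 99501-2129")    -> "995012129"
#   update_zipcode("Homer, AK 99603")  -> "99603"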
def update_city(cityname):
'''
TODO
Scan the dictionary of city names and
    fix bad spellings; improve the algorithm over time
'''
if cityname == "Anchoage":
cityname = "Anchorage"
return cityname
def test():
# NOTE: if you are running this code on your computer, with a larger dataset,
# call the process_map procedure with pretty=False. The pretty=True option adds
# additional spaces to the output, making it significantly larger.
data = process_map('Alaska_Small.xml', False)
pprint.pprint(len(data))
print "DONE"
if __name__ == "__main__":
test()
|
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
case.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import errno
import functools
import heapq
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
from tornado.concurrent import Future, TracebackFuture
from tornado.log import app_log, gen_log
from tornado import stack_context
from tornado.util import Configurable
try:
import signal
except ImportError:
signal = None
try:
import thread # py2
except ImportError:
import _thread as thread # py3
from tornado.platform.auto import set_close_exec, Waker
class TimeoutError(Exception):
pass
class IOLoop(Configurable):
"""A level-triggered I/O loop.
We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
are available, or else we fall back on select(). If you are
implementing a system that needs to handle thousands of
simultaneous connections, you should use a system that supports
either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server::
import errno
import functools
import ioloop
import socket
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error as e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", port))
sock.listen(128)
io_loop = ioloop.IOLoop.instance()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# Global lock for creating global IOLoop instance
_instance_lock = threading.Lock()
_current = threading.local()
@staticmethod
def instance():
"""Returns a global `IOLoop` instance.
Most applications have a single, global `IOLoop` running on the
main thread. Use this method to get this instance from
another thread. To get the current thread's `IOLoop`, use `current()`.
"""
if not hasattr(IOLoop, "_instance"):
with IOLoop._instance_lock:
if not hasattr(IOLoop, "_instance"):
# New instance after double check
IOLoop._instance = IOLoop()
return IOLoop._instance
@staticmethod
def initialized():
"""Returns true if the singleton instance has been created."""
return hasattr(IOLoop, "_instance")
def install(self):
"""Installs this `IOLoop` object as the singleton instance.
This is normally not necessary as `instance()` will create
an `IOLoop` on demand, but you may want to call `install` to use
a custom subclass of `IOLoop`.
"""
assert not IOLoop.initialized()
IOLoop._instance = self
@staticmethod
def current():
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as current
by `make_current`, returns that instance. Otherwise returns
`IOLoop.instance()`, i.e. the main thread's `IOLoop`.
A common pattern for classes that depend on ``IOLoops`` is to use
a default argument to enable programs with multiple ``IOLoops``
but not require the argument for simpler applications::
class MyClass(object):
def __init__(self, io_loop=None):
self.io_loop = io_loop or IOLoop.current()
In general you should use `IOLoop.current` as the default when
constructing an asynchronous object, and use `IOLoop.instance`
when you mean to communicate to the main thread from a different
one.
"""
current = getattr(IOLoop._current, "instance", None)
if current is None:
return IOLoop.instance()
return current
def make_current(self):
"""Makes this the `IOLoop` for the current thread.
An `IOLoop` automatically becomes current for its thread
when it is started, but it is sometimes useful to call
`make_current` explicitly before starting the `IOLoop`,
so that code run at startup time can find the right
instance.
"""
IOLoop._current.instance = self
@staticmethod
def clear_current():
IOLoop._current.instance = None
@classmethod
def configurable_base(cls):
return IOLoop
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop
def initialize(self):
pass
def close(self, all_fds=False):
"""Closes the `IOLoop`, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
IOLoop will be closed (not just the ones created by the
`IOLoop` itself).
Many applications will only use a single `IOLoop` that runs for the
entire lifetime of the process. In that case closing the `IOLoop`
is not necessary since everything will be cleaned up when the
process exits. `IOLoop.close` is provided mainly for scenarios
such as unit tests, which create and destroy a large number of
``IOLoops``.
An `IOLoop` must be completely stopped before it can be closed. This
means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
be allowed to return before attempting to call `IOLoop.close()`.
Therefore the call to `close` will usually appear just after
the call to `start` rather than near the call to `stop`.
.. versionchanged:: 3.1
If the `IOLoop` implementation supports non-integer objects
for "file descriptors", those objects will have their
``close`` method called when ``all_fds`` is true.
"""
raise NotImplementedError()
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for fd.
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
"""
raise NotImplementedError()
def update_handler(self, fd, events):
"""Changes the events we listen for fd."""
raise NotImplementedError()
def remove_handler(self, fd):
"""Stop listening for events on fd."""
raise NotImplementedError()
def set_blocking_signal_threshold(self, seconds, action):
"""Sends a signal if the `IOLoop` is blocked for more than
``seconds`` seconds.
Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
platform.
The action parameter is a Python signal handler. Read the
documentation for the `signal` module for more information.
If ``action`` is None, the process will be killed if it is
blocked for too long.
"""
raise NotImplementedError()
def set_blocking_log_threshold(self, seconds):
"""Logs a stack trace if the `IOLoop` is blocked for more than
``seconds`` seconds.
Equivalent to ``set_blocking_signal_threshold(seconds,
self.log_stack)``
"""
self.set_blocking_signal_threshold(seconds, self.log_stack)
def log_stack(self, signal, frame):
"""Signal handler to log the stack trace of the current thread.
For use with `set_blocking_signal_threshold`.
"""
gen_log.warning('IOLoop blocked for %f seconds in\n%s',
self._blocking_signal_threshold,
''.join(traceback.format_stack(frame)))
def start(self):
"""Starts the I/O loop.
The loop will run until one of the callbacks calls `stop()`, which
will make the loop stop after the current event iteration completes.
"""
raise NotImplementedError()
def stop(self):
"""Stop the I/O loop.
If the event loop is not currently running, the next call to `start()`
will return immediately.
To use asynchronous methods from otherwise-synchronous code (such as
unit tests), you can start and stop the event loop like this::
ioloop = IOLoop()
async_method(ioloop=ioloop, callback=ioloop.stop)
ioloop.start()
``ioloop.start()`` will return after ``async_method`` has run
its callback, whether that callback was invoked before or
after ``ioloop.start``.
Note that even after `stop` has been called, the `IOLoop` is not
completely stopped until `IOLoop.start` has also returned.
Some work that was scheduled before the call to `stop` may still
be run before the `IOLoop` shuts down.
"""
raise NotImplementedError()
def run_sync(self, func, timeout=None):
"""Starts the `IOLoop`, runs the given function, and stops the loop.
If the function returns a `.Future`, the `IOLoop` will run
until the future is resolved. If it raises an exception, the
`IOLoop` will stop and the exception will be re-raised to the
caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `TimeoutError` is raised.
This method is useful in conjunction with `tornado.gen.coroutine`
to allow asynchronous calls in a ``main()`` function::
@gen.coroutine
def main():
# do stuff...
if __name__ == '__main__':
IOLoop.instance().run_sync(main)
"""
future_cell = [None]
def run():
try:
result = func()
except Exception:
future_cell[0] = TracebackFuture()
future_cell[0].set_exc_info(sys.exc_info())
else:
if isinstance(result, Future):
future_cell[0] = result
else:
future_cell[0] = TracebackFuture()
future_cell[0].set_result(result)
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
if not future_cell[0].done():
raise TimeoutError('Operation timed out after %s seconds' % timeout)
return future_cell[0].result()
def time(self):
"""Returns the current time according to the `IOLoop`'s clock.
The return value is a floating-point number relative to an
unspecified time in the past.
By default, the `IOLoop`'s time function is `time.time`. However,
it may be configured to use e.g. `time.monotonic` instead.
Calls to `add_timeout` that pass a number instead of a
`datetime.timedelta` should use this function to compute the
appropriate time, so they can work no matter what time function
is chosen.
"""
return time.time()
def add_timeout(self, deadline, callback):
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
"""
raise NotImplementedError()
def remove_timeout(self, timeout):
"""Cancels a pending timeout.
The argument is a handle as returned by `add_timeout`. It is
safe to call `remove_timeout` even if the callback has already
been run.
"""
raise NotImplementedError()
def add_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
It is safe to call this method from any thread at any time,
except from a signal handler. Note that this is the **only**
method in `IOLoop` that makes this thread-safety guarantee; all
other interaction with the `IOLoop` must be done from that
`IOLoop`'s thread. `add_callback()` may be used to transfer
control from other threads to the `IOLoop`'s thread.
To add a callback from a signal handler, see
`add_callback_from_signal`.
"""
raise NotImplementedError()
def add_callback_from_signal(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
Safe for use from a Python signal handler; should not be used
otherwise.
Callbacks added with this method will be run without any
`.stack_context`, to avoid picking up the context of the function
that was interrupted by the signal.
"""
raise NotImplementedError()
def add_future(self, future, callback):
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
"""
assert isinstance(future, Future)
callback = stack_context.wrap(callback)
future.add_done_callback(
lambda future: self.add_callback(callback, future))
def _run_callback(self, callback):
"""Runs a callback with error handling.
For use in subclasses.
"""
try:
callback()
except Exception:
self.handle_callback_exception(callback)
def handle_callback_exception(self, callback):
"""This method is called whenever a callback run by the `IOLoop`
throws an exception.
By default simply logs the exception as an error. Subclasses
may override this method to customize reporting of exceptions.
The exception itself is not passed explicitly, but is available
in `sys.exc_info`.
"""
app_log.error("Exception in callback %r", callback, exc_info=True)
class PollIOLoop(IOLoop):
"""Base class for IOLoops built around a select-like function.
For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
(Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
`tornado.platform.select.SelectIOLoop` (all platforms).
"""
def initialize(self, impl, time_func=None):
super(PollIOLoop, self).initialize()
self._impl = impl
if hasattr(self._impl, 'fileno'):
set_close_exec(self._impl.fileno())
self.time_func = time_func or time.time
self._handlers = {}
self._events = {}
self._callbacks = []
self._callback_lock = threading.Lock()
self._timeouts = []
self._cancellations = 0
self._running = False
self._stopped = False
self._closing = False
self._thread_ident = None
self._blocking_signal_threshold = None
# Create a pipe that we send bogus data to when we want to wake
# the I/O loop when it is idle
self._waker = Waker()
self.add_handler(self._waker.fileno(),
lambda fd, events: self._waker.consume(),
self.READ)
def close(self, all_fds=False):
with self._callback_lock:
self._closing = True
self.remove_handler(self._waker.fileno())
if all_fds:
for fd in self._handlers.keys():
try:
close_method = getattr(fd, 'close', None)
if close_method is not None:
close_method()
else:
os.close(fd)
except Exception:
gen_log.debug("error closing fd %s", fd, exc_info=True)
self._waker.close()
self._impl.close()
def add_handler(self, fd, handler, events):
self._handlers[fd] = stack_context.wrap(handler)
self._impl.register(fd, events | self.ERROR)
def update_handler(self, fd, events):
self._impl.modify(fd, events | self.ERROR)
def remove_handler(self, fd):
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except Exception:
gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
def set_blocking_signal_threshold(self, seconds, action):
if not hasattr(signal, "setitimer"):
gen_log.error("set_blocking_signal_threshold requires a signal module "
"with the setitimer method")
return
self._blocking_signal_threshold = seconds
if seconds is not None:
signal.signal(signal.SIGALRM,
action if action is not None else signal.SIG_DFL)
def start(self):
if not logging.getLogger().handlers:
# The IOLoop catches and logs exceptions, so it's
# important that log output be visible. However, python's
# default behavior for non-root loggers (prior to python
# 3.2) is to print an unhelpful "no handlers could be
# found" message rather than the actual log entry, so we
# must explicitly configure logging if we've made it this
# far without anything.
logging.basicConfig()
if self._stopped:
self._stopped = False
return
old_current = getattr(IOLoop._current, "instance", None)
IOLoop._current.instance = self
self._thread_ident = thread.get_ident()
self._running = True
# signal.set_wakeup_fd closes a race condition in event loops:
# a signal may arrive at the beginning of select/poll/etc
# before it goes into its interruptible sleep, so the signal
# will be consumed without waking the select. The solution is
# for the (C, synchronous) signal handler to write to a pipe,
# which will then be seen by select.
#
# In python's signal handling semantics, this only matters on the
# main thread (fortunately, set_wakeup_fd only works on the main
# thread and will raise a ValueError otherwise).
#
# If someone has already set a wakeup fd, we don't want to
# disturb it. This is an issue for twisted, which does its
# SIGCHILD processing in response to its own wakeup fd being
# written to. As long as the wakeup fd is registered on the IOLoop,
# the loop will still wake up and everything should work.
old_wakeup_fd = None
if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
# requires python 2.6+, unix. set_wakeup_fd exists but crashes
# the python process on windows.
try:
old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
if old_wakeup_fd != -1:
# Already set, restore previous value. This is a little racy,
# but there's no clean get_wakeup_fd and in real use the
# IOLoop is just started once at the beginning.
signal.set_wakeup_fd(old_wakeup_fd)
old_wakeup_fd = None
except ValueError: # non-main thread
pass
while True:
poll_timeout = 3600.0
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
with self._callback_lock:
callbacks = self._callbacks
self._callbacks = []
for callback in callbacks:
self._run_callback(callback)
# Closures may be holding on to a lot of memory, so allow
# them to be freed before we go into our poll wait.
callbacks = callback = None
if self._timeouts:
now = self.time()
while self._timeouts:
if self._timeouts[0].callback is None:
# the timeout was cancelled
heapq.heappop(self._timeouts)
self._cancellations -= 1
elif self._timeouts[0].deadline <= now:
timeout = heapq.heappop(self._timeouts)
self._run_callback(timeout.callback)
del timeout
else:
seconds = self._timeouts[0].deadline - now
poll_timeout = min(seconds, poll_timeout)
break
if (self._cancellations > 512
and self._cancellations > (len(self._timeouts) >> 1)):
# Clean up the timeout queue when it gets large and it's
# more than half cancellations.
self._cancellations = 0
self._timeouts = [x for x in self._timeouts
if x.callback is not None]
heapq.heapify(self._timeouts)
if self._callbacks:
# If any callbacks or timeouts called add_callback,
# we don't want to wait in poll() before we run them.
poll_timeout = 0.0
if not self._running:
break
if self._blocking_signal_threshold is not None:
# clear alarm so it doesn't fire while poll is waiting for
# events.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
event_pairs = self._impl.poll(poll_timeout)
except Exception as e:
# Depending on python version and IOLoop implementation,
# different exception types may be thrown and there are
# two ways EINTR might be signaled:
# * e.errno == errno.EINTR
# * e.args is like (errno.EINTR, 'Interrupted system call')
if (getattr(e, 'errno', None) == errno.EINTR or
(isinstance(getattr(e, 'args', None), tuple) and
len(e.args) == 2 and e.args[0] == errno.EINTR)):
continue
else:
raise
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL,
self._blocking_signal_threshold, 0)
# Pop one fd at a time from the set of pending fds and run
# its handler. Since that handler may perform actions on
# other file descriptors, there may be reentrant calls to
# this IOLoop that update self._events
self._events.update(event_pairs)
while self._events:
fd, events = self._events.popitem()
try:
self._handlers[fd](fd, events)
except (OSError, IOError) as e:
if e.args[0] == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
app_log.error("Exception in I/O handler for fd %s",
fd, exc_info=True)
except Exception:
app_log.error("Exception in I/O handler for fd %s",
fd, exc_info=True)
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
IOLoop._current.instance = old_current
if old_wakeup_fd is not None:
signal.set_wakeup_fd(old_wakeup_fd)
def stop(self):
self._running = False
self._stopped = True
self._waker.wake()
def time(self):
return self.time_func()
def add_timeout(self, deadline, callback):
timeout = _Timeout(deadline, stack_context.wrap(callback), self)
heapq.heappush(self._timeouts, timeout)
return timeout
def remove_timeout(self, timeout):
# Removing from a heap is complicated, so just leave the defunct
# timeout object in the queue (see discussion in
# http://docs.python.org/library/heapq.html).
# If this turns out to be a problem, we could add a garbage
# collection pass whenever there are too many dead timeouts.
timeout.callback = None
self._cancellations += 1
def add_callback(self, callback, *args, **kwargs):
with self._callback_lock:
if self._closing:
raise RuntimeError("IOLoop is closing")
list_empty = not self._callbacks
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
if list_empty and thread.get_ident() != self._thread_ident:
# If we're in the IOLoop's thread, we know it's not currently
# polling. If we're not, and we added the first callback to an
# empty list, we may need to wake it up (it may wake up on its
# own, but an occasional extra wake is harmless). Waking
# up a polling IOLoop is relatively expensive, so we try to
# avoid it when we can.
self._waker.wake()
def add_callback_from_signal(self, callback, *args, **kwargs):
with stack_context.NullContext():
if thread.get_ident() != self._thread_ident:
# if the signal is handled on another thread, we can add
# it normally (modulo the NullContext)
self.add_callback(callback, *args, **kwargs)
else:
# If we're on the IOLoop's thread, we cannot use
# the regular add_callback because it may deadlock on
# _callback_lock. Blindly insert into self._callbacks.
# This is safe because the GIL makes list.append atomic.
# One subtlety is that if the signal interrupted the
# _callback_lock block in IOLoop.start, we may modify
# either the old or new version of self._callbacks,
# but either way will work.
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback']
def __init__(self, deadline, callback, io_loop):
if isinstance(deadline, numbers.Real):
self.deadline = deadline
elif isinstance(deadline, datetime.timedelta):
self.deadline = io_loop.time() + _Timeout.timedelta_to_seconds(deadline)
else:
raise TypeError("Unsupported deadline %r" % deadline)
self.callback = callback
@staticmethod
def timedelta_to_seconds(td):
"""Equivalent to td.total_seconds() (introduced in python 2.7)."""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
# Comparison methods to sort by deadline, with object id as a tiebreaker
# to guarantee a consistent ordering. The heapq module uses __le__
# in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
# use __lt__).
def __lt__(self, other):
return ((self.deadline, id(self)) <
(other.deadline, id(other)))
def __le__(self, other):
return ((self.deadline, id(self)) <=
(other.deadline, id(other)))
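# Illustrative sketch (not part of the original module): _Timeout instances
# sort by deadline (with id() as a tiebreaker), so a heap of them always pops
# the earliest deadline first. io_loop is only consulted for timedelta
# deadlines, so None is fine for plain numbers.
def _demo_timeout_ordering():
    timeouts = [_Timeout(t, None, None) for t in (3.0, 1.0, 2.0)]
    heapq.heapify(timeouts)
    return heapq.heappop(timeouts).deadline  # 1.0, the earliest deadline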
class PeriodicCallback(object):
"""Schedules the given callback to be called periodically.
The callback is called every ``callback_time`` milliseconds.
`start` must be called after the `PeriodicCallback` is created.
"""
def __init__(self, callback, callback_time, io_loop=None):
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self.io_loop = io_loop or IOLoop.current()
self._running = False
self._timeout = None
def start(self):
"""Starts the timer."""
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer."""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def _run(self):
if not self._running:
return
try:
self.callback()
except Exception:
app_log.error("Error in periodic callback", exc_info=True)
self._schedule_next()
def _schedule_next(self):
if self._running:
current_time = self.io_loop.time()
while self._next_timeout <= current_time:
self._next_timeout += self.callback_time / 1000.0
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
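# Illustrative sketch (not part of the original module): minimal use of the
# public API defined above, namely a PeriodicCallback driven by the singleton
# IOLoop plus a timeout that stops the loop after roughly two seconds.
# Wrapped in a function so importing this module stays side-effect free.
def _example_periodic_callback():
    counts = {'ticks': 0}
    def tick():
        counts['ticks'] += 1
    io_loop = IOLoop.instance()
    periodic = PeriodicCallback(tick, 500, io_loop=io_loop)  # every 500 ms
    periodic.start()
    # add_timeout accepts an absolute time or a datetime.timedelta
    io_loop.add_timeout(datetime.timedelta(seconds=2), io_loop.stop)
    io_loop.start()
    periodic.stop()
    return counts['ticks']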
|
|
# BEGIN_COPYRIGHT
#
# Copyright 2009-2015 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""
pydoop.hdfs.file -- HDFS File Objects
-------------------------------------
"""
import os
from pydoop.hdfs import common
def _complain_ifclosed(closed):
if closed:
raise ValueError("I/O operation on closed HDFS file object")
def _seek_with_boundary_checks(f, position, whence):
if whence == os.SEEK_CUR:
position += f.tell()
elif whence == os.SEEK_END:
position += f.size
position = max(0, position)
if position > f.size:
raise IOError('cannot seek past end of file')
if f.mode != 'r':
raise IOError('can only seek in read-only mode')
return position
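# Illustrative sketch (not part of the original module): the boundary rules
# above, exercised against a tiny stand-in object instead of a real HDFS file.
# _FakeReadableFile and _demo_seek_checks are hypothetical names.
class _FakeReadableFile(object):
    size = 100
    mode = 'r'
    def tell(self):
        return 40
def _demo_seek_checks():
    f = _FakeReadableFile()
    assert _seek_with_boundary_checks(f, 10, os.SEEK_SET) == 10
    assert _seek_with_boundary_checks(f, 20, os.SEEK_CUR) == 60   # 40 + 20
    assert _seek_with_boundary_checks(f, -30, os.SEEK_END) == 70  # 100 - 30
    assert _seek_with_boundary_checks(f, -999, os.SEEK_CUR) == 0  # clamped to 0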
class hdfs_file(object):
"""
Instances of this class represent HDFS file objects.
Objects from this class should not be instantiated directly. To
open an HDFS file, use :meth:`~.fs.hdfs.open_file`, or the
top-level ``open`` function in the hdfs package.
"""
ENDL = os.linesep
def __init__(self, raw_hdfs_file, fs, name, flags,
chunk_size=common.BUFSIZE):
if not chunk_size > 0:
raise ValueError("chunk size must be positive")
self.f = raw_hdfs_file
self.__fs = fs
self.__name = fs.get_path_info(name)["name"]
self.__size = fs.get_path_info(name)["size"]
self.__mode = "r" if flags == os.O_RDONLY else "w"
self.chunk_size = chunk_size
self.closed = False
self.__reset()
def __reset(self):
self.buffer_list = []
self.chunk = ""
self.EOF = False
self.p = 0
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@property
def fs(self):
"""
The file's hdfs instance.
"""
return self.__fs
@property
def name(self):
"""
The file's fully qualified name.
"""
return self.__name
@property
def size(self):
"""
The file's size in bytes. This attribute is initialized when the
file is opened and updated when it is closed.
"""
return self.__size
@property
def mode(self):
"""
The I/O mode for the file.
"""
return self.__mode
def __read_chunk(self):
self.chunk = self.f.read(self.chunk_size)
self.p = 0
if not self.chunk:
self.EOF = True
def __read_chunks_until_nl(self):
if self.EOF:
eol = self.chunk.find(self.ENDL, self.p)
return eol if eol > -1 else len(self.chunk)
if not self.chunk:
self.__read_chunk()
eol = self.chunk.find(self.ENDL, self.p)
while eol < 0 and not self.EOF:
if self.p < len(self.chunk):
self.buffer_list.append(self.chunk[self.p:])
self.__read_chunk()
eol = self.chunk.find(self.ENDL, self.p)
return eol if eol > -1 else len(self.chunk)
def readline(self):
"""
Read and return a line of text.
:rtype: str
:return: the next line of text in the file, including the
newline character
"""
_complain_ifclosed(self.closed)
eol = self.__read_chunks_until_nl()
line = "".join(self.buffer_list) + self.chunk[self.p:eol+1]
self.buffer_list = []
self.p = eol + 1
return line
def next(self):
"""
Return the next input line, or raise :class:`StopIteration`
when EOF is hit.
"""
_complain_ifclosed(self.closed)
line = self.readline()
if line == "":
raise StopIteration
return line
def __iter__(self):
return self
def available(self):
"""
Number of bytes that can be read from this input stream without
blocking.
:rtype: int
:return: available bytes
"""
_complain_ifclosed(self.closed)
return self.f.available()
def close(self):
"""
Close the file.
"""
if not self.closed:
self.closed = True
retval = self.f.close()
if self.mode == "w":
self.__size = self.fs.get_path_info(self.name)["size"]
return retval
def pread(self, position, length):
r"""
Read ``length`` bytes of data from the file, starting from
``position``\ .
:type position: int
:param position: position from which to read
:type length: int
:param length: the number of bytes to read
:rtype: string
:return: the chunk of data read from the file
"""
_complain_ifclosed(self.closed)
if position < 0:
raise ValueError("position must be >= 0")
if position > self.size:
raise IOError("position cannot be past EOF")
if length < 0:
length = self.size - position
return self.f.pread(position, length)
def pread_chunk(self, position, chunk):
r"""
Works like :meth:`pread`\ , but data is stored in the writable
buffer ``chunk`` rather than returned. Reads at most a number of
bytes equal to the size of ``chunk``\ .
:type position: int
:param position: position from which to read
:type chunk: writable string buffer
:param chunk: a c-like string buffer, such as the one returned by the
``create_string_buffer`` function in the :mod:`ctypes` module
:rtype: int
:return: the number of bytes read
"""
_complain_ifclosed(self.closed)
if position > self.size:
raise IOError("position cannot be past EOF")
return self.f.pread_chunk(position, chunk)
def read(self, length=-1):
"""
Read ``length`` bytes from the file. If ``length`` is negative or
omitted, read all data until EOF.
:type length: int
:param length: the number of bytes to read
:rtype: string
:return: the chunk of data read from the file
"""
_complain_ifclosed(self.closed)
# NOTE: libhdfs read stops at block boundaries: it is *essential*
# to ensure that we actually read the required number of bytes.
if length < 0:
length = self.size
chunks = []
while 1:
if length <= 0:
break
c = self.f.read(min(self.chunk_size, length))
if c == "":
break
chunks.append(c)
length -= len(c)
return "".join(chunks)
def read_chunk(self, chunk):
r"""
Works like :meth:`read`\ , but data is stored in the writable
buffer ``chunk`` rather than returned. Reads at most a number of
bytes equal to the size of ``chunk``\ .
:type chunk: writable string buffer
:param chunk: a c-like string buffer, such as the one returned by the
``create_string_buffer`` function in the :mod:`ctypes` module
:rtype: int
:return: the number of bytes read
"""
_complain_ifclosed(self.closed)
return self.f.read_chunk(chunk)
def seek(self, position, whence=os.SEEK_SET):
"""
Seek to ``position`` in file.
:type position: int
:param position: offset in bytes to seek to
:type whence: int
:param whence: defaults to ``os.SEEK_SET`` (absolute); other
values are ``os.SEEK_CUR`` (relative to the current position)
and ``os.SEEK_END`` (relative to the file's end).
"""
_complain_ifclosed(self.closed)
position = _seek_with_boundary_checks(self, position, whence)
self.__reset()
return self.f.seek(position)
def tell(self):
"""
Get the current byte offset in the file.
:rtype: int
:return: current offset in bytes
"""
_complain_ifclosed(self.closed)
return self.f.tell()
def write(self, data):
"""
Write ``data`` to the file.
:type data: string
:param data: the data to be written to the file
:rtype: int
:return: the number of bytes written
"""
_complain_ifclosed(self.closed)
return self.f.write(data)
def write_chunk(self, chunk):
"""
Write data from buffer ``chunk`` to the file.
:type chunk: writable string buffer
:param chunk: a c-like string buffer, such as the one returned by the
``create_string_buffer`` function in the :mod:`ctypes` module
:rtype: int
:return: the number of bytes written
"""
return self.write(chunk)
def flush(self):
"""
Force any buffered output to be written.
"""
_complain_ifclosed(self.closed)
return self.f.flush()
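# Illustrative sketch (not part of the original module): typical line-oriented
# use of an hdfs_file. Per the class docstring, instances come from
# fs.hdfs.open_file or the top-level ``open`` in the hdfs package; the exact
# import path used here (pydoop.hdfs.open) is an assumption.
def _example_count_lines(hdfs_path):
    import pydoop.hdfs as hdfs   # assumed helper exposing the top-level open()
    n = 0
    with hdfs.open(hdfs_path) as f:   # hdfs_file supports the with-statement
        for line in f:                # iteration goes through readline()/next()
            n += 1
    return n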
class local_file(file):
def __init__(self, fs, name, flags):
if not flags.startswith("r"):
local_file.__make_parents(fs, name)
super(local_file, self).__init__(name, flags)
self.__fs = fs
self.__name = os.path.abspath(super(local_file, self).name)
self.__size = os.fstat(super(local_file, self).fileno()).st_size
self.f = self
self.chunk_size = 0
@staticmethod
def __make_parents(fs, name):
d = os.path.dirname(name)
if d:
try:
fs.create_directory(d)
except IOError:
raise IOError("Cannot open file %s" % name)
@property
def fs(self):
return self.__fs
@property
def name(self):
return self.__name
@property
def size(self):
return self.__size
def write(self, data):
_complain_ifclosed(self.closed)
if isinstance(data, unicode):
data = data.encode(common.TEXT_ENCODING)
elif not isinstance(data, (basestring, bytearray)):
# access non string data through a buffer
data = str(buffer(data))
super(local_file, self).write(data)
return len(data)
def available(self):
_complain_ifclosed(self.closed)
return self.size
def close(self):
if self.mode == "w":
self.flush()
os.fsync(self.fileno())
self.__size = os.fstat(self.fileno()).st_size
super(local_file, self).close()
def seek(self, position, whence=os.SEEK_SET):
position = _seek_with_boundary_checks(self, position, whence)
return super(local_file, self).seek(position)
def pread(self, position, length):
_complain_ifclosed(self.closed)
if position < 0:
raise ValueError("Position must be >= 0")
old_pos = self.tell()
self.seek(position)
if length < 0:
length = self.size - position
data = self.read(length)
self.seek(old_pos)
return data
def pread_chunk(self, position, chunk):
_complain_ifclosed(self.closed)
data = self.pread(position, len(chunk))
chunk[:len(data)] = data
return len(data)
def read_chunk(self, chunk):
_complain_ifclosed(self.closed)
data = self.read(len(chunk))
chunk[:len(data)] = data
return len(data)
def write_chunk(self, chunk):
return self.write(chunk)
|
|
from __future__ import with_statement
import os
from os.path import isabs, join
from subprocess import Popen, check_call, PIPE, STDOUT
import tempfile
import shutil
from cStringIO import StringIO
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.misc import find_executable
from latex import latex
def preview(expr, output='png', viewer=None, euler=True, packages=(),
filename=None, outputbuffer=None, preamble=None, dvioptions=None,
outputTexFile=None, **latex_settings):
r"""
View expression or LaTeX markup in PNG, DVI, PostScript or PDF form.
If the expr argument is an expression, it will be exported to LaTeX and
then compiled using the available TeX distribution. The first argument,
'expr', may also be a LaTeX string. The function will then run the
appropriate viewer for the given output format or use the user-defined
one. By default, png output is generated.
By default, pretty Euler fonts are used for typesetting (they were used to
typeset the well-known "Concrete Mathematics" book). For that to work, you
need the 'eulervm.sty' LaTeX style (in Debian/Ubuntu, install the
texlive-fonts-extra package). If you prefer the default AMS fonts, or your
system lacks the 'eulervm' LaTeX package, then unset the 'euler' keyword
argument.
To use viewer auto-detection, let's say for 'png' output, issue
>>> from sympy import symbols, preview, Symbol
>>> x, y = symbols("x,y")
>>> preview(x + y, output='png') # doctest: +SKIP
This will choose 'pyglet' by default. To select a different one, do
>>> preview(x + y, output='png', viewer='gimp') # doctest: +SKIP
The 'png' format is considered special. For all other formats the rules
are slightly different. As an example, we will take the 'dvi' output format.
If you run
>>> preview(x + y, output='dvi') # doctest: +SKIP
then 'view' will look for available 'dvi' viewers on your system
(predefined in the function, so it will try evince first, then okular, kdvi and
xdvi). If nothing is found you will need to set the viewer explicitly.
>>> preview(x + y, output='dvi', viewer='superior-dvi-viewer') # doctest: +SKIP
This will skip auto-detection and will run the user-specified
'superior-dvi-viewer'. If 'view' fails to find it on your system it will
gracefully raise an exception.
You may also enter 'file' for the viewer argument. Doing so will cause
this function to return a file object in read-only mode, if 'filename'
is unset. However, if it is set, then 'preview' writes the generated
file to that filename instead.
There is also support for writing to a StringIO like object, which needs
to be passed to the 'outputbuffer' argument.
>>> from StringIO import StringIO
>>> obj = StringIO()
>>> preview(x + y, output='png', viewer='StringIO',
... outputbuffer=obj) # doctest: +SKIP
The LaTeX preamble can be customized by setting the 'preamble' keyword
argument. This can be used, e.g., to set a different font size, use a
custom documentclass, or import a certain set of LaTeX packages.
>>> preamble = "\\documentclass[10pt]{article}\n" \
... "\\usepackage{amsmath,amsfonts}\\begin{document}"
>>> preview(x + y, output='png', preamble=preamble) # doctest: +SKIP
If the value of 'output' is different from 'dvi' then command line
options can be set ('dvioptions' argument) for the execution of the
'dvi'+output conversion tool. These options have to be in the form of a
list of strings (see subprocess.Popen).
Additional keyword args will be passed to the latex call, e.g., the
symbol_names flag.
>>> phidd = Symbol('phidd')
>>> preview(phidd, symbol_names={phidd:r'\ddot{\varphi}'}) # doctest: +SKIP
For post-processing, the generated TeX file can be written to a file by
passing the desired filename to the 'outputTexFile' keyword
argument. To write the TeX code to a file named
"sample.tex" and run the default png viewer to display the resulting
bitmap, do
>>> preview(x+y, output='png', outputTexFile="sample.tex") # doctest: +SKIP
"""
special = [ 'pyglet' ]
if viewer is None:
if output == "png":
viewer = "pyglet"
else:
# sorted in order from most pretty to most ugly
# certainly debatable, but indeed 'gv' looks awful :)
# TODO add candidates for windows to list
candidates = {
"dvi": [ "evince", "okular", "kdvi", "xdvi" ],
"ps": [ "evince", "okular", "gsview", "gv" ],
"pdf": [ "evince", "okular", "kpdf", "acroread", "xpdf", "gv" ],
}
try:
for candidate in candidates[output]:
if find_executable(candidate):
viewer = candidate
break
else:
raise SystemError(
"No viewers found for '%s' output format." % output)
except KeyError:
raise SystemError("Invalid output format: %s" % output)
else:
if viewer == "file":
if filename is None:
SymPyDeprecationWarning(feature="Using viewer=\"file\" without a "
"specified filename ", last_supported_version="0.7.3",
use_instead="viewer=\"file\" and filename=\"desiredname\"")
elif viewer == "StringIO":
if outputbuffer is None:
raise ValueError("outputbuffer has to be a StringIO "
"compatible object if viewer=\"StringIO\"")
elif viewer not in special and not find_executable(viewer):
raise SystemError("Unrecognized viewer: %s" % viewer)
if preamble is None:
actual_packages = packages + ("amsmath", "amsfonts")
if euler:
actual_packages += ("euler",)
package_includes = "\n" + "\n".join(["\\usepackage{%s}" % p
for p in actual_packages])
preamble = r"""\documentclass[12pt]{article}
\pagestyle{empty}
%s
\begin{document}
""" % (package_includes)
else:
if len(packages) > 0:
raise ValueError("The \"packages\" keyword must not be set if a "
"custom LaTeX preamble was specified")
latex_main = preamble + '\n%s\n\n' + r"\end{document}"
if isinstance(expr, str):
latex_string = expr
else:
latex_string = latex(expr, mode='inline', **latex_settings)
try:
workdir = tempfile.mkdtemp()
with open(join(workdir, 'texput.tex'), 'w') as fh:
fh.write(latex_main % latex_string)
if outputTexFile is not None:
shutil.copyfile(join(workdir, 'texput.tex'), outputTexFile)
with open(os.devnull, 'w') as devnull:
check_call(['latex', '-halt-on-error', 'texput.tex'], cwd=workdir,
stdout=devnull, stderr=STDOUT)
if output != "dvi":
defaultoptions = {
"ps": [],
"pdf": [],
"png": ["-T", "tight", "-z", "9", "--truecolor"]
}
commandend = {
"ps": ["-o", "texput.ps", "texput.dvi"],
"pdf": ["texput.dvi", "texput.pdf"],
"png": ["-o", "texput.png", "texput.dvi"]
}
cmd = ["dvi" + output]
try:
if dvioptions is not None:
cmd.extend(dvioptions)
else:
cmd.extend(defaultoptions[output])
cmd.extend(commandend[output])
except KeyError:
raise SystemError("Invalid output format: %s" % output)
with open(os.devnull, 'w') as devnull:
check_call(cmd, cwd=workdir, stdout=devnull, stderr=STDOUT)
src = "texput.%s" % (output)
if viewer == "file":
if filename is None:
buffer = StringIO()
with open(join(workdir, src), 'rb') as fh:
buffer.write(fh.read())
return buffer
else:
shutil.move(join(workdir,src), filename)
elif viewer == "StringIO":
with open(join(workdir, src), 'rb') as fh:
outputbuffer.write(fh.read())
elif viewer == "pyglet":
try:
from pyglet import window, image, gl
from pyglet.window import key
except ImportError:
raise ImportError("pyglet is required for plotting.\n visit http://www.pyglet.org/")
if output == "png":
from pyglet.image.codecs.png import PNGImageDecoder
img = image.load(join(workdir, src), decoder=PNGImageDecoder())
else:
raise SystemError("pyglet preview works only for 'png' files.")
offset = 25
win = window.Window(
width=img.width + 2*offset,
height=img.height + 2*offset,
caption="sympy",
resizable=False
)
win.set_vsync(False)
try:
def on_close():
win.has_exit = True
win.on_close = on_close
def on_key_press(symbol, modifiers):
if symbol in [key.Q, key.ESCAPE]:
on_close()
win.on_key_press = on_key_press
def on_expose():
gl.glClearColor(1.0, 1.0, 1.0, 1.0)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
img.blit(
(win.width - img.width) / 2,
(win.height - img.height) / 2
)
win.on_expose = on_expose
while not win.has_exit:
win.dispatch_events()
win.flip()
except KeyboardInterrupt:
pass
win.close()
else:
with open(os.devnull, 'w') as devnull:
check_call([viewer, src], cwd=workdir, stdout=devnull,
stderr=STDOUT)
finally:
try:
shutil.rmtree(workdir) # delete directory
except OSError as e:
if e.errno != 2: # code 2 - no such file or directory
raise
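# Illustrative sketch (not part of the original module): a thin wrapper that
# renders a LaTeX string straight to a PNG on disk, relying on the documented
# viewer='file' + filename behaviour of preview(). The wrapper name and the
# sample markup in the comment are hypothetical.
def _render_latex_to_png(latex_string, png_path):
    preview(latex_string, output='png', viewer='file', filename=png_path)
# e.g. _render_latex_to_png(r"$\int_0^1 x^2\, dx$", "integral.png")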
|
|
'''
This is still a sketch.
The goal is to make this inherit from Cube,
but have it be created via
c = Cube()
bc = BinnedCube(c, binsize=50)
while still having access to the same visualization, saving, and processing tools.
'''
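# A minimal sketch of one way to get the behaviour described above (an
# assumption about the eventual design, not working code from this project):
# BinnedCube wraps an existing Cube, remembers the binsize, and delegates any
# missing attribute to the wrapped Cube so the same visualization, saving,
# and processing tools keep working. Cube itself is defined elsewhere.
class _BinnedCubeSketch(object):
    def __init__(self, cube, binsize=50):
        self.cube = cube          # the unbinned Cube this object wraps
        self.binsize = binsize    # wavelength bin width, in angstroms
    def __getattr__(self, name):
        # only called when normal lookup fails; fall back to the wrapped Cube
        return getattr(self.cube, name)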
'''
The stuff below is mostly code that came out of the original Cube (some not)
### FIX ME ### (looks like this was made before multiple widths became a thing)
def roughLC(self, target=None, comps=None, wavelengths=None, **kwargs):
'''construct a rough LC, over a given wavelength bin'''
# if comps is just one-element, make it into a list
target = self.target
comps = self.comparisons
self.speak('updating the rough LC with target {0} and comps {1} and wavelength range {2}'.format(target, comps, wavelengths))
w = self.spectral['wavelength']
if wavelengths is None:
wavelengths = [np.min(w), np.max(w)]
blueenough = (w <= np.max(wavelengths))
redenough = (w >= np.min(wavelengths))
waveok = blueenough*redenough
targetflux = self.cubes['raw_counts'][target][:,waveok].sum(-1)
comparisonflux = self.cubes['raw_counts'][comps].sum(0)[:,waveok].sum(-1)
self.speak('comp/targ is typically {0}'.format(np.median(comparisonflux/targetflux)))
self.temporal['lc'] = targetflux/comparisonflux
self.temporal['lc'] /= np.median(self.temporal['lc'])
self.speak('rms is {0}'.format(np.std(self.temporal['lc'])))
plt.ion()
x, y = self.temporal['bjd'], self.temporal['lc']
ok = self.temporal['ok']
plt.plot(x[ok], y[ok], **kwargs)
# ????????
def doubleplot(self,
binsize=500,
wavemin=4500, wavemax=9000,
divide=False, ylim=None,
median=False,
title=None):
self.populate()
c = self
name = c.obs.target.name
date = c.obs.night.name
wavelengths = np.arange(wavemin, wavemax, binsize)
lcs = []
import craftroom.cmaps
blue, red = 'indigo', 'darkorange'
cmap = craftroom.cmaps.one2another(blue, red)
plt.ioff()
for i, w in enumerate(wavelengths):
c.roughLC(wavelengths=[w-binsize/2, w+binsize/2])
ok = np.array(c.temporal['ok'])
lc = c.temporal['bjd']+0.0, c.temporal['lc'] +0.0, ok
lcs.append(lc)
offset = int(c.temporal['bjd'][0])
plt.figure(name, figsize=(8,6), dpi=70)
if median:
gs = plt.matplotlib.gridspec.GridSpec(3,1,
height_ratios=[1,1,.5], hspace=0.02)
else:
gs = plt.matplotlib.gridspec.GridSpec(2,1,
height_ratios=[1,.5], hspace=0.02)
axlc = plt.subplot(gs[-1])
plt.xlabel('BJD - {0}'.format(offset))
plt.ylabel('Relative Flux')
n, m = len(lc[2]), len(wavelengths)
image, imageok = np.zeros((n,m)), np.zeros((n,m))
for i, lc in enumerate(lcs):
fraction = i/(len(wavelengths) - 1.0)
t = lc[0] - offset
flux = lc[1]
ok = lc[2]
image[:,i] = flux + 0.0
imageok[:,i] = ok + 0
#print ok
plt.plot(t[ok], flux[ok], alpha=binsize/500.0, linewidth=1,color=cmap(fraction))
if ylim is None:
valid = np.nonzero((imageok*np.isfinite(image)).flatten())[0]
ylim = np.percentile(image.flatten()[valid], [1,99])
plt.ylim(*ylim)
plt.xlim(t.min(), t.max())
if divide:
image /= np.median(image, 1)[:,np.newaxis]
axim = plt.subplot(gs[0])
kw = dict(interpolation='nearest', cmap='gray',
vmin=ylim[0], vmax=ylim[1], aspect='auto',
extent=[min(t), max(t),
(min(wavelengths) - binsize/2.0)/10, (max(wavelengths) + binsize/2.0)/10],
origin='lower')
axim.imshow(image.T, **kw)
plt.setp(axim.get_xticklabels(), visible=False)
if title is None:
title = '{name} with {instrument}\n[from {0}nm ({blue}) to {1}nm ({red}) in {2}nm-wide bins]'.format( wavemin/10, wavemax/10, binsize/10, name=name,blue=blue, red=red, instrument=c.obs.instrument.name)
plt.title(title)
plt.ylabel('Wavelength (nm)')
if median:
axmed = plt.subplot(gs[1])
divided = (image/np.median(image, 1)[:,np.newaxis])
kw['vmin'], kw['vmax'] = np.percentile(divided, [5,95])
axmed.imshow(divided.T, **kw)
plt.setp(axmed.get_xticklabels(), visible=False)
plt.draw()
if divide:
filename = '{0}_binto{1}_{2}_normalized.pdf'.format(name,binsize, date)
else:
filename = '{0}_binto{1}_{2}.pdf'.format(name,binsize, date)
plt.savefig(filename)
### FIX ME (not used)
def convolveCube(self,width=0.5):
'''Take a spectral cube, convolve it with a Gaussian (NOT USED!).'''
c = self.flux
w = self.spectral['wavelength']
plt.ion()
x = np.arange(-width*10, width*10)
gauss = lambda x: np.exp(-0.5*x**2/width**2)
new_cube = np.copy(c)
for star in range(len(c[:,0,0])):
plt.figure('convolution')
for i in range(len(c[0,:,0])):
new_cube[star,i,:] = np.convolve(c[star,i,:], gauss(x),'same')
plt.plot(c[star,i,:])
plt.plot(new_cube[star,i,:])
self.flux = new_cube
def createBins(self, binsize=250, remake=False):
'''Take a spectral cube, and creates big wavelength bins.'''
self.populate()
self.speak("Binning the spectral cube to {0}A binsize.".format(binsize))
#self.shiftCube(plot=True)
#self.show()
wavelength = self.spectral['wavelength']
#dnpixelsdw = self.spectral['dnpixelsdw']
# the shape of the cube
nStars, nTimes, nWaves = self.cubes['raw_counts'].shape
plt.ion()
self.binned_cubes = {}
# define bins
bin_starts = np.arange(wavelength.min(), wavelength.max() - binsize/2.0, binsize)
bin_ends = bin_starts + binsize
bin_centers = (bin_starts + bin_ends)/2.0
nBins = bin_centers.size
bin_ok = np.zeros((nStars, nBins))
satlimit = 150000
faintlimit = 2500
# loop through stars, determining which bins are okay for each
for k in self.cubes.keys():
self.speak('binning {0} to {1}A bins'.format(k, binsize))
shape = (self.numberofstars, self.numberoftimes, self.numberofwavelengths/binsize, binsize)
# integrate spectrum over bin (dndw factor accounts for the fractional pixels represented by resampled cube spectrum
# !!!!! DO I NEED TO INCLUDE DNPIXELSDW??
# this was the old version:
# self.binned_cubes[newkey][star, time, bin] = scipy.integrate.trapz(wholespectrum[mask]*dnpixelsdw[mask], wavelength[mask])
#if k == 'ok':
# dndw = 1
#else:
# dndw = self.spectral['dnpixelsdw'].reshape(1, 1, self.numberofwavelengths)
dndw = 1  # FIXME: placeholder so this runs; revisit the dnpixelsdw weighting question above
self.binned_cubes[k] = (self.cubes[k]*dndw).reshape(shape).sum(-1)
if k=='width' or k =='centroid' or k =='peak':
self.binned_cubes[k] /= (dndw).reshape((1,1, self.numberofwavelengths/binsize, binsize)).sum(-1)
if k=='ok':
self.binned_cubes[k] = (self.binned_cubes[k]/binsize).astype(np.bool)
#
self.bin_centers = bin_centers
self.binned_cubes = astropy.table.Table(self.binned_cubes)
#np.save(binned_filename, (self.binned_cubes, self.bin_centers, self.binned_cubes['ok']))
#self.speak(" Saved binned cube to {0}".format(binned_filename))
def correctBins(self, **kw):
'''Use comparison stars to correct for atmospheric losses, create a self.binned_corrected.'''
self.populate()
self.speak('using comparison stars {0} to correct for atmospheric losses'.format(self.comparisons))
wavelength = self.bin_centers
vmin = 0.98
vmax = 1.02
nStars, nTimes, nWaves = self.binned_cubes['raw_counts'].shape
# create empty correction and uncertainty arrays
correction, uncertainty = np.ones((nTimes, nWaves)), np.ones((nTimes, nWaves))
def weightedsum(array):
return ((array*self.binned_cubes['ok'])[self.comparisons,:,:].sum(0)/(self.binned_cubes['ok'])[self.comparisons,:,:].sum(0))
correction = weightedsum(self.binned_cubes['raw_counts'])
uncertainty = np.sqrt(weightedsum(self.binned_cubes['raw_counts'] + self.binned_cubes['sky']))
self.binned_correction = np.ma.MaskedArray(correction, mask=((self.binned_cubes['ok']==False).sum(0).astype(np.bool)), fill_value=np.nan)
self.binned_correction_uncertainty = np.ma.MaskedArray(uncertainty, mask=((self.binned_cubes['ok']==False).sum(0).astype(np.bool)), fill_value=np.nan)
# normalize the correction spectrum to be close to one
mediancompositespectrum = np.ma.median(self.binned_correction, 0)
self.binned_correction /= mediancompositespectrum.reshape(1,nWaves)
self.binned_correction_uncertainty /= mediancompositespectrum.reshape(1,nWaves)
#self.display.one(self.binned_correction.filled(), clobber=True)
#self.display.one(self.binned_correction_uncertainty.filled())
self.binned_cubes['corrected'] = self.binned_cubes['raw_counts']/self.binned_correction
photonnoise = np.sqrt(self.binned_cubes['raw_counts'] + self.binned_cubes['sky'])/self.binned_cubes['raw_counts']
correctionnoise = self.binned_correction_uncertainty
self.binned_cubes['uncertainty'] = np.sqrt(photonnoise**2 + correctionnoise**2)
def imageBins(self, **kw):
self.createBins(**kw)
self.correctBins(**kw)
figure = plt.figure(figsize=(10,10), dpi=70)
gs = plt.matplotlib.gridspec.GridSpec(self.numberofstars, 2)
kw = dict(cmap='gray')
ax=None
for i in range(self.numberofstars):
ax = plt.subplot(gs[i,0], sharex=ax, sharey=ax)
ax.imshow(self.binned_cubes['raw_counts'][i], **kw)
ax = plt.subplot(gs[i,1], sharex=ax, sharey=ax)
ax.imshow(self.binned_cubes['corrected'][i], **kw)
def makeMeanSpectrum(self, plot=False):
self.populate()
wavelength = self.spectral['wavelength']
spectrum = np.median(self.cubes['raw_counts'][self.obs.target,:,:],1).flatten()
assert(len(spectrum) == len(wavelength))
if plot:
fi, ax = plt.subplots(1)
unit = 10.0
ax.plot(wavelength/unit, spectrum*unit)
ax.set_xlabel('Wavelength (nm)')
ax.set_ylabel('Flux (photons/nm/exposure)')
self.speak("saving median spectrum to")
filename = os.path.join(self.directory, 'medianSpectrum.npy')
self.speak(filename)
np.save(filename, (wavelength, spectrum))
def makeLCs(self,binsize=250, remake=False):
'''Wrapper to go from extracted spectra to binned, multiwavelength lightcurves.'''
self.populate()
# make (and save) and mean spectrum, for plotting comparisons at later steps
self.makeMeanSpectrum()
# pick the target star
target = self.target
comparisons = self.obs.comparisons
# bin the cube into manageable wavelength bins
self.createBins(binsize=binsize)
# use comparison star(s) to divide out flux losses
self.correctBins()
# setup
nStars, nTimes, nWaves = self.binned_cubes['corrected'].shape
bw = self.bin_centers
binsize = bw[1] - bw[0]
bin_starts = bw - binsize/2.0
bin_ends = bw + binsize/2.0
lcDirectory = os.path.join(self.directory, "chromatic{binsize:05.0f}/".format(binsize=binsize))
mkdir(lcDirectory)
lcDirectory = os.path.join(lcDirectory, 'originalLCs/')
mkdir(lcDirectory)
self.lcs = []
# loop through wavelength bins
for wave in range(nWaves):
left, right = bin_starts[wave], bin_ends[wave]
lcfilename = os.path.join(lcDirectory, '{0:05d}to{1:05d}.lightcurve'.format(np.int(left), np.int(right)))  # no leading '/', which would make join discard lcDirectory
# is there *any* good data at this wavelength?
if self.binned_cubes['ok'][target,:,wave].any():
# determine where the target star is ok and the correction is ok (on a time-by-time basis)
ok = ((self.binned_cubes['ok'][target,:,wave] != False).flatten()*(self.binned_correction.mask[:,wave] == False).flatten())
# report what's being made
self.speak('making light curve for {0} to {1}A, with {2} good points'.format(bin_starts[wave], bin_ends[wave], np.sum(ok != False)))
# create an empty LC object
lc = astropy.table.Table()
lc['bjd'] = self.temporal['bjd']
lc['flux'] = self.binned_cubes['corrected'][target,:,wave].flatten()/np.median(self.binned_cubes['raw_counts'][target,:,wave].flatten())
lc['uncertainty'] = self.binned_cubes['uncertainty'][target,:,wave].flatten()
lc['ok'] = ok.astype(np.int)
# pull out global values
for key in ['airmass', 'rotatore']:
lc['{0}'.format(key)] = self.temporal[key]
# pull out the star-by-star (wavelength-independent) quantities
for key in ['width', 'centroid', 'shift']:
try:
lc['{0}_target'.format(key)] = self.squares[key][target]
for comparison in comparisons:
lc['{0}_star{1:02.0f}'.format(key, comparison)] = self.squares[key][comparison]
except KeyError:
self.speak("{} couldn't be found!".format(key))
# pull out the star-by-star wavelength specific values
for key in ['sky', 'peak']:
lc['{0}_target'.format(key)] = self.binned_cubes[key][target,:,wave]
for comparison in comparisons:
lc['{0}_star{1:02.0f}'.format(key, comparison)] = self.binned_cubes[key][comparison,:,wave]
# pull out the star-by-star wavelength specific values that should be measured relative to the more global values
for key in ['width', 'centroid']:
lc['d{0}_target'.format(key)] = self.binned_cubes[key][target,:,wave] - lc['{0}_target'.format(key)]
for comparison in comparisons:
lc['d{0}_star{1:02.0f}'.format(key, comparison)] = self.binned_cubes[key][comparison,:,wave] - lc['{0}_star{1:02.0f}'.format(key, comparison)]
#lc.populate(bjd, flux, uncertainty, **lc)
table = astropy.table.Table(lc)
table['bjd'].format = '.10f'
#table = table[table['ok'].astype(np.bool)]
# REMOVED TO MAKE SURE MASKING IS EASIER AT LATER STEP
table.write(lcfilename, format='ascii.fixed_width', bookend=False)
self.speak('saved light curve to')
self.speak('{0}'.format(lcfilename))
'''for key in self.binned_cubes.keys():
if key != 'corrected' and key != 'error':
dict[key+'_target'] = self.binned_cubes[key][target,ok,wave].flatten()
for comparison in self.obs.comparisons:
dict[key+'_comparison{0:02.0f}'.format(comparison)] = self.binned_cubes[key][comparison,ok,wave].flatten()
for k in keystoinclude:
if k == 'ok':
continue
if k == 'airmass':
newkey = k
else:
newkey = 'global_{0}'.format(k)
dict[k] = self.temporal[k][ok]
assert(np.isfinite(flux[ok]).all())
assert(np.sum(ok) > 0)
self.speak(bjd.flatten()[ok].size)
lc.populate(bjd[ok], flux[ok], error[ok], **dict)
#lc.plot()
self.speak(lc)
lc.save()
self.lcs.append(lc)'''
def loadLCs(self, binsize=250):
lcDirectory = os.path.join(self.directory, 'lc_binby' + ('%d' % binsize) + '/')
g = glob.glob(os.path.join(lcDirectory, 'lc_*.npy'))
wavelengths = []
lcs = []
for file in g:
lc = LC(self.obs, filename=file)
if lc.lc is not None:
wavelengths.append(lc.wavelength)
lcs.append(lc)
self.lcs = np.array(lcs)
self.lcs = self.lcs[np.argsort(wavelengths)]
return self.lcs
def imageTarget(self, title=None, vmin=None, vmax=None):
'''Make an image of the input cube.'''
if title is None:
title = self.obs.target.name + ' | ' + self.obs.night
self.speak(" Trying to image bins for " + title)
bin_centers = self.bin_centers
bin_ok = self.binned_cubes['ok']
target = self.target_raw
'''Take binned lightcurves, image them. Returns the bins with median correction subtracted.'''
plt.ion()
if vmin == None:
vmin = 0.985
if vmax == None:
vmax = 1.005
nTimes, nWaves = target.shape
fi, ax = plt.subplots(4,1, figsize=(4.5,12), sharex=True, sharey=True)
plt.subplots_adjust()
ax[0].set_title(title)
binsize = bin_centers[1] - bin_centers[0]
self.bin_starts = bin_centers - binsize/2.0
self.bin_ends = bin_centers + binsize/2.0
nBins = bin_centers.size
normalized = np.ones_like(self.target_raw)
targets = [self.target_raw, self.target_corrected, self.target_median, self.target_divided]
names = ['Raw Detected Flux', 'Divided by Comparison', 'Median Transit', 'Divided by Transit']
for i in range(len(targets)):
target = targets[i]
medianspectrum = np.median(target,0)*np.ones_like(target)
normalized = target/medianspectrum
ax[i].imshow(normalized, vmin=vmin, vmax=vmax, extent=[self.bin_starts.min(), self.bin_ends.max(), 0, nTimes], aspect='auto', cmap='gray', interpolation='nearest')
ax[i].set_ylabel(names[i])
ax[-1].set_xlabel('Wavelength (angstroms)')
ax[-1].set_xlim(self.bin_starts.min(), self.bin_ends.max())
for bin in range(nBins):
if bin_ok[self.obs.target,bin] == 0:
for a in ax:
a.axvspan(self.bin_starts[bin], self.bin_ends[bin], alpha=0.7, color='white', edgecolor=None)
plt.tight_layout(h_pad=0.0)
plt.savefig(os.path.join(self.obs.reducer.extractionDirectory, 'divided_{0}.pdf'.format(self.obs.night)))
a = raw_input('imaging target')
def help(self):
self.speak(cubehelp)
class LC():
def __init__(self, obs, left=None, right=None, filename=None):
self.obs = obs
if filename is not None:
self.load(filename)
else:
self.left = left
self.right = right
self.setup()
def setup(self):
self.wavelength = (self.left + self.right)/2.0
self.binsize = self.right - self.left
self.filename = os.path.join(self.directory, 'lc_binby' + ('%d' % self.binsize) + '/lc_{0:05d}to{1:05d}.npy'.format(np.int(self.left), np.int(self.right)))
def populate(self, bjd, flux, error, **kwargs):
# set up the column names for the light curve record array
types = [('bjd', np.float), ('flux', np.float), ('error', np.float)]
for key in kwargs.keys():
types.append((key,np.float))
# populate the columns with data
self.lc = np.zeros(bjd.size, types)
self.lc['bjd'] = bjd
self.lc['flux'] = flux
self.lc['error'] = error
for key in kwargs.keys():
self.lc[key] = kwargs[key]
def save(self):
np.save(self.filename, self.lc)
def load(self, filename):
self.left = np.float(filename.split('lc_')[-1].split('to')[-2])
self.right = np.float(filename.split('.npy')[-2].split('to')[-1])
self.setup()
assert(self.filename == filename)
self.lc = np.load(self.filename)
def plot(self):
try:
for a in self.ax.values():
a.cla()
except:
self.cubekeystoplot = ['flux', 'raw_counts_target', 'sky_target', 'airmass', 'width_target', 'centroid_target', 'peak_target']
gs = plt.matplotlib.gridspec.GridSpec(len(self.cubekeystoplot), 1, height_ratios=[5,1,1,1,1,1,1], wspace=0, hspace=0)
self.ax = {}
for i in range(len(self.cubekeystoplot)):
key = self.cubekeystoplot[i]
try:
sharex = self.ax[self.cubekeystoplot[0]]
except:
sharex = None
self.ax[key] = plt.subplot(gs[i], sharex=sharex)
for key in self.cubekeystoplot:
self.ax[key].plot(self.lc['bjd'], self.lc[key], markersize=1, marker='o', alpha=0.3, color='black', linewidth=0)
self.ax[key].set_xlim(self.lc['bjd'].min(), self.lc['bjd'].max())
self.ax[key].set_ylabel(key)
plt.draw()
bla = raw_input(self.filename + '?')
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.compiler.jit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.compiler import jit
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import function
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import gradients
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# pylint: enable=g-import-not-at-top
_REGISTERED_OPS = op_def_registry.get_registered_ops()
def enable_jit_nonstateful(node_def):
try:
return not _REGISTERED_OPS[node_def.op].is_stateful
except KeyError:
raise ValueError("Unregistered op being created: %s" % node_def)
class JITTest(test.TestCase):
def compute(self, use_jit, compute_fn):
random_seed.set_random_seed(1234)
with self.session(graph=ops.Graph()) as sess:
with jit.experimental_jit_scope(use_jit):
r = compute_fn()
sess.run(variables.global_variables_initializer())
return (r, sess.run(r))
def testJITCreateOpsLambda(self):
"""Test several ways of customizing the compilation attribute."""
def create_ops():
with variable_scope.variable_scope(
"root",
initializer=init_ops.random_uniform_initializer(
-0.1, 0.1, seed=2)):
inputs = random_ops.random_uniform((1,), seed=1)
return inputs
v_false_1_t, v_false_1 = self.compute(False, create_ops)
_, v_false_2 = self.compute(False, create_ops)
v_true_1_t, v_true_1 = self.compute(enable_jit_nonstateful, create_ops)
_, v_true_2 = self.compute(enable_jit_nonstateful, create_ops)
v_all_true_t, _ = self.compute(True, create_ops)
self.assertFalse(v_false_1_t.op.get_attr("_XlaCompile"))
v_true_1_t_sampler_op = v_true_1_t.graph.get_operation_by_name(
"root/random_uniform/RandomUniform")
v_all_true_t_sampler_op = v_all_true_t.graph.get_operation_by_name(
"root/random_uniform/RandomUniform")
self.assertFalse(v_true_1_t_sampler_op.get_attr("_XlaCompile"))
self.assertTrue(v_all_true_t_sampler_op.get_attr("_XlaCompile"))
self.assertTrue(v_true_1_t.op.get_attr("_XlaCompile"))
self.assertTrue(v_all_true_t.op.get_attr("_XlaCompile"))
# Additionally ensure that where no JIT compilation happens on the
# random_uniform op, the output values are identical to the case
# where no JIT compilation happens anywhere.
self.assertAllClose(v_false_1, v_false_2)
self.assertAllClose(v_true_1, v_true_2)
self.assertAllClose(v_false_1, v_true_1)
def testJITXlaScope(self):
with self.session(graph=ops.Graph()):
with jit.experimental_jit_scope(True):
# XlaScope 0
a1 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope 1
a2 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope still 1, depth 1
a3 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope still 1, depth 2
a4 = constant_op.constant(1)
# XlaScope still 1, depth 1
a5 = constant_op.constant(1)
with jit.experimental_jit_scope(True):
# XlaScope now 2, depth 0
a6 = constant_op.constant(1)
self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a3.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a4.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a5.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_2", a6.op.get_attr("_XlaScope"))
def testJITVariableSeed(self):
"""Test that the stateful initializer is not marked for compilation.
XLA does not currently support seeded initialization and XLA initializers
    therefore return different values than their non-XLA counterparts. Here
    we ensure that if JIT compilation is disabled for the initializers, we
    get the same variable values as if no JIT compilation happened anywhere.
"""
def create_ops():
with variable_scope.variable_scope(
"root",
initializer=init_ops.random_uniform_initializer(
-0.1, 0.1, seed=2)):
inputs = variable_scope.get_variable("var", (1,))
return inputs
_, v_false_1 = self.compute(False, create_ops)
_, v_false_2 = self.compute(False, create_ops)
_, v_true_1 = self.compute(enable_jit_nonstateful, create_ops)
_, v_true_2 = self.compute(enable_jit_nonstateful, create_ops)
self.assertAllClose(v_false_1, v_false_2)
self.assertAllClose(v_true_1, v_true_2)
self.assertAllClose(v_false_1, v_true_1)
def testDefunNoJitScope(self):
with self.session(graph=ops.Graph()):
@function.Defun(compiled=True, noinline=True)
def mulop(x1, x2):
return x1 * x2
x = constant_op.constant(1.0)
r = mulop(x, x)
# Ensure the forward function is compiled.
graph_def = r.graph.as_graph_def()
func_attrs = graph_def.library.function[0].attr
self.assertTrue(func_attrs["_XlaCompile"].b)
# No enclosing jit scope so function sets its own value for _XlaScope.
self.assertEqual(b"function_mulop", func_attrs["_XlaScope"].s)
def testDefunInheritsJitScope(self):
with self.session(graph=ops.Graph()):
with jit.experimental_jit_scope(True):
@function.Defun(compiled=True, noinline=True)
def mulop(x1, x2):
return x1 * x2
x = constant_op.constant(1.0)
r = mulop(x, x)
# Ensure the forward function is compiled.
graph_def = r.graph.as_graph_def()
func_attrs = graph_def.library.function[0].attr
self.assertTrue(func_attrs["_XlaCompile"].b)
# Ensure _XlaScope is inherited from enclosing context.
self.assertEqual(b"jit_scope_0", func_attrs["_XlaScope"].s)
class CompilationEnabledInGradientTest(test.TestCase):
def testCompilationInGradient(self):
with self.test_session():
x = constant_op.constant([[3.]])
y_nc = math_ops.matmul(x, x, name="not_compiled")
with jit.experimental_jit_scope():
y_c = math_ops.matmul(y_nc, y_nc, name="compiled")
x_grads = gradients.gradients([y_c], [x])[0]
operations = x.graph.get_operations()
c_grad_ops = [
op for op in operations if "gradients/compiled" in op.name]
nc_grad_ops = [
op for op in operations if "gradients/not_compiled" in op.name]
self.assertGreater(len(c_grad_ops), 0)
self.assertGreater(len(nc_grad_ops), 0)
for cg in c_grad_ops:
self.assertTrue(cg.get_attr("_XlaCompile"))
for ncg in nc_grad_ops:
with self.assertRaisesRegexp(ValueError, "[Nn]o attr named"):
ncg.get_attr("_XlaCompile")
# d/dx (x ** 4) = 4 * (x ** 3)
self.assertAllClose([[108]], x_grads.eval())
def testCompilationGradientScopeNames(self):
with self.session(graph=ops.Graph()):
with jit.experimental_jit_scope():
# XlaScope 0
a1 = constant_op.constant([[1.]])
a1t = math_ops.matmul(a1, a1)
with jit.experimental_jit_scope():
# XlaScope 1
a2 = constant_op.constant([[1.]])
a2t = math_ops.matmul(a2, a2)
self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope"))
grad_a1 = gradients.gradients(a1t, a1, name="GA")[0]
grad_a2 = gradients.gradients(a2t, a2, name="GB")[0]
grad_a1 = grad_a1.op.inputs[0]
grad_a2 = grad_a2.op.inputs[0]
self.assertTrue(grad_a1.op.get_attr("_XlaCompile"))
self.assertTrue(grad_a2.op.get_attr("_XlaCompile"))
self.assertEqual(b"jit_scope_0", grad_a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", grad_a2.op.get_attr("_XlaScope"))
def testCompilationSeparateGradientScopeNames(self):
with self.session(graph=ops.Graph()):
with jit.experimental_jit_scope(True, separate_compiled_gradients=True):
# XlaScope 0
a1 = constant_op.constant([[1.]])
a1t = math_ops.matmul(a1, a1)
with jit.experimental_jit_scope(True, separate_compiled_gradients=True):
# XlaScope 1
a2 = constant_op.constant([[1.]])
a2t = math_ops.matmul(a2, a2)
self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope"))
grad_a1 = gradients.gradients(a1t, a1, name="GA")[0]
grad_a2 = gradients.gradients(a2t, a2, name="GB")[0]
grad_a1 = grad_a1.op.inputs[0]
grad_a2 = grad_a2.op.inputs[0]
self.assertTrue(grad_a1.op.get_attr("_XlaCompile"))
self.assertTrue(grad_a2.op.get_attr("_XlaCompile"))
self.assertEqual(b"jit_scope_0_grad_GA",
grad_a1.op.get_attr("_XlaScope"))
self.assertEqual(b"jit_scope_1_grad_GB",
grad_a2.op.get_attr("_XlaScope"))
def testPlaysNicelyWithDefun(self):
with self.session(graph=ops.Graph()) as sess:
with jit.experimental_jit_scope(True):
@function.Defun(compiled=True, noinline=True)
def mulop(x1, x2):
return x1 * x2
x = constant_op.constant(1.0)
r = mulop(x, x)
g_r = gradients.gradients(r, x, name="GA")[0]
# Ensure the forward function is compiled.
graph_def = r.graph.as_graph_def()
func_attrs = graph_def.library.function[0].attr
self.assertTrue(func_attrs["_XlaCompile"].b)
self.assertEqual(b"jit_scope_0", func_attrs["_XlaScope"].s)
# Ensure the gradient (SymbolicGradient) is compiled, with the same
# _XlaScope as the function itself.
grad_op = g_r.op.inputs[0].op
self.assertTrue(grad_op.get_attr("_XlaCompile"))
self.assertEqual(b"jit_scope_0", grad_op.get_attr("_XlaScope"))
# Ensure the ops run: grad(x1*x1) = 2*x1
self.assertAllClose([1.0, 1.0, 2.0], sess.run([x, r, g_r]))
def testPlaysNicelyWithDefunSeparateGradientScope(self):
with self.session(graph=ops.Graph()) as sess:
with jit.experimental_jit_scope(True):
@function.Defun(
compiled=True, noinline=True, separate_compiled_gradients=True)
def mulop(x1, x2):
return x1 * x2
x = constant_op.constant(1.0)
r = mulop(x, x)
g_r = gradients.gradients(r, x, name="GA")[0]
# Ensure the forward function is compiled.
graph_def = r.graph.as_graph_def()
func_attrs = graph_def.library.function[0].attr
self.assertTrue(func_attrs["_XlaCompile"].b)
self.assertEqual(b"jit_scope_0", func_attrs["_XlaScope"].s)
# Ensure the gradient (SymbolicGradient) is compiled, with a different
# _XlaScope from the function itself.
grad_op = g_r.op.inputs[0].op
self.assertTrue(grad_op.get_attr("_XlaCompile"))
self.assertEqual(b"jit_scope_0_grad_GA",
grad_op.get_attr("_XlaScope"))
# Ensure the ops run: grad(x1*x1) = 2*x1
self.assertAllClose([1.0, 1.0, 2.0], sess.run([x, r, g_r]))
if __name__ == "__main__":
test.main()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Efficient ImageNet input pipeline using tf.data.Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from collections import namedtuple
import functools
import os
import tensorflow.compat.v1 as tf
import resnet_preprocessing
from tensorflow.contrib import cloud as contrib_cloud
def image_serving_input_fn():
"""Serving input fn for raw images."""
def _preprocess_image(image_bytes):
"""Preprocess a single raw image."""
image = resnet_preprocessing.preprocess_image(
image_bytes=image_bytes, is_training=False)
return image
image_bytes_list = tf.placeholder(
shape=[None],
dtype=tf.string,
)
images = tf.map_fn(
_preprocess_image, image_bytes_list, back_prop=False, dtype=tf.float32)
return tf.estimator.export.ServingInputReceiver(
images, {'image_bytes': image_bytes_list})
class ImageNetTFExampleInput(object):
"""Base class for ImageNet input_fn generator.
Args:
is_training: `bool` for whether the input is for training
use_bfloat16: If True, use bfloat16 precision; else use float32.
transpose_input: 'bool' for whether to use the double transpose trick
    num_cores: `int` for the number of TPU cores
    image_size: `int` for the square side length of the preprocessed images
  """
__metaclass__ = abc.ABCMeta
def __init__(self,
is_training,
use_bfloat16,
num_cores=8,
image_size=224,
transpose_input=False):
self.image_preprocessing_fn = resnet_preprocessing.preprocess_image
self.is_training = is_training
self.use_bfloat16 = use_bfloat16
self.num_cores = num_cores
self.transpose_input = transpose_input
self.image_size = image_size
def set_shapes(self, batch_size, images, labels):
"""Statically set the batch_size dimension."""
if self.transpose_input:
images.set_shape(images.get_shape().merge_with(
tf.TensorShape([None, None, None, batch_size])))
labels.set_shape(labels.get_shape().merge_with(
tf.TensorShape([batch_size])))
else:
images.set_shape(images.get_shape().merge_with(
tf.TensorShape([batch_size, None, None, None])))
labels.set_shape(labels.get_shape().merge_with(
tf.TensorShape([batch_size])))
return images, labels
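  def untranspose_for_model(self, images):
    """Hedged sketch, not part of the original pipeline: when the double
    transpose trick is enabled, input_fn below feeds images in HWCN layout,
    so the model side would transpose back to NHWC before the first
    convolution."""
    if self.transpose_input:
      return tf.transpose(images, [3, 0, 1, 2])  # HWCN -> NHWC
    return images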
def dataset_parser(self, value):
"""Parses an image and its label from a serialized ResNet-50 TFExample.
Args:
value: serialized string containing an ImageNet TFExample.
Returns:
Returns a tuple of (image, label) from the TFExample.
"""
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, ''),
'image/format': tf.FixedLenFeature((), tf.string, 'jpeg'),
'image/class/label': tf.FixedLenFeature([], tf.int64, -1),
'image/class/text': tf.FixedLenFeature([], tf.string, ''),
'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32),
'image/object/class/label': tf.VarLenFeature(dtype=tf.int64),
}
parsed = tf.parse_single_example(value, keys_to_features)
image_bytes = tf.reshape(parsed['image/encoded'], shape=[])
image = self.image_preprocessing_fn(
image_bytes=image_bytes,
is_training=self.is_training,
image_size=self.image_size,
use_bfloat16=self.use_bfloat16)
# Subtract one so that labels are in [0, 1000).
label = tf.cast(
tf.reshape(parsed['image/class/label'], shape=[]), dtype=tf.int32) - 1
return image, label
@abc.abstractmethod
def make_source_dataset(self):
"""Makes dataset of serialized TFExamples.
The returned dataset will contain `tf.string` tensors, but these strings are
serialized `TFExample` records that will be parsed by `dataset_parser`.
If self.is_training, the dataset should be infinite.
Returns:
A `tf.data.Dataset` object.
"""
return
def input_fn(self, params):
"""Input function which provides a single batch for train or eval.
Args:
params: `dict` of parameters passed from the `TPUEstimator`.
`params['batch_size']` is always provided and should be used as the
effective batch size.
Returns:
A `tf.data.Dataset` object.
"""
# Retrieves the batch size for the current shard. The # of shards is
# computed according to the input pipeline deployment. See
# tf.contrib.tpu.RunConfig for details.
batch_size = params['batch_size']
dataset = self.make_source_dataset()
# Use the fused map-and-batch operation.
#
    # For XLA, we must use fixed shapes. Because we repeat the source training
# dataset indefinitely, we can use `drop_remainder=True` to get fixed-size
# batches without dropping any training examples.
#
# When evaluating, `drop_remainder=True` prevents accidentally evaluating
# the same image twice by dropping the final batch if it is less than a full
# batch size. As long as this validation is done with consistent batch size,
# exactly the same images will be used.
dataset = dataset.apply(
tf.data.experimental.map_and_batch(
self.dataset_parser,
batch_size=batch_size,
num_parallel_batches=self.num_cores,
drop_remainder=True))
# Transpose for performance on TPU
if self.transpose_input:
dataset = dataset.map(
lambda images, labels: (tf.transpose(images, [1, 2, 3, 0]), labels),
num_parallel_calls=self.num_cores)
# Assign static batch size dimension
dataset = dataset.map(functools.partial(self.set_shapes, batch_size))
# Prefetch overlaps in-feed with training
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
if self.is_training:
# Use tf.data's experimental_slack option to reduce CPU contention at the
# start of a step. Enable non-determinism only for training.
options = tf.data.Options()
options.experimental_slack = True
options.experimental_deterministic = False
dataset = dataset.with_options(options)
return dataset
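def _example_train_input_fn(params):
  """Hedged usage sketch, not wired into any estimator here: how a
  TPUEstimator-style caller would build this pipeline. The GCS path and flag
  values are placeholders, not real locations or recommendations."""
  imagenet_train = ImageNetInput(
      is_training=True,
      use_bfloat16=False,
      transpose_input=True,
      data_dir='gs://example-bucket/imagenet')
  # params['batch_size'] is supplied per shard by the TPUEstimator caller.
  return imagenet_train.input_fn(params)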
class ImageNetInput(ImageNetTFExampleInput):
"""Generates ImageNet input_fn from a series of TFRecord files.
The training data is assumed to be in TFRecord format with keys as specified
in the dataset_parser below, sharded across 1024 files, named sequentially:
train-00000-of-01024
train-00001-of-01024
...
train-01023-of-01024
The validation data is in the same format but sharded in 128 files.
The format of the data required is created by the script at:
https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py
"""
def __init__(self,
is_training,
use_bfloat16,
transpose_input,
data_dir,
image_size=224,
num_parallel_calls=64,
cache=False,
num_replicas=None,
replica=0):
"""Create an input from TFRecord files.
Args:
is_training: `bool` for whether the input is for training
use_bfloat16: If True, use bfloat16 precision; else use float32.
transpose_input: 'bool' for whether to use the double transpose trick
data_dir: `str` for the directory of the training and validation data;
        if 'null' (the literal string 'null') or empty, a null pipeline is
        constructed instead, consisting of all-zero images and zero labels.
num_parallel_calls: concurrency level to use when reading data from disk.
cache: if true, fill the dataset by repeating from its cache
num_replicas: `int` for the number of model replicas this dataset should
be sharded onto, or `None` if this dataset should not be sharded.
replica: `int` for the replica that input_fn should produce data for
"""
super(ImageNetInput, self).__init__(
is_training=is_training,
image_size=image_size,
use_bfloat16=use_bfloat16,
transpose_input=transpose_input)
self.data_dir = data_dir
# TODO(b/112427086): simplify the choice of input source
if self.data_dir == 'null' or not self.data_dir:
self.data_dir = None
self.num_parallel_calls = num_parallel_calls
self.cache = cache
self.num_replicas = num_replicas
self.replica = replica
def _get_null_input(self, data):
"""Returns a null image (all black pixels).
Args:
data: element of a dataset, ignored in this method, since it produces
the same null image regardless of the element.
Returns:
a tensor representing a null image.
"""
del data # Unused since output is constant regardless of input
return tf.zeros([self.image_size, self.image_size, 3], tf.bfloat16
if self.use_bfloat16 else tf.float32)
def dataset_parser(self, value):
"""See base class."""
if not self.data_dir:
return value, tf.constant(0, tf.int32)
return super(ImageNetInput, self).dataset_parser(value)
def make_source_dataset(self):
"""See base class."""
if not self.data_dir:
tf.logging.info('Undefined data_dir implies null input')
return tf.data.Dataset.range(1).repeat().map(self._get_null_input)
# Shuffle the filenames to ensure better randomization.
file_pattern = os.path.join(
self.data_dir, 'train-*' if self.is_training else 'validation-*')
dataset = tf.data.Dataset.list_files(file_pattern, shuffle=self.is_training)
# Shard the data into `num_replicas` parts, get the part for `replica`
if self.num_replicas:
dataset = dataset.shard(self.num_replicas, self.replica)
if self.is_training and not self.cache:
dataset = dataset.repeat()
def fetch_dataset(filename):
buffer_size = 8 * 1024 * 1024 # 8 MiB per file
dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size)
return dataset
# Read the data from disk in parallel
dataset = dataset.interleave(
fetch_dataset,
cycle_length=self.num_parallel_calls,
num_parallel_calls=self.num_parallel_calls)
if self.cache:
dataset = dataset.cache().shuffle(1024 * 16).repeat()
else:
dataset = dataset.shuffle(1024)
return dataset
# Defines a selection of data from a Cloud Bigtable.
BigtableSelection = namedtuple('BigtableSelection',
['project',
'instance',
'table',
'prefix',
'column_family',
'column_qualifier'])
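def _example_bigtable_selection():
  """Hedged example, every value below is a placeholder rather than a real
  deployment: how a BigtableSelection is filled in before constructing
  ImageNetBigtableInput below."""
  return BigtableSelection(
      project='example-gcp-project',
      instance='example-bigtable-instance',
      table='imagenet',
      prefix='train_',
      column_family='tfexample',
      column_qualifier='example')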
class ImageNetBigtableInput(ImageNetTFExampleInput):
"""Generates ImageNet input_fn from a Bigtable for training or evaluation.
"""
def __init__(self, is_training, use_bfloat16, transpose_input, selection):
"""Constructs an ImageNet input from a BigtableSelection.
Args:
is_training: `bool` for whether the input is for training
use_bfloat16: If True, use bfloat16 precision; else use float32.
transpose_input: 'bool' for whether to use the double transpose trick
selection: a BigtableSelection specifying a part of a Bigtable.
"""
super(ImageNetBigtableInput, self).__init__(
is_training=is_training,
use_bfloat16=use_bfloat16,
transpose_input=transpose_input)
self.selection = selection
def make_source_dataset(self):
"""See base class."""
data = self.selection
client = contrib_cloud.BigtableClient(data.project, data.instance)
table = client.table(data.table)
ds = table.parallel_scan_prefix(data.prefix,
columns=[(data.column_family,
data.column_qualifier)])
# The Bigtable datasets will have the shape (row_key, data)
ds_data = ds.map(lambda index, data: data)
if self.is_training:
ds_data = ds_data.repeat()
return ds_data
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'IndustryProfile.create_date'
db.add_column(u'account_industryprofile', 'create_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2013, 12, 31, 0, 0), blank=True),
keep_default=False)
# Adding field 'IndustryProfile.write_date'
db.add_column(u'account_industryprofile', 'write_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2013, 12, 31, 0, 0), blank=True),
keep_default=False)
# Adding field 'UserProfile.create_date'
db.add_column(u'account_userprofile', 'create_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2013, 12, 31, 0, 0), blank=True),
keep_default=False)
# Adding field 'UserProfile.write_date'
db.add_column(u'account_userprofile', 'write_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2013, 12, 31, 0, 0), blank=True),
keep_default=False)
# Adding field 'TrainingProfile.create_date'
db.add_column(u'account_trainingprofile', 'create_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2013, 12, 31, 0, 0), blank=True),
keep_default=False)
# Adding field 'TrainingProfile.write_date'
db.add_column(u'account_trainingprofile', 'write_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2013, 12, 31, 0, 0), blank=True),
keep_default=False)
# Adding field 'GovernmentProfile.create_date'
db.add_column(u'account_governmentprofile', 'create_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2013, 12, 31, 0, 0), blank=True),
keep_default=False)
# Adding field 'GovernmentProfile.write_date'
db.add_column(u'account_governmentprofile', 'write_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2013, 12, 31, 0, 0), blank=True),
keep_default=False)
# Adding field 'StudentProfile.create_date'
db.add_column(u'account_studentprofile', 'create_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2013, 12, 31, 0, 0), blank=True),
keep_default=False)
# Adding field 'StudentProfile.write_date'
db.add_column(u'account_studentprofile', 'write_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2013, 12, 31, 0, 0), blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'IndustryProfile.create_date'
db.delete_column(u'account_industryprofile', 'create_date')
# Deleting field 'IndustryProfile.write_date'
db.delete_column(u'account_industryprofile', 'write_date')
# Deleting field 'UserProfile.create_date'
db.delete_column(u'account_userprofile', 'create_date')
# Deleting field 'UserProfile.write_date'
db.delete_column(u'account_userprofile', 'write_date')
# Deleting field 'TrainingProfile.create_date'
db.delete_column(u'account_trainingprofile', 'create_date')
# Deleting field 'TrainingProfile.write_date'
db.delete_column(u'account_trainingprofile', 'write_date')
# Deleting field 'GovernmentProfile.create_date'
db.delete_column(u'account_governmentprofile', 'create_date')
# Deleting field 'GovernmentProfile.write_date'
db.delete_column(u'account_governmentprofile', 'write_date')
# Deleting field 'StudentProfile.create_date'
db.delete_column(u'account_studentprofile', 'create_date')
# Deleting field 'StudentProfile.write_date'
db.delete_column(u'account_studentprofile', 'write_date')
models = {
u'account.emailaddress': {
'Meta': {'object_name': 'EmailAddress'},
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'account.emailconfirmation': {
'Meta': {'object_name': 'EmailConfirmation'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email_address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['account.EmailAddress']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'account.governmentprofile': {
'Meta': {'object_name': 'GovernmentProfile'},
'contact_person': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'department': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'department_type': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mobile_phone': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user_profile': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['account.UserProfile']", 'unique': 'True'}),
'write_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'account.industryprofile': {
'Meta': {'object_name': 'IndustryProfile'},
'company': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['admin.Company']", 'null': 'True', 'blank': 'True'}),
'contact_person': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'est_year': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'industry_type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mobile_phone': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'sub_sector': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'user_profile': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['account.UserProfile']", 'unique': 'True'}),
'write_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'account.studentprofile': {
'Meta': {'object_name': 'StudentProfile'},
'address_line1': ('django.db.models.fields.TextField', [], {}),
'address_line2': ('django.db.models.fields.TextField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_company': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'educational_background': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'experience': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'functional_area': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'industry_belongs_to': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'key_skills': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['admin.OccupationalStandard']", 'symmetrical': 'False'}),
'mobile_phone': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'user_profile': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['account.UserProfile']", 'unique': 'True'}),
'work_status': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'write_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'account.trainingprofile': {
'Meta': {'object_name': 'TrainingProfile'},
'area_of_specialization': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'contact_person': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'est_year': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mobile_phone': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user_profile': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['account.UserProfile']", 'unique': 'True'}),
'write_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'account.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'write_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'admin.company': {
'Meta': {'object_name': 'Company'},
'company_type': ('django.db.models.fields.CharField', [], {'default': "'N/A'", 'max_length': '3'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'nasscom_membership_number': ('django.db.models.fields.CharField', [], {'default': "'N/A'", 'max_length': '20'}),
'training_provider': ('django.db.models.fields.CharField', [], {'default': "'NO'", 'max_length': '3'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '100'}),
'write_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'admin.occupationalstandard': {
'Meta': {'unique_together': "(('code', 'version'),)", 'object_name': 'OccupationalStandard'},
'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'code': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '9', 'db_index': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': 'None'}),
'drafted_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'knowledge': ('tinymce.models.HTMLField', [], {'default': 'None'}),
'last_reviewed_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'next_review_on': ('django.db.models.fields.DateField', [], {}),
'performace_criteria': ('tinymce.models.HTMLField', [], {'default': 'None'}),
'scope': ('tinymce.models.HTMLField', [], {'default': 'None'}),
'skills': ('tinymce.models.HTMLField', [], {'default': 'None'}),
'sub_sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.SubSector']"}),
'title': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'db_index': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '8', 'db_index': 'True'})
},
'admin.sector': {
'Meta': {'object_name': 'Sector', 'index_together': "[['name']]"},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'unique': 'True', 'max_length': '9', 'db_index': 'True'}),
'write_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'admin.subsector': {
'Meta': {'unique_together': "(('sector', 'name'),)", 'object_name': 'SubSector', 'index_together': "[['name', 'sector']]"},
'career_guide': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobility_map': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'db_index': 'True'}),
'sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['admin.Sector']"}),
'write_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['account']
|
|
#
# Collective Knowledge
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
import os
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# Add module
def add(i):
"""
Input: {
(repo_uoa) - repo UOA
module_uoa - normally should be 'module' already
data_uoa - UOA of the module to be created
(desc) - module description
(license) - module license
(copyright) - module copyright
(developer) - module developer
              (developer_email)   - module developer email
              (developer_webpage) - module developer webpage
(actions) - dict with actions {"func1":{}, "func2":{} ...}
(dict) - other meta description to add to entry
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
Output of the 'add' kernel function
}
"""
# Check if global writing is allowed
r=ck.check_writing({'module_uoa':work['self_module_uoa']})
if r['return']>0: return r
o=i.get('out','')
# Find path to module 'module' to get dummies
r=ck.access({'action':'load',
'module_uoa':work['self_module_uoa'],
'data_uoa':work['self_module_uoa'],
'common_func':'yes'})
if r['return']>0: return r
p=r['path']
pm=os.path.join(p,cfg['dummy_module'])
pma=os.path.join(p,cfg['dummy_module_action'])
# Load module dummy
r=ck.load_text_file({'text_file':pm})
if r['return']>0: return r
spm=r['string']
# Load module action dummy
r=ck.load_text_file({'text_file':pma})
if r['return']>0: return r
spma=r['string']
# Prepare meta description
desc=i.get('desc','')
license=i.get('license','')
copyright=i.get('copyright','')
developer=i.get('developer','')
developer_email=i.get('developer_email','')
developer_webpage=i.get('developer_webpage','')
actions=i.get('actions',{})
# If console mode, ask some questions
if o=='con':
if desc=='':
r=ck.inp({'text':'Add brief module description: '})
desc=r['string']
ck.out('')
if license=='':
r=ck.inp({'text':'Add brief module license (or Enter to use "'+ck.cfg['default_license']+'"): '})
license=r['string']
if license=='': license=ck.cfg['default_license']
if copyright=='':
r=ck.inp({'text':'Add brief module copyright (or Enter to use "'+ck.cfg['default_copyright']+'"): '})
copyright=r['string']
if copyright=='': copyright=ck.cfg['default_copyright']
ck.out('')
if developer=='':
r=ck.inp({'text':'Add module\'s developer (or Enter to use "'+ck.cfg['default_developer']+'"): '})
developer=r['string']
if developer=='': developer=ck.cfg['default_developer']
if developer_email=='':
r=ck.inp({'text':'Add module\'s developer email (or Enter to use "'+ck.cfg['default_developer_email']+'"): '})
developer_email=r['string']
if developer_email=='': developer_email=ck.cfg['default_developer_email']
if developer_webpage=='':
r=ck.inp({'text':'Add module\'s developer webpage (or Enter to use "'+ck.cfg['default_developer_webpage']+'"): '})
developer_webpage=r['string']
if developer_webpage=='': developer_webpage=ck.cfg['default_developer_webpage']
if len(actions)==0:
act='*'
while act!='':
ck.out('')
r=ck.inp({'text':'Add action function (or Enter to stop): '})
act=r['string']
if act!='':
actions[act]={}
r1=ck.inp({'text':'Support web (y/N): '})
x=r1['string'].lower()
                if x=='yes' or x=='y':
                   fweb='yes'
                   actions[act]['for_web']=fweb
r1=ck.inp({'text':'Add action description: '})
adesc=r1['string']
if adesc!='':
actions[act]['desc']=adesc
ck.out('')
# Prepare meta description
dd={}
if desc!='':
dd['desc']=desc
spm=spm.replace('$#desc#$', desc)
if license!='':
dd['license']=license
spm=spm.replace('$#license#$', license)
if copyright!='':
dd['copyright']=copyright
spm=spm.replace('$#copyright#$', copyright)
dev=''
if developer!='':
dev=developer
dd['developer']=developer
if developer_email!='':
if dev!='': dev+=', '
dev+=developer_email
dd['developer_email']=developer_email
if developer_webpage!='':
if dev!='': dev+=', '
dev+=developer_webpage
dd['developer_webpage']=developer_webpage
if dev!='':
spm=spm.replace('$#developer#$', dev)
dd['actions']=actions
# Substitute actions
for act in actions:
adesc=actions[act].get('desc','TBD: action description')
spm+='\n'+spma.replace('$#action#$', act).replace('$#desc#$',adesc)
dx=i.get('dict',{})
r=ck.merge_dicts({'dict1':dx, 'dict2':dd})
if r['return']>0: return r
# Add entry (it will ask further questions about alias and user-friendly name)
i['common_func']='yes'
i['dict']=dx
i['sort_keys']='yes'
r=ck.access(i)
if r['return']>0: return r
# Add module code
p=r['path']
pf=os.path.join(p, ck.cfg['module_full_code_name'])
if o=='con':
ck.out('')
ck.out('Creating module code '+pf+' ...')
# Write module code
rx=ck.save_text_file({'text_file':pf, 'string':spm})
if rx['return']>0: return rx
return r
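##############################################################################
# Hedged usage sketch (not a registered CK action): calling the 'add' action
# above programmatically through the CK kernel, using only input keys that
# are documented in its API. The data_uoa value is a placeholder.
def example_add_usage():
    """
    Input:  {}
    Output: output of the 'add' function above
    """
    return ck.access({'action':'add',
                      'module_uoa':'module',
                      'data_uoa':'my-new-module',
                      'desc':'demo module created from the usage sketch',
                      'out':'con'})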
##############################################################################
# Show info about modules
def show(i):
"""
Input: {
(the same as list; can use wildcards)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
o=i.get('out','')
html=False
if o=='html' or i.get('web','')=='yes':
html=True
h=''
unique_repo=False
if i.get('repo_uoa','')!='': unique_repo=True
import copy
ii=copy.deepcopy(i)
ii['out']=''
ii['action']='list'
ii['add_meta']='yes'
rx=ck.access(ii)
if rx['return']>0: return rx
ll=sorted(rx['lst'], key=lambda k: k['data_uoa'])
if html:
h+='<i><b>Note:</b> you can obtain JSON API of a given action of a given module in CMD via "ck <action> <module> --help"</i><br><br>\n'
h+='<table cellpadding="5">\n'
h+=' <tr>\n'
h+=' <td><b>CK module (aka wrapper, plugin or container):</b></td>\n'
h+=' <td width="200"><b>CK Repository:</b></td>\n'
h+=' <td><b>Description and actions:</b></td>\n'
h+=' </tr>\n'
repo_url={}
for l in ll:
ln=l['data_uoa']
lr=l['repo_uoa']
lr_uid=l['repo_uid']
url=''
if lr=='default':
url='' #'http://github.com/ctuning/ck'
elif lr_uid in repo_url:
url=repo_url[lr_uid]
else:
rx=ck.load_repo_info_from_cache({'repo_uoa':lr_uid})
if rx['return']>0: return rx
url=rx.get('dict',{}).get('url','')
repo_url[lr_uid]=url
if lr not in cfg['skip_repos']:
lm=l['meta']
ld=lm.get('desc','')
actions=lm.get('actions',{})
###############################################################
if html:
h+=' <tr>\n'
h+=' <td valign="top"><b>'+ln+'</b></td>\n'
x1=''
x2=''
if url!='':
x1='<a href="'+url+'">'
x2='</a>'
h+=' <td valign="top"><i>'+x1+lr+x2+'</i></td>\n'
h+=' <td valign="top">'+ld+'\n'
if len(actions)>0:
h+='<ul>\n'
for q in sorted(actions):
qq=actions[q]
qd=qq.get('desc','')
h+='<li><i>'+q+'</i>'
if qd!='':
h+=' - '+qd
h+='</ul>\n'
h+='</td>\n'
h+=' </tr>\n'
###############################################################
elif o=='mediawiki':
x=lr
if url!='':
x='['+url+' '+lr+']'
ck.out('* \'\'\''+ln+'\'\'\' ('+x+') - '+ld)
if len(actions)>0:
for q in sorted(actions):
qq=actions[q]
qd=qq.get('desc','')
ck.out('** \'\''+q+'\'\' - '+qd)
###############################################################
elif o=='con' or o=='txt':
if unique_repo:
ck.out('')
s=ln+' - '+ld
else:
ss=''
if len(ln)<35: ss=' '*(35-len(ln))
ss1=''
if len(lr)<30: ss1=' '*(30-len(lr))
s=ln+ss+' ('+lr+')'
if ld!='': s+=ss1+' '+ld
ck.out(s)
if len(actions)>0:
ck.out('')
for q in sorted(actions):
qq=actions[q]
qd=qq.get('desc','')
ck.out(' * '+q+' - '+qd)
if html:
h+='</table>\n'
return {'return':0, 'html':h}
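##############################################################################
# Hedged usage sketch (not a registered CK action): listing all modules and
# their actions in console mode through the 'show' function above.
def example_show_usage():
    """
    Input:  {}
    Output: output of the 'show' function above
    """
    return ck.access({'action':'show',
                      'module_uoa':'module',
                      'out':'con'})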
|
|
"""
Tests for transformer objects.
"""
from __future__ import division
from __future__ import unicode_literals
from deepchem.molnet import load_delaney
from deepchem.trans.transformers import FeaturizationTransformer
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import unittest
import numpy as np
import pandas as pd
import deepchem as dc
class TestTransformers(unittest.TestCase):
"""
Test top-level API for transformer objects.
"""
def setUp(self):
super(TestTransformers, self).setUp()
self.current_dir = os.path.dirname(os.path.abspath(__file__))
def test_y_log_transformer(self):
"""Tests logarithmic data transformer."""
solubility_dataset = dc.data.tests.load_solubility_data()
log_transformer = dc.trans.LogTransformer(
transform_y=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = log_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now a logarithmic version of itself
np.testing.assert_allclose(y_t, np.log(y + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(y_t), y)
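  def _log_roundtrip_reference(self, y):
    """Hedged test-local reference, a sketch rather than the DeepChem
    implementation: the forward/backward pair exercised above is
    y -> log(y + 1) and y_t -> exp(y_t) - 1."""
    y_t = np.log(y + 1)
    return np.exp(y_t) - 1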
def test_transform_unlabelled(self):
ul_dataset = dc.data.tests.load_unlabelled_data()
# transforming y should raise an exception
with self.assertRaises(ValueError) as context:
dc.trans.transformers.Transformer(transform_y=True).transform(ul_dataset)
# transforming w should raise an exception
with self.assertRaises(ValueError) as context:
dc.trans.transformers.Transformer(transform_w=True).transform(ul_dataset)
# transforming X should be okay
dc.trans.NormalizationTransformer(
transform_X=True, dataset=ul_dataset).transform(ul_dataset)
def test_X_log_transformer(self):
"""Tests logarithmic data transformer."""
solubility_dataset = dc.data.tests.load_solubility_data()
log_transformer = dc.trans.LogTransformer(
transform_X=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = log_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
    # Check y is unchanged since this is an X transformer
    np.testing.assert_allclose(y, y_t)
    # Check w is unchanged since this is an X transformer
    np.testing.assert_allclose(w, w_t)
    # Check X is now a logarithmic version of itself
np.testing.assert_allclose(X_t, np.log(X + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(X_t), X)
def test_y_log_transformer_select(self):
"""Tests logarithmic data transformer with selection."""
multitask_dataset = dc.data.tests.load_feat_multitask_data()
dfe = pd.read_csv(
os.path.join(self.current_dir,
"../../models/tests/feat_multitask_example.csv"))
tid = []
tasklist = ["task0", "task3", "task4", "task5"]
first_task = "task0"
for task in tasklist:
tiid = dfe.columns.get_loc(task) - dfe.columns.get_loc(first_task)
tid = np.concatenate((tid, np.array([tiid])))
tasks = tid.astype(int)
log_transformer = dc.trans.LogTransformer(
transform_y=True, tasks=tasks, dataset=multitask_dataset)
X, y, w, ids = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
multitask_dataset = log_transformer.transform(multitask_dataset)
X_t, y_t, w_t, ids_t = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now a logarithmic version of itself
np.testing.assert_allclose(y_t[:, tasks], np.log(y[:, tasks] + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(y_t), y)
def test_X_log_transformer_select(self):
# Tests logarithmic data transformer with selection.
multitask_dataset = dc.data.tests.load_feat_multitask_data()
dfe = pd.read_csv(
os.path.join(self.current_dir,
"../../models/tests/feat_multitask_example.csv"))
fid = []
featurelist = ["feat0", "feat1", "feat2", "feat3", "feat5"]
first_feature = "feat0"
for feature in featurelist:
fiid = dfe.columns.get_loc(feature) - dfe.columns.get_loc(first_feature)
fid = np.concatenate((fid, np.array([fiid])))
features = fid.astype(int)
log_transformer = dc.trans.LogTransformer(
transform_X=True, features=features, dataset=multitask_dataset)
X, y, w, ids = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
multitask_dataset = log_transformer.transform(multitask_dataset)
X_t, y_t, w_t, ids_t = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
    # Check y is unchanged since this is an X transformer
    np.testing.assert_allclose(y, y_t)
    # Check w is unchanged since this is an X transformer
    np.testing.assert_allclose(w, w_t)
    # Check X is now a logarithmic version of itself
np.testing.assert_allclose(X_t[:, features], np.log(X[:, features] + 1))
# Check that untransform does the right thing.
np.testing.assert_allclose(log_transformer.untransform(X_t), X)
def test_y_normalization_transformer(self):
"""Tests normalization transformer."""
solubility_dataset = dc.data.tests.load_solubility_data()
normalization_transformer = dc.trans.NormalizationTransformer(
transform_y=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = normalization_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check that y_t has zero mean, unit std.
assert np.isclose(y_t.mean(), 0.)
assert np.isclose(y_t.std(), 1.)
# Check that untransform does the right thing.
np.testing.assert_allclose(normalization_transformer.untransform(y_t), y)
def test_X_normalization_transformer(self):
"""Tests normalization transformer."""
solubility_dataset = dc.data.tests.load_solubility_data()
normalization_transformer = dc.trans.NormalizationTransformer(
transform_X=True, dataset=solubility_dataset)
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = normalization_transformer.transform(solubility_dataset)
X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
    # Check y is unchanged since this is an X transformer
    np.testing.assert_allclose(y, y_t)
    # Check w is unchanged since this is an X transformer
    np.testing.assert_allclose(w, w_t)
# Check that X_t has zero mean, unit std.
# np.set_printoptions(threshold='nan')
mean = X_t.mean(axis=0)
assert np.amax(np.abs(mean - np.zeros_like(mean))) < 1e-7
orig_std_array = X.std(axis=0)
std_array = X_t.std(axis=0)
# Entries with zero std are not normalized
for orig_std, std in zip(orig_std_array, std_array):
if not np.isclose(orig_std, 0):
assert np.isclose(std, 1)
# TODO(rbharath): Untransform doesn't work properly for binary feature
# vectors. Need to figure out what's wrong here. (low priority)
## Check that untransform does the right thing.
# np.testing.assert_allclose(normalization_transformer.untransform(X_t), X)
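  def _manual_normalization_reference(self, X):
    """Hedged test-local reference, a sketch rather than the DeepChem
    implementation: z-score each column while leaving zero-variance columns
    unscaled, matching the behavior checked in
    test_X_normalization_transformer above."""
    mean = X.mean(axis=0)
    std = X.std(axis=0)
    safe_std = np.where(np.isclose(std, 0), 1.0, std)
    return (X - mean) / safe_std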
def test_cdf_X_transformer(self):
"""Test CDF transformer on Gaussian normal dataset."""
target = np.array(np.transpose(np.linspace(0., 1., 1001)))
target = np.transpose(np.array(np.append([target], [target], axis=0)))
gaussian_dataset = dc.data.tests.load_gaussian_cdf_data()
bins = 1001
cdf_transformer = dc.trans.CDFTransformer(
transform_X=True, dataset=gaussian_dataset, bins=bins)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset = cdf_transformer.transform(gaussian_dataset, bins=bins)
X_t, y_t, w_t, ids_t = (gaussian_dataset.X, gaussian_dataset.y,
gaussian_dataset.w, gaussian_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check X is now holding the proper values when sorted.
    sorted_X = np.sort(X_t, axis=0)
    np.testing.assert_allclose(sorted_X, target)
def test_cdf_y_transformer(self):
# Test CDF transformer on Gaussian normal dataset.
target = np.array(np.transpose(np.linspace(0., 1., 1001)))
target = np.transpose(np.array(np.append([target], [target], axis=0)))
gaussian_dataset = dc.data.tests.load_gaussian_cdf_data()
bins = 1001
cdf_transformer = dc.trans.CDFTransformer(
transform_y=True, dataset=gaussian_dataset, bins=bins)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset = cdf_transformer.transform(gaussian_dataset, bins=bins)
X_t, y_t, w_t, ids_t = (gaussian_dataset.X, gaussian_dataset.y,
gaussian_dataset.w, gaussian_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now holding the proper values when sorted.
sorted_y = np.sort(y_t, axis=0)
np.testing.assert_allclose(sorted_y, target)
# Check that untransform does the right thing.
np.testing.assert_allclose(cdf_transformer.untransform(y_t), y)
def test_clipping_X_transformer(self):
"""Test clipping transformer on X of singletask dataset."""
n_samples = 10
n_features = 3
n_tasks = 1
ids = np.arange(n_samples)
X = np.ones((n_samples, n_features))
target = 5. * X
X *= 6.
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
transformer = dc.trans.ClippingTransformer(transform_X=True, x_max=5.)
clipped_dataset = transformer.transform(dataset)
X_t, y_t, w_t, ids_t = (clipped_dataset.X, clipped_dataset.y,
clipped_dataset.w, clipped_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check X is now holding the clipped values.
np.testing.assert_allclose(X_t, target)
def test_clipping_y_transformer(self):
"""Test clipping transformer on y of singletask dataset."""
n_samples = 10
n_features = 3
n_tasks = 1
ids = np.arange(n_samples)
X = np.zeros((n_samples, n_features))
y = np.ones((n_samples, n_tasks))
target = 5. * y
y *= 6.
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
transformer = dc.trans.ClippingTransformer(transform_y=True, y_max=5.)
clipped_dataset = transformer.transform(dataset)
X_t, y_t, w_t, ids_t = (clipped_dataset.X, clipped_dataset.y,
clipped_dataset.w, clipped_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now holding the clipped values.
np.testing.assert_allclose(y_t, target)
def test_power_X_transformer(self):
"""Test Power transformer on Gaussian normal dataset."""
gaussian_dataset = dc.data.tests.load_gaussian_cdf_data()
powers = [1, 2, 0.5]
power_transformer = dc.trans.PowerTransformer(
transform_X=True, powers=powers)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset2 = power_transformer.transform(gaussian_dataset)
X_t, y_t, w_t, ids_t = (gaussian_dataset2.X, gaussian_dataset2.y,
gaussian_dataset2.w, gaussian_dataset2.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check y is unchanged since this is an X transformer
np.testing.assert_allclose(y, y_t)
# Check w is unchanged since this is an X transformer
np.testing.assert_allclose(w, w_t)
# Check X is now holding the proper values in each column.
np.testing.assert_allclose(X_t.shape[1], len(powers) * X.shape[1])
np.testing.assert_allclose(X, X_t[:, :2])
np.testing.assert_allclose(np.power(X, 2), X_t[:, 2:4])
np.testing.assert_allclose(np.power(X, 0.5), X_t[:, 4:])
def test_power_y_transformer(self):
"""Test Power transformer on Gaussian normal dataset."""
gaussian_dataset = dc.data.tests.load_gaussian_cdf_data()
powers = [1, 2, 0.5]
power_transformer = dc.trans.PowerTransformer(
transform_y=True, powers=powers)
X, y, w, ids = (gaussian_dataset.X, gaussian_dataset.y, gaussian_dataset.w,
gaussian_dataset.ids)
gaussian_dataset2 = power_transformer.transform(gaussian_dataset)
X_t, y_t, w_t, ids_t = (gaussian_dataset2.X, gaussian_dataset2.y,
gaussian_dataset2.w, gaussian_dataset2.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a y transformer
np.testing.assert_allclose(X, X_t)
# Check w is unchanged since this is a y transformer
np.testing.assert_allclose(w, w_t)
# Check y is now holding the proper values in each column.
np.testing.assert_allclose(y_t.shape[1], len(powers) * y.shape[1])
np.testing.assert_allclose(y, y_t[:, :2])
np.testing.assert_allclose(np.power(y, 2), y_t[:, 2:4])
np.testing.assert_allclose(np.power(y, 0.5), y_t[:, 4:])
# Check that untransform does the right thing.
np.testing.assert_allclose(power_transformer.untransform(y_t), y)
def test_singletask_balancing_transformer(self):
"""Test balancing transformer on single-task dataset."""
classification_dataset = dc.data.tests.load_classification_data()
balancing_transformer = dc.trans.BalancingTransformer(
transform_w=True, dataset=classification_dataset)
X, y, w, ids = (classification_dataset.X, classification_dataset.y,
classification_dataset.w, classification_dataset.ids)
classification_dataset = balancing_transformer.transform(
classification_dataset)
X_t, y_t, w_t, ids_t = (classification_dataset.X, classification_dataset.y,
classification_dataset.w,
classification_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a w transformer
np.testing.assert_allclose(X, X_t)
# Check y is unchanged since this is a w transformer
np.testing.assert_allclose(y, y_t)
for ind, task in enumerate(classification_dataset.get_task_names()):
y_task = y_t[:, ind]
w_task = w_t[:, ind]
w_orig_task = w[:, ind]
# Assert that entries with zero weight retain zero weight
np.testing.assert_allclose(w_task[w_orig_task == 0],
np.zeros_like(w_task[w_orig_task == 0]))
# Check that sum of 0s equals sum of 1s in transformed for each task
assert np.isclose(
np.sum(w_task[y_task == 0]), np.sum(w_task[y_task == 1]))
def test_multitask_balancing_transformer(self):
"""Test balancing transformer on multitask dataset."""
multitask_dataset = dc.data.tests.load_multitask_data()
balancing_transformer = dc.trans.BalancingTransformer(
transform_w=True, dataset=multitask_dataset)
X, y, w, ids = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
multitask_dataset = balancing_transformer.transform(multitask_dataset)
X_t, y_t, w_t, ids_t = (multitask_dataset.X, multitask_dataset.y,
multitask_dataset.w, multitask_dataset.ids)
# Check ids are unchanged.
for id_elt, id_t_elt in zip(ids, ids_t):
assert id_elt == id_t_elt
# Check X is unchanged since this is a w transformer
np.testing.assert_allclose(X, X_t)
# Check y is unchanged since this is a w transformer
np.testing.assert_allclose(y, y_t)
for ind, task in enumerate(multitask_dataset.get_task_names()):
y_task = y_t[:, ind]
w_task = w_t[:, ind]
w_orig_task = w[:, ind]
# Assert that entries with zero weight retain zero weight
np.testing.assert_allclose(w_task[w_orig_task == 0],
np.zeros_like(w_task[w_orig_task == 0]))
# Check that sum of 0s equals sum of 1s in transformed for each task
assert np.isclose(
np.sum(w_task[y_task == 0]), np.sum(w_task[y_task == 1]))
def test_coulomb_fit_transformer(self):
"""Test coulomb fit transformer on singletask dataset."""
n_samples = 10
n_features = 3
n_tasks = 1
ids = np.arange(n_samples)
X = np.random.rand(n_samples, n_features, n_features)
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids)
fit_transformer = dc.trans.CoulombFitTransformer(dataset)
X_t = fit_transformer.X_transform(dataset.X)
assert len(X_t.shape) == 2
def test_IRV_transformer(self):
n_features = 128
n_samples = 20
test_samples = 5
n_tasks = 2
X = np.random.randint(2, size=(n_samples, n_features))
y = np.zeros((n_samples, n_tasks))
w = np.ones((n_samples, n_tasks))
dataset = dc.data.NumpyDataset(X, y, w, ids=None)
X_test = np.random.randint(2, size=(test_samples, n_features))
y_test = np.zeros((test_samples, n_tasks))
w_test = np.ones((test_samples, n_tasks))
test_dataset = dc.data.NumpyDataset(X_test, y_test, w_test, ids=None)
sims = np.sum(
X_test[0, :] * X, axis=1, dtype=float) / np.sum(
np.sign(X_test[0, :] + X), axis=1, dtype=float)
sims = sorted(sims, reverse=True)
IRV_transformer = dc.trans.IRVTransformer(10, n_tasks, dataset)
test_dataset_trans = IRV_transformer.transform(test_dataset)
dataset_trans = IRV_transformer.transform(dataset)
assert test_dataset_trans.X.shape == (test_samples, 20 * n_tasks)
assert np.allclose(test_dataset_trans.X[0, :10], sims[:10])
assert np.allclose(test_dataset_trans.X[0, 10:20], [0] * 10)
assert not np.isclose(dataset_trans.X[0, 0], 1.)
def test_featurization_transformer(self):
fp_size = 2048
tasks, all_dataset, transformers = load_delaney('Raw')
train = all_dataset[0]
transformer = FeaturizationTransformer(
transform_X=True,
dataset=train,
featurizer=dc.feat.CircularFingerprint(size=fp_size))
new_train = transformer.transform(train)
self.assertEqual(new_train.y.shape, train.y.shape)
self.assertEqual(new_train.X.shape[-1], fp_size)
|
|
from unittest import TestCase
from ..utils import FieldsParameterParseError, parse_boolean, parse_fields_parameter
class TestParseFieldsParameter(TestCase):
# GOOD STUFF
def test_valid_single_field(self):
parsed = parse_fields_parameter('test')
self.assertEqual(parsed, [
('test', False, None),
])
def test_valid_multiple_fields(self):
parsed = parse_fields_parameter('test,another_test')
self.assertEqual(parsed, [
('test', False, None),
('another_test', False, None),
])
def test_valid_negated_field(self):
parsed = parse_fields_parameter('-test')
self.assertEqual(parsed, [
('test', True, None),
])
def test_valid_nested_fields(self):
parsed = parse_fields_parameter('test(foo,bar)')
self.assertEqual(parsed, [
('test', False, [
('foo', False, None),
('bar', False, None),
]),
])
def test_valid_star_field(self):
parsed = parse_fields_parameter('*,-test')
self.assertEqual(parsed, [
('*', False, None),
('test', True, None),
])
def test_valid_star_with_additional_field(self):
# Note: '*,test' is not allowed but '*,test(foo)' is
parsed = parse_fields_parameter('*,test(foo)')
self.assertEqual(parsed, [
('*', False, None),
('test', False, [
('foo', False, None),
]),
])
def test_valid_underscore_field(self):
parsed = parse_fields_parameter('_,test')
self.assertEqual(parsed, [
('_', False, None),
('test', False, None),
])
def test_valid_field_with_underscore_in_middle(self):
parsed = parse_fields_parameter('a_test')
self.assertEqual(parsed, [
('a_test', False, None),
])
def test_valid_negated_field_with_underscore_in_middle(self):
parsed = parse_fields_parameter('-a_test')
self.assertEqual(parsed, [
('a_test', True, None),
])
def test_valid_field_with_underscore_at_beginning(self):
parsed = parse_fields_parameter('_test')
self.assertEqual(parsed, [
('_test', False, None),
])
def test_valid_field_with_underscore_at_end(self):
parsed = parse_fields_parameter('test_')
self.assertEqual(parsed, [
('test_', False, None),
])
# BAD STUFF
def test_invalid_char(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test#')
self.assertEqual(str(e.exception), "unexpected char '#' at position 4")
def test_invalid_whitespace_before_identifier(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter(' test')
self.assertEqual(str(e.exception), "unexpected whitespace at position 0")
def test_invalid_whitespace_after_identifier(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test ')
self.assertEqual(str(e.exception), "unexpected whitespace at position 4")
def test_invalid_whitespace_after_comma(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test, test')
self.assertEqual(str(e.exception), "unexpected whitespace at position 5")
def test_invalid_whitespace_before_comma(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test ,test')
self.assertEqual(str(e.exception), "unexpected whitespace at position 4")
def test_invalid_unexpected_negation_operator(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test-')
self.assertEqual(str(e.exception), "unexpected char '-' at position 4")
def test_invalid_unexpected_open_bracket(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test,(foo)')
self.assertEqual(str(e.exception), "unexpected char '(' at position 5")
def test_invalid_unexpected_close_bracket(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test)')
self.assertEqual(str(e.exception), "unexpected char ')' at position 4")
def test_invalid_unexpected_comma_in_middle(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test,,foo')
self.assertEqual(str(e.exception), "unexpected char ',' at position 5")
def test_invalid_unexpected_comma_at_end(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test,foo,')
self.assertEqual(str(e.exception), "unexpected char ',' at position 9")
def test_invalid_unclosed_bracket(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test(foo')
self.assertEqual(str(e.exception), "unexpected end of input (did you miss out a close bracket?)")
def test_invalid_subfields_on_negated_field(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('-test(foo)')
self.assertEqual(str(e.exception), "unexpected char '(' at position 5")
def test_invalid_star_field_in_wrong_position(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test,*')
self.assertEqual(str(e.exception), "'*' must be in the first position")
def test_invalid_negated_star(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('-*')
self.assertEqual(str(e.exception), "'*' cannot be negated")
def test_invalid_star_with_nesting(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('*(foo,bar)')
self.assertEqual(str(e.exception), "unexpected char '(' at position 1")
def test_invalid_star_with_chars_after(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('*foo')
self.assertEqual(str(e.exception), "unexpected char 'f' at position 1")
def test_invalid_star_with_chars_before(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('foo*')
self.assertEqual(str(e.exception), "unexpected char '*' at position 3")
def test_invalid_star_with_additional_field(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('*,foo')
self.assertEqual(str(e.exception), "additional fields with '*' doesn't make sense")
def test_invalid_underscore_in_wrong_position(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test,_')
self.assertEqual(str(e.exception), "'_' must be in the first position")
def test_invalid_negated_underscore(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('-_')
self.assertEqual(str(e.exception), "'_' cannot be negated")
def test_invalid_underscore_with_nesting(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('_(foo,bar)')
self.assertEqual(str(e.exception), "unexpected char '(' at position 1")
def test_invalid_underscore_with_negated_field(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('_,-foo')
self.assertEqual(str(e.exception), "negated fields with '_' doesn't make sense")
def test_invalid_star_and_underscore(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('*,_')
self.assertEqual(str(e.exception), "'_' must be in the first position")
class TestParseBoolean(TestCase):
# GOOD STUFF
def test_valid_true(self):
parsed = parse_boolean('true')
self.assertEqual(parsed, True)
def test_valid_false(self):
parsed = parse_boolean('false')
self.assertEqual(parsed, False)
def test_valid_1(self):
parsed = parse_boolean('1')
self.assertEqual(parsed, True)
def test_valid_0(self):
parsed = parse_boolean('0')
self.assertEqual(parsed, False)
# BAD STUFF
def test_invalid(self):
with self.assertRaises(ValueError) as e:
parse_boolean('foo')
self.assertEqual(str(e.exception), "expected 'true' or 'false', got 'foo'")
def test_invalid_integer(self):
with self.assertRaises(ValueError) as e:
parse_boolean('2')
self.assertEqual(str(e.exception), "expected 'true' or 'false', got '2'")
|
|
"""Session implementation for CherryPy.
You need to edit your config file to use sessions. Here's an example::
[/]
tools.sessions.on = True
tools.sessions.storage_class = cherrypy.lib.sessions.FileSession
tools.sessions.storage_path = "/home/site/sessions"
tools.sessions.timeout = 60
This sets the session to be stored in files in the directory
/home/site/sessions, and the session timeout to 60 minutes. If you omit
``storage_class``, the sessions will be saved in RAM.
``tools.sessions.on`` is the only required line for working sessions,
the rest are optional.
By default, the session ID is passed in a cookie, so the client's browser must
have cookies enabled for your site.
To set data for the current session, use
``cherrypy.session['fieldname'] = 'fieldvalue'``;
to get data use ``cherrypy.session.get('fieldname')``.
================
Locking sessions
================
By default, the ``'locking'`` mode of sessions is ``'implicit'``, which means
the session is locked early and unlocked late. Be mindful of this default mode
for any requests that take a long time to process (streaming responses,
expensive calculations, database lookups, API calls, etc), as other concurrent
requests that also utilize sessions will hang until the session is unlocked.
If you want to control when the session data is locked and unlocked,
set ``tools.sessions.locking = 'explicit'``. Then call
``cherrypy.session.acquire_lock()`` and ``cherrypy.session.release_lock()``.
Regardless of which mode you use, the session is guaranteed to be unlocked when
the request is complete.
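As a quick illustration, a handler using explicit locking might do the
following (the ``'counter'`` key is only an example, not something CherryPy
defines)::
cherrypy.session.acquire_lock()
cherrypy.session['counter'] = cherrypy.session.get('counter', 0) + 1
cherrypy.session.release_lock()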
=================
Expiring Sessions
=================
You can force a session to expire with :func:`cherrypy.lib.sessions.expire`.
Simply call that function at the point you want the session to expire, and it
will cause the session cookie to expire client-side.
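For example, a logout handler (the handler itself is illustrative, not part
of CherryPy) could simply call::
cherrypy.lib.sessions.expire()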
===========================
Session Fixation Protection
===========================
If CherryPy receives, via a request cookie, a session id that it does not
recognize, it will reject that id and create a new one to return in the
response cookie. This `helps prevent session fixation attacks
<http://en.wikipedia.org/wiki/Session_fixation#Regenerate_SID_on_each_request>`_.
However, CherryPy "recognizes" a session id by looking up the saved session
data for that id. Therefore, if you never save any session data,
**you will get a new session id for every request**.
A side effect of CherryPy overwriting unrecognised session ids is that if you
have multiple, separate CherryPy applications running on a single domain (e.g.
on different ports), each app will overwrite the other's session id because by
default they use the same cookie name (``"session_id"``) but do not recognise
each other's sessions. It is therefore a good idea to use a different name for
each, for example::
[/]
...
tools.sessions.name = "my_app_session_id"
================
Sharing Sessions
================
If you run multiple instances of CherryPy (for example via mod_python behind
Apache prefork), you most likely cannot use the RAM session backend, since each
instance of CherryPy will have its own memory space. Use a different backend
instead, and verify that all instances are pointing at the same file or db
location. Alternately, you might try a load balancer which makes sessions
"sticky". Google is your friend, there.
================
Expiration Dates
================
The response cookie will possess an expiration date to inform the client at
which point to stop sending the cookie back in requests. If the server time
and client time differ, expect sessions to be unreliable. **Make sure the
system time of your server is accurate**.
CherryPy defaults to a 60-minute session timeout, which also applies to the
cookie which is sent to the client. Unfortunately, some versions of Safari
("4 public beta" on Windows XP at least) appear to have a bug in their parsing
of the GMT expiration date--they appear to interpret the date as one hour in
the past. Sixty minutes minus one hour is pretty close to zero, so you may
experience this bug as a new session id for every request, unless the requests
are less than one second apart. To fix, try increasing the session.timeout.
On the other extreme, some users report Firefox sending cookies after their
expiration date, although this was on a system with an inaccurate system time.
Maybe FF doesn't trust system time.
"""
import sys
import datetime
import os
import time
import threading
import binascii
import pickle
import zc.lockfile
import cherrypy
from cherrypy.lib import httputil
from cherrypy.lib import locking
from cherrypy.lib import is_iterator
missing = object()
class Session(object):
"""A CherryPy dict-like Session object (one per request)."""
_id = None
id_observers = None
"A list of callbacks to which to pass new id's."
@property
def id(self):
"""Return the current session id."""
return self._id
@id.setter
def id(self, value):
self._id = value
for o in self.id_observers:
o(value)
timeout = 60
'Number of minutes after which to delete session data.'
locked = False
"""
If True, this session instance has exclusive read/write access
to session data."""
loaded = False
"""
If True, data has been retrieved from storage. This should happen
automatically on the first attempt to access session data."""
clean_thread = None
'Class-level Monitor which calls self.clean_up.'
clean_freq = 5
'The poll rate for expired session cleanup in minutes.'
originalid = None
'The session id passed by the client. May be missing or unsafe.'
missing = False
'True if the session requested by the client did not exist.'
regenerated = False
"""
True if the application called session.regenerate(). This is not set by
internal calls to regenerate the session id."""
debug = False
'If True, log debug information.'
# --------------------- Session management methods --------------------- #
def __init__(self, id=None, **kwargs):
self.id_observers = []
self._data = {}
for k, v in kwargs.items():
setattr(self, k, v)
self.originalid = id
self.missing = False
if id is None:
if self.debug:
cherrypy.log('No id given; making a new one', 'TOOLS.SESSIONS')
self._regenerate()
else:
self.id = id
if self._exists():
if self.debug:
cherrypy.log('Set id to %s.' % id, 'TOOLS.SESSIONS')
else:
if self.debug:
cherrypy.log('Expired or malicious session %r; '
'making a new one' % id, 'TOOLS.SESSIONS')
# Expired or malicious session. Make a new one.
# See https://github.com/cherrypy/cherrypy/issues/709.
self.id = None
self.missing = True
self._regenerate()
def now(self):
"""Generate the session specific concept of 'now'.
Other session providers can override this to use alternative,
possibly timezone aware, versions of 'now'.
"""
return datetime.datetime.now()
def regenerate(self):
"""Replace the current session (with a new id)."""
self.regenerated = True
self._regenerate()
def _regenerate(self):
if self.id is not None:
if self.debug:
cherrypy.log(
'Deleting the existing session %r before '
'regeneration.' % self.id,
'TOOLS.SESSIONS')
self.delete()
old_session_was_locked = self.locked
if old_session_was_locked:
self.release_lock()
if self.debug:
cherrypy.log('Old lock released.', 'TOOLS.SESSIONS')
self.id = None
while self.id is None:
self.id = self.generate_id()
# Assert that the generated id is not already stored.
if self._exists():
self.id = None
if self.debug:
cherrypy.log('Set id to generated %s.' % self.id,
'TOOLS.SESSIONS')
if old_session_was_locked:
self.acquire_lock()
if self.debug:
cherrypy.log('Regenerated lock acquired.', 'TOOLS.SESSIONS')
def clean_up(self):
"""Clean up expired sessions."""
pass
def generate_id(self):
"""Return a new session id."""
return binascii.hexlify(os.urandom(20)).decode('ascii')
def save(self):
"""Save session data."""
try:
# If session data has never been loaded then it's never been
# accessed: no need to save it
if self.loaded:
t = datetime.timedelta(seconds=self.timeout * 60)
expiration_time = self.now() + t
if self.debug:
cherrypy.log('Saving session %r with expiry %s' %
(self.id, expiration_time),
'TOOLS.SESSIONS')
self._save(expiration_time)
else:
if self.debug:
cherrypy.log(
'Skipping save of session %r (no session loaded).' %
self.id, 'TOOLS.SESSIONS')
finally:
if self.locked:
# Always release the lock if the user didn't release it
self.release_lock()
if self.debug:
cherrypy.log('Lock released after save.', 'TOOLS.SESSIONS')
def load(self):
"""Copy stored session data into this session instance."""
data = self._load()
# data is either None or a tuple (session_data, expiration_time)
if data is None or data[1] < self.now():
if self.debug:
cherrypy.log('Expired session %r, flushing data.' % self.id,
'TOOLS.SESSIONS')
self._data = {}
else:
if self.debug:
cherrypy.log('Data loaded for session %r.' % self.id,
'TOOLS.SESSIONS')
self._data = data[0]
self.loaded = True
# Stick the clean_thread in the class, not the instance.
# The instances are created and destroyed per-request.
cls = self.__class__
if self.clean_freq and not cls.clean_thread:
# clean_up is an instancemethod and not a classmethod,
# so that tool config can be accessed inside the method.
t = cherrypy.process.plugins.Monitor(
cherrypy.engine, self.clean_up, self.clean_freq * 60,
name='Session cleanup')
t.subscribe()
cls.clean_thread = t
t.start()
if self.debug:
cherrypy.log('Started cleanup thread.', 'TOOLS.SESSIONS')
def delete(self):
"""Delete stored session data."""
self._delete()
if self.debug:
cherrypy.log('Deleted session %s.' % self.id,
'TOOLS.SESSIONS')
# -------------------- Application accessor methods -------------------- #
def __getitem__(self, key):
if not self.loaded:
self.load()
return self._data[key]
def __setitem__(self, key, value):
if not self.loaded:
self.load()
self._data[key] = value
def __delitem__(self, key):
if not self.loaded:
self.load()
del self._data[key]
def pop(self, key, default=missing):
"""Remove the specified key and return the corresponding value.
If key is not found, default is returned if given,
otherwise KeyError is raised.
"""
if not self.loaded:
self.load()
if default is missing:
return self._data.pop(key)
else:
return self._data.pop(key, default)
def __contains__(self, key):
if not self.loaded:
self.load()
return key in self._data
def get(self, key, default=None):
"""D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None."""
if not self.loaded:
self.load()
return self._data.get(key, default)
def update(self, d):
"""D.update(E) -> None. Update D from E: for k in E: D[k] = E[k]."""
if not self.loaded:
self.load()
self._data.update(d)
def setdefault(self, key, default=None):
"""D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D."""
if not self.loaded:
self.load()
return self._data.setdefault(key, default)
def clear(self):
"""D.clear() -> None. Remove all items from D."""
if not self.loaded:
self.load()
self._data.clear()
def keys(self):
"""D.keys() -> list of D's keys."""
if not self.loaded:
self.load()
return self._data.keys()
def items(self):
"""D.items() -> list of D's (key, value) pairs, as 2-tuples."""
if not self.loaded:
self.load()
return self._data.items()
def values(self):
"""D.values() -> list of D's values."""
if not self.loaded:
self.load()
return self._data.values()
class RamSession(Session):
# Class-level objects. Don't rebind these!
cache = {}
locks = {}
def clean_up(self):
"""Clean up expired sessions."""
now = self.now()
for _id, (data, expiration_time) in self.cache.copy().items():
if expiration_time <= now:
try:
del self.cache[_id]
except KeyError:
pass
try:
if self.locks[_id].acquire(blocking=False):
lock = self.locks.pop(_id)
lock.release()
except KeyError:
pass
# added to remove obsolete lock objects
for _id in list(self.locks):
locked = (
_id not in self.cache
and self.locks[_id].acquire(blocking=False)
)
if locked:
lock = self.locks.pop(_id)
lock.release()
def _exists(self):
return self.id in self.cache
def _load(self):
return self.cache.get(self.id)
def _save(self, expiration_time):
self.cache[self.id] = (self._data, expiration_time)
def _delete(self):
self.cache.pop(self.id, None)
def acquire_lock(self):
"""Acquire an exclusive lock on the currently-loaded session data."""
self.locked = True
self.locks.setdefault(self.id, threading.RLock()).acquire()
def release_lock(self):
"""Release the lock on the currently-loaded session data."""
self.locks[self.id].release()
self.locked = False
def __len__(self):
"""Return the number of active sessions."""
return len(self.cache)
class FileSession(Session):
"""Implementation of the File backend for sessions
storage_path
The folder where session data will be saved. Each session
will be saved as a pickled (data, expiration_time) tuple in its own file;
the filename will be self.SESSION_PREFIX + self.id.
lock_timeout
A timedelta or numeric seconds indicating how long
to block acquiring a lock. If None (default), acquiring a lock
will block indefinitely.
"""
SESSION_PREFIX = 'session-'
LOCK_SUFFIX = '.lock'
pickle_protocol = pickle.HIGHEST_PROTOCOL
def __init__(self, id=None, **kwargs):
# The 'storage_path' arg is required for file-based sessions.
kwargs['storage_path'] = os.path.abspath(kwargs['storage_path'])
kwargs.setdefault('lock_timeout', None)
Session.__init__(self, id=id, **kwargs)
# validate self.lock_timeout
if isinstance(self.lock_timeout, (int, float)):
self.lock_timeout = datetime.timedelta(seconds=self.lock_timeout)
if not isinstance(self.lock_timeout, (datetime.timedelta, type(None))):
raise ValueError(
'Lock timeout must be numeric seconds or a timedelta instance.'
)
@classmethod
def setup(cls, **kwargs):
"""Set up the storage system for file-based sessions.
This should only be called once per process; this will be done
automatically when using sessions.init (as the built-in Tool does).
"""
# The 'storage_path' arg is required for file-based sessions.
kwargs['storage_path'] = os.path.abspath(kwargs['storage_path'])
for k, v in kwargs.items():
setattr(cls, k, v)
def _get_file_path(self):
f = os.path.join(self.storage_path, self.SESSION_PREFIX + self.id)
if not os.path.abspath(f).startswith(self.storage_path):
raise cherrypy.HTTPError(400, 'Invalid session id in cookie.')
return f
def _exists(self):
path = self._get_file_path()
return os.path.exists(path)
def _load(self, path=None):
assert self.locked, ('The session was loaded without being locked. '
"Check your tools' priority levels.")
if path is None:
path = self._get_file_path()
try:
with open(path, 'rb') as f:
return pickle.load(f)
except (IOError, EOFError):
e = sys.exc_info()[1]
if self.debug:
cherrypy.log('Error loading the session pickle: %s' %
e, 'TOOLS.SESSIONS')
return None
def _save(self, expiration_time):
assert self.locked, ('The session was saved without being locked. '
"Check your tools' priority levels.")
with open(self._get_file_path(), 'wb') as f:
pickle.dump((self._data, expiration_time), f, self.pickle_protocol)
def _delete(self):
assert self.locked, ('The session was deleted without being locked. '
"Check your tools' priority levels.")
try:
os.unlink(self._get_file_path())
except OSError:
pass
def acquire_lock(self, path=None):
"""Acquire an exclusive lock on the currently-loaded session data."""
if path is None:
path = self._get_file_path()
path += self.LOCK_SUFFIX
checker = locking.LockChecker(self.id, self.lock_timeout)
while not checker.expired():
try:
self.lock = zc.lockfile.LockFile(path)
except zc.lockfile.LockError:
time.sleep(0.1)
else:
break
self.locked = True
if self.debug:
cherrypy.log('Lock acquired.', 'TOOLS.SESSIONS')
def release_lock(self, path=None):
"""Release the lock on the currently-loaded session data."""
self.lock.close()
self.locked = False
def clean_up(self):
"""Clean up expired sessions."""
now = self.now()
# Iterate over all session files in self.storage_path
for fname in os.listdir(self.storage_path):
have_session = (
fname.startswith(self.SESSION_PREFIX)
and not fname.endswith(self.LOCK_SUFFIX)
)
if have_session:
# We have a session file: lock and load it and check
# if it's expired. If it fails, nevermind.
path = os.path.join(self.storage_path, fname)
self.acquire_lock(path)
if self.debug:
# This is a bit of a hack, since we're calling clean_up
# on the first instance rather than the entire class,
# so depending on whether you have "debug" set on the
# path of the first session called, this may not run.
cherrypy.log('Cleanup lock acquired.', 'TOOLS.SESSIONS')
try:
contents = self._load(path)
# _load returns None on IOError
if contents is not None:
data, expiration_time = contents
if expiration_time < now:
# Session expired: deleting it
os.unlink(path)
finally:
self.release_lock(path)
def __len__(self):
"""Return the number of active sessions."""
return len([fname for fname in os.listdir(self.storage_path)
if (fname.startswith(self.SESSION_PREFIX) and
not fname.endswith(self.LOCK_SUFFIX))])
class MemcachedSession(Session):
# The most popular memcached client for Python isn't thread-safe.
# Wrap all .get and .set operations in a single lock.
mc_lock = threading.RLock()
# This is a separate set of locks per session id.
locks = {}
servers = ['localhost:11211']
@classmethod
def setup(cls, **kwargs):
"""Set up the storage system for memcached-based sessions.
This should only be called once per process; this will be done
automatically when using sessions.init (as the built-in Tool does).
"""
for k, v in kwargs.items():
setattr(cls, k, v)
import memcache
cls.cache = memcache.Client(cls.servers)
def _exists(self):
self.mc_lock.acquire()
try:
return bool(self.cache.get(self.id))
finally:
self.mc_lock.release()
def _load(self):
self.mc_lock.acquire()
try:
return self.cache.get(self.id)
finally:
self.mc_lock.release()
def _save(self, expiration_time):
# Send the expiration time as "Unix time" (seconds since 1/1/1970)
td = int(time.mktime(expiration_time.timetuple()))
self.mc_lock.acquire()
try:
if not self.cache.set(self.id, (self._data, expiration_time), td):
raise AssertionError(
'Session data for id %r not set.' % self.id)
finally:
self.mc_lock.release()
def _delete(self):
self.cache.delete(self.id)
def acquire_lock(self):
"""Acquire an exclusive lock on the currently-loaded session data."""
self.locked = True
self.locks.setdefault(self.id, threading.RLock()).acquire()
if self.debug:
cherrypy.log('Lock acquired.', 'TOOLS.SESSIONS')
def release_lock(self):
"""Release the lock on the currently-loaded session data."""
self.locks[self.id].release()
self.locked = False
def __len__(self):
"""Return the number of active sessions."""
raise NotImplementedError
# Hook functions (for CherryPy tools)
def save():
"""Save any changed session data."""
if not hasattr(cherrypy.serving, 'session'):
return
request = cherrypy.serving.request
response = cherrypy.serving.response
# Guard against running twice
if hasattr(request, '_sessionsaved'):
return
request._sessionsaved = True
if response.stream:
# If the body is being streamed, we have to save the data
# *after* the response has been written out
request.hooks.attach('on_end_request', cherrypy.session.save)
else:
# If the body is not being streamed, we save the data now
# (so we can release the lock).
if is_iterator(response.body):
response.collapse_body()
cherrypy.session.save()
save.failsafe = True
def close():
"""Close the session object for this request."""
sess = getattr(cherrypy.serving, 'session', None)
if getattr(sess, 'locked', False):
# If the session is still locked we release the lock
sess.release_lock()
if sess.debug:
cherrypy.log('Lock released on close.', 'TOOLS.SESSIONS')
close.failsafe = True
close.priority = 90
def init(storage_type=None, path=None, path_header=None, name='session_id',
timeout=60, domain=None, secure=False, clean_freq=5,
persistent=True, httponly=False, debug=False,
# Py27 compat
# *, storage_class=RamSession,
**kwargs):
"""Initialize session object (using cookies).
storage_class
The Session subclass to use. Defaults to RamSession.
storage_type
(deprecated)
One of 'ram', 'file', 'memcached'. This will be
used to look up the corresponding class in cherrypy.lib.sessions
globals. For example, 'file' will use the FileSession class.
path
The 'path' value to stick in the response cookie metadata.
path_header
If 'path' is None (the default), then the response
cookie 'path' will be pulled from request.headers[path_header].
name
The name of the cookie.
timeout
The expiration timeout (in minutes) for the stored session data.
If 'persistent' is True (the default), this is also the timeout
for the cookie.
domain
The cookie domain.
secure
If False (the default) the cookie 'secure' value will not
be set. If True, the cookie 'secure' value will be set (to 1).
clean_freq (minutes)
The poll rate for expired session cleanup.
persistent
If True (the default), the 'timeout' argument will be used
to expire the cookie. If False, the cookie will not have an expiry,
and the cookie will be a "session cookie" which expires when the
browser is closed.
httponly
If False (the default) the cookie 'httponly' value will not be set.
If True, the cookie 'httponly' value will be set (to 1).
Any additional kwargs will be bound to the new Session instance,
and may be specific to the storage type. See the subclass of Session
you're using for more information.
"""
# Py27 compat
storage_class = kwargs.pop('storage_class', RamSession)
request = cherrypy.serving.request
# Guard against running twice
if hasattr(request, '_session_init_flag'):
return
request._session_init_flag = True
# Check if request came with a session ID
id = None
if name in request.cookie:
id = request.cookie[name].value
if debug:
cherrypy.log('ID obtained from request.cookie: %r' % id,
'TOOLS.SESSIONS')
first_time = not hasattr(cherrypy, 'session')
if storage_type:
if first_time:
msg = 'storage_type is deprecated. Supply storage_class instead'
cherrypy.log(msg)
storage_class = storage_type.title() + 'Session'
storage_class = globals()[storage_class]
# call setup first time only
if first_time:
if hasattr(storage_class, 'setup'):
storage_class.setup(**kwargs)
# Create and attach a new Session instance to cherrypy.serving.
# It will possess a reference to (and lock, and lazily load)
# the requested session data.
kwargs['timeout'] = timeout
kwargs['clean_freq'] = clean_freq
cherrypy.serving.session = sess = storage_class(id, **kwargs)
sess.debug = debug
def update_cookie(id):
"""Update the cookie every time the session id changes."""
cherrypy.serving.response.cookie[name] = id
sess.id_observers.append(update_cookie)
# Create cherrypy.session which will proxy to cherrypy.serving.session
if not hasattr(cherrypy, 'session'):
cherrypy.session = cherrypy._ThreadLocalProxy('session')
if persistent:
cookie_timeout = timeout
else:
# See http://support.microsoft.com/kb/223799/EN-US/
# and http://support.mozilla.com/en-US/kb/Cookies
cookie_timeout = None
set_response_cookie(path=path, path_header=path_header, name=name,
timeout=cookie_timeout, domain=domain, secure=secure,
httponly=httponly)
def set_response_cookie(path=None, path_header=None, name='session_id',
timeout=60, domain=None, secure=False, httponly=False):
"""Set a response cookie for the client.
path
the 'path' value to stick in the response cookie metadata.
path_header
if 'path' is None (the default), then the response
cookie 'path' will be pulled from request.headers[path_header].
name
the name of the cookie.
timeout
the expiration timeout for the cookie. If 0 or other boolean
False, no 'expires' param will be set, and the cookie will be a
"session cookie" which expires when the browser is closed.
domain
the cookie domain.
secure
if False (the default) the cookie 'secure' value will not
be set. If True, the cookie 'secure' value will be set (to 1).
httponly
If False (the default) the cookie 'httponly' value will not be set.
If True, the cookie 'httponly' value will be set (to 1).
"""
# Set response cookie
cookie = cherrypy.serving.response.cookie
cookie[name] = cherrypy.serving.session.id
cookie[name]['path'] = (
path or
cherrypy.serving.request.headers.get(path_header) or
'/'
)
if timeout:
cookie[name]['max-age'] = timeout * 60
_add_MSIE_max_age_workaround(cookie[name], timeout)
if domain is not None:
cookie[name]['domain'] = domain
if secure:
cookie[name]['secure'] = 1
if httponly:
if not cookie[name].isReservedKey('httponly'):
raise ValueError('The httponly cookie token is not supported.')
cookie[name]['httponly'] = 1
def _add_MSIE_max_age_workaround(cookie, timeout):
"""
We'd like to use the "max-age" param as indicated in
http://www.faqs.org/rfcs/rfc2109.html but IE doesn't
save it to disk and the session is lost if people close
the browser. So we have to use the old "expires" ... sigh ...
"""
expires = time.time() + timeout * 60
cookie['expires'] = httputil.HTTPDate(expires)
def expire():
"""Expire the current session cookie."""
name = cherrypy.serving.request.config.get(
'tools.sessions.name', 'session_id')
one_year = 60 * 60 * 24 * 365
e = time.time() - one_year
cherrypy.serving.response.cookie[name]['expires'] = httputil.HTTPDate(e)
cherrypy.serving.response.cookie[name].pop('max-age', None)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import re
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_variables # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
def exception_to_validation_msg(e):
"""Extracts a validation message to display to the user."""
try:
error = json.loads(str(e))
# NOTE(jianingy): if no message exists, we just return 'None'
# and let the caller decide what to show
return error['error'].get('message', None)
except Exception:
# NOTE(jianingy): fall back to the legacy message parsing approach
# if the error message isn't valid JSON or the JSON doesn't have
# the expected format.
validation_patterns = [
"Remote error: \w* {'Error': '(.*?)'}",
'Remote error: \w* (.*?) \[',
'400 Bad Request\n\nThe server could not comply with the request '
'since it is either malformed or otherwise incorrect.\n\n (.*)',
'(ParserError: .*)'
]
for pattern in validation_patterns:
match = re.search(pattern, str(e))
if match:
return match.group(1)
def create_upload_form_attributes(prefix, input_type, name):
"""Creates attribute dicts for the switchable upload form
:type prefix: str
:param prefix: prefix (environment, template) of field
:type input_type: str
:param input_type: field type (file, raw, url)
:type name: str
:param name: translated text label to display to user
:rtype: dict
:return: an attribute set to pass to form build
"""
attributes = {'class': 'switched', 'data-switch-on': prefix + 'source'}
attributes['data-' + prefix + 'source-' + input_type] = name
return attributes
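# A rough sketch of the attribute dict this helper produces (shown with plain
# strings; at runtime ``name`` is a lazy translation object):
#   create_upload_form_attributes('template', 'file', 'Template File')
#   -> {'class': 'switched',
#       'data-switch-on': 'templatesource',
#       'data-templatesource-file': 'Template File'}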
class TemplateForm(forms.SelfHandlingForm):
class Meta:
name = _('Select Template')
help_text = _('From here you can select a template to launch '
'a stack.')
choices = [('url', _('URL')),
('file', _('File')),
('raw', _('Direct Input'))]
attributes = {'class': 'switchable', 'data-slug': 'templatesource'}
template_source = forms.ChoiceField(label=_('Template Source'),
choices=choices,
widget=forms.Select(attrs=attributes))
attributes = create_upload_form_attributes(
'template',
'file',
_('Template File'))
template_upload = forms.FileField(
label=_('Template File'),
help_text=_('A local template to upload.'),
widget=forms.FileInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'template',
'url',
_('Template URL'))
template_url = forms.URLField(
label=_('Template URL'),
help_text=_('An external (HTTP) URL to load the template from.'),
widget=forms.TextInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'template',
'raw',
_('Template Data'))
template_data = forms.CharField(
label=_('Template Data'),
help_text=_('The raw contents of the template.'),
widget=forms.widgets.Textarea(attrs=attributes),
required=False)
attributes = {'data-slug': 'envsource', 'class': 'switchable'}
environment_source = forms.ChoiceField(
label=_('Environment Source'),
choices=choices,
widget=forms.Select(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
'file',
_('Environment File'))
environment_upload = forms.FileField(
label=_('Environment File'),
help_text=_('A local environment to upload.'),
widget=forms.FileInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
'url',
_('Environment URL'))
environment_url = forms.URLField(
label=_('Environment URL'),
help_text=_('An external (HTTP) URL to load the environment from.'),
widget=forms.TextInput(attrs=attributes),
required=False)
attributes = create_upload_form_attributes(
'env',
'raw',
_('Environment Data'))
environment_data = forms.CharField(
label=_('Environment Data'),
help_text=_('The raw contents of the environment file.'),
widget=forms.widgets.Textarea(attrs=attributes),
required=False)
def __init__(self, *args, **kwargs):
self.next_view = kwargs.pop('next_view')
super(TemplateForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned = super(TemplateForm, self).clean()
files = self.request.FILES
self.clean_uploaded_files('template', _('template'), cleaned, files)
self.clean_uploaded_files('environment',
_('environment'),
cleaned,
files)
# Validate the template and get back the params.
kwargs = {}
if cleaned['template_data']:
kwargs['template'] = cleaned['template_data']
else:
kwargs['template_url'] = cleaned['template_url']
try:
validated = api.heat.template_validate(self.request, **kwargs)
cleaned['template_validate'] = validated
except Exception as e:
msg = exception_to_validation_msg(e)
if not msg:
msg = _('An unknown problem occurred validating the template.')
LOG.exception(msg)
raise forms.ValidationError(msg)
return cleaned
def clean_uploaded_files(self, prefix, field_label, cleaned, files):
"""Cleans Template & Environment data from form upload.
Does some of the crunchy bits for processing uploads vs raw
data depending on what the user specified. Identical process
for environment data & template data.
:type prefix: str
:param prefix: prefix (environment, template) of field
:type field_label: str
:param field_label: translated prefix str for messages
:type cleaned: dict
:param cleaned: existing cleaned fields from form
:type files: dict
:param files: uploaded file data from the request (request.FILES)
:rtype: dict
:return: cleaned dict including environment & template data
"""
upload_str = prefix + "_upload"
data_str = prefix + "_data"
url = cleaned.get(prefix + '_url')
data = cleaned.get(prefix + '_data')
has_upload = upload_str in files
# Uploaded file handler
if has_upload and not url:
log_template_name = files[upload_str].name
LOG.info('got upload %s' % log_template_name)
tpl = files[upload_str].read()
if tpl.startswith('{'):
try:
json.loads(tpl)
except Exception as e:
msg = _('There was a problem parsing the'
' %(prefix)s: %(error)s')
msg = msg % {'prefix': prefix, 'error': e}
raise forms.ValidationError(msg)
cleaned[data_str] = tpl
# URL handler
elif url and (has_upload or data):
msg = _('Please specify a %s using only one source method.')
msg = msg % field_label
raise forms.ValidationError(msg)
elif prefix == 'template':
# Check for raw template input - blank environment allowed
if not url and not data:
msg = _('You must specify a template via one of the '
'available sources.')
raise forms.ValidationError(msg)
def create_kwargs(self, data):
kwargs = {'parameters': data['template_validate'],
'environment_data': data['environment_data'],
'environment_url': data['environment_url'],
'template_data': data['template_data'],
'template_url': data['template_url']}
if data.get('stack_id'):
kwargs['stack_id'] = data['stack_id']
return kwargs
def handle(self, request, data):
kwargs = self.create_kwargs(data)
# NOTE (gabriel): This is a bit of a hack, essentially rewriting this
# request so that we can chain it as an input to the next view...
# but hey, it totally works.
request.method = 'GET'
return self.next_view.as_view()(request, **kwargs)
class ChangeTemplateForm(TemplateForm):
class Meta:
name = _('Edit Template')
help_text = _('From here you can select a new template to re-launch '
'a stack.')
stack_id = forms.CharField(label=_('Stack ID'),
widget=forms.widgets.HiddenInput,
required=True)
stack_name = forms.CharField(label=_('Stack Name'),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}
))
class CreateStackForm(forms.SelfHandlingForm):
param_prefix = '__param_'
class Meta:
name = _('Create Stack')
template_data = forms.CharField(
widget=forms.widgets.HiddenInput,
required=False)
template_url = forms.CharField(
widget=forms.widgets.HiddenInput,
required=False)
environment_data = forms.CharField(
widget=forms.widgets.HiddenInput,
required=False)
environment_url = forms.CharField(
widget=forms.widgets.HiddenInput,
required=False)
parameters = forms.CharField(
widget=forms.widgets.HiddenInput,
required=True)
stack_name = forms.RegexField(
max_length=255,
label=_('Stack Name'),
help_text=_('Name of the stack to create.'),
regex=r"^[a-zA-Z][a-zA-Z0-9_.-]*$",
error_messages={'invalid': _('Name must start with a letter and may '
'only contain letters, numbers, underscores, '
'periods and hyphens.')},
required=True)
timeout_mins = forms.IntegerField(
initial=60,
label=_('Creation Timeout (minutes)'),
help_text=_('Stack creation timeout in minutes.'),
required=True)
enable_rollback = forms.BooleanField(
label=_('Rollback On Failure'),
help_text=_('Enable rollback on create/update failure.'),
required=False)
def __init__(self, *args, **kwargs):
parameters = kwargs.pop('parameters')
# special case: load template data from API, not passed in params
if kwargs.get('validate_me'):
parameters = kwargs.pop('validate_me')
super(CreateStackForm, self).__init__(*args, **kwargs)
self._build_parameter_fields(parameters)
def _build_parameter_fields(self, template_validate):
self.fields['password'] = forms.CharField(
label=_('Password for user "%s"') % self.request.user.username,
help_text=_('This is required for operations to be performed '
'throughout the lifecycle of the stack'),
required=True,
widget=forms.PasswordInput())
self.help_text = template_validate['Description']
params = template_validate.get('Parameters', {})
for param_key, param in params.items():
field_key = self.param_prefix + param_key
field_args = {
'initial': param.get('Default', None),
'label': param_key,
'help_text': param.get('Description', ''),
'required': param.get('Default', None) is None
}
param_type = param.get('Type', None)
if 'AllowedValues' in param:
choices = map(lambda x: (x, x), param['AllowedValues'])
field_args['choices'] = choices
field = forms.ChoiceField(**field_args)
elif param_type in ('CommaDelimitedList', 'String'):
if 'MinLength' in param:
field_args['min_length'] = int(param['MinLength'])
field_args['required'] = param.get('MinLength', 0) > 0
if 'MaxLength' in param:
field_args['max_length'] = int(param['MaxLength'])
field = forms.CharField(**field_args)
elif param_type == 'Number':
if 'MinValue' in param:
field_args['min_value'] = int(param['MinValue'])
if 'MaxValue' in param:
field_args['max_value'] = int(param['MaxValue'])
field = forms.IntegerField(**field_args)
self.fields[field_key] = field
@sensitive_variables('password')
def handle(self, request, data):
prefix_length = len(self.param_prefix)
params_list = [(k[prefix_length:], v) for (k, v) in data.iteritems()
if k.startswith(self.param_prefix)]
fields = {
'stack_name': data.get('stack_name'),
'timeout_mins': data.get('timeout_mins'),
'disable_rollback': not(data.get('enable_rollback')),
'parameters': dict(params_list),
'password': data.get('password')
}
if data.get('template_data'):
fields['template'] = data.get('template_data')
else:
fields['template_url'] = data.get('template_url')
if data.get('environment_data'):
fields['environment'] = data.get('environment_data')
elif data.get('environment_url'):
fields['environment_url'] = data.get('environment_url')
try:
api.heat.stack_create(self.request, **fields)
messages.success(request, _("Stack creation started."))
return True
except Exception as e:
msg = exception_to_validation_msg(e)
exceptions.handle(request, msg or _('Stack creation failed.'))
class EditStackForm(CreateStackForm):
class Meta:
name = _('Update Stack Parameters')
stack_id = forms.CharField(label=_('Stack ID'),
widget=forms.widgets.HiddenInput,
required=True)
stack_name = forms.CharField(label=_('Stack Name'),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}
))
@sensitive_variables('password')
def handle(self, request, data):
prefix_length = len(self.param_prefix)
params_list = [(k[prefix_length:], v) for (k, v) in data.iteritems()
if k.startswith(self.param_prefix)]
stack_id = data.get('stack_id')
fields = {
'stack_name': data.get('stack_name'),
'timeout_mins': data.get('timeout_mins'),
'disable_rollback': not(data.get('enable_rollback')),
'parameters': dict(params_list),
'password': data.get('password')
}
# If the user went directly to this form, resubmit the existing
# template data. Otherwise, submit what they had from the first form.
if data.get('template_data'):
fields['template'] = data.get('template_data')
elif data.get('template_url'):
fields['template_url'] = data.get('template_url')
elif data.get('parameters'):
fields['template'] = data.get('parameters')
try:
api.heat.stack_update(self.request, stack_id=stack_id, **fields)
messages.success(request, _("Stack update started."))
return True
except Exception as e:
msg = exception_to_validation_msg(e)
exceptions.handle(request, msg or _('Stack update failed.'))
|
|
"""
Support for Honeywell Round Connected and Honeywell Evohome thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.honeywell/
"""
import logging
import socket
import datetime
import voluptuous as vol
import requests
from homeassistant.components.climate import (ClimateDevice, PLATFORM_SCHEMA,
ATTR_FAN_MODE, ATTR_FAN_LIST,
ATTR_OPERATION_MODE,
ATTR_OPERATION_LIST)
from homeassistant.const import (
CONF_PASSWORD, CONF_USERNAME, TEMP_CELSIUS, TEMP_FAHRENHEIT,
ATTR_TEMPERATURE)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['evohomeclient==0.2.5',
'somecomfort==0.4.1']
_LOGGER = logging.getLogger(__name__)
ATTR_FAN = 'fan'
ATTR_SYSTEM_MODE = 'system_mode'
ATTR_CURRENT_OPERATION = 'equipment_output_status'
CONF_AWAY_TEMPERATURE = 'away_temperature'
CONF_COOL_AWAY_TEMPERATURE = 'away_cool_temperature'
CONF_HEAT_AWAY_TEMPERATURE = 'away_heat_temperature'
CONF_REGION = 'region'
DEFAULT_AWAY_TEMPERATURE = 16
DEFAULT_COOL_AWAY_TEMPERATURE = 30
DEFAULT_HEAT_AWAY_TEMPERATURE = 16
DEFAULT_REGION = 'eu'
REGIONS = ['eu', 'us']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_AWAY_TEMPERATURE,
default=DEFAULT_AWAY_TEMPERATURE): vol.Coerce(float),
vol.Optional(CONF_COOL_AWAY_TEMPERATURE,
default=DEFAULT_COOL_AWAY_TEMPERATURE): vol.Coerce(float),
vol.Optional(CONF_HEAT_AWAY_TEMPERATURE,
default=DEFAULT_HEAT_AWAY_TEMPERATURE): vol.Coerce(float),
vol.Optional(CONF_REGION, default=DEFAULT_REGION): vol.In(REGIONS),
})
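# Illustrative configuration.yaml entry for the schema above (a sketch with
# placeholder values; only username and password are required, the remaining
# keys fall back to the defaults defined above):
#
#   climate:
#     - platform: honeywell
#       username: YOUR_USERNAME
#       password: YOUR_PASSWORD
#       region: us
#       away_cool_temperature: 28
#       away_heat_temperature: 16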
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Honeywell thermostat."""
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
region = config.get(CONF_REGION)
if region == 'us':
return _setup_us(username, password, config, add_devices)
else:
return _setup_round(username, password, config, add_devices)
def _setup_round(username, password, config, add_devices):
"""Setup rounding function."""
from evohomeclient import EvohomeClient
away_temp = config.get(CONF_AWAY_TEMPERATURE)
evo_api = EvohomeClient(username, password)
try:
zones = evo_api.temperatures(force_refresh=True)
for i, zone in enumerate(zones):
add_devices(
[RoundThermostat(evo_api, zone['id'], i == 0, away_temp)]
)
except socket.error:
_LOGGER.error(
"Connection error logging into the honeywell evohome web service")
return False
return True
# The config object is needed below for the optional 'thermostat' and 'location' filters.
def _setup_us(username, password, config, add_devices):
"""Setup user."""
import somecomfort
try:
client = somecomfort.SomeComfort(username, password)
except somecomfort.AuthError:
_LOGGER.error('Failed to login to honeywell account %s', username)
return False
except somecomfort.SomeComfortError as ex:
_LOGGER.error('Failed to initialize honeywell client: %s', str(ex))
return False
dev_id = config.get('thermostat')
loc_id = config.get('location')
cool_away_temp = config.get(CONF_COOL_AWAY_TEMPERATURE)
heat_away_temp = config.get(CONF_HEAT_AWAY_TEMPERATURE)
add_devices([HoneywellUSThermostat(client, device, cool_away_temp,
heat_away_temp, username, password)
for location in client.locations_by_id.values()
for device in location.devices_by_id.values()
if ((not loc_id or location.locationid == loc_id) and
(not dev_id or device.deviceid == dev_id))])
return True
class RoundThermostat(ClimateDevice):
"""Representation of a Honeywell Round Connected thermostat."""
def __init__(self, device, zone_id, master, away_temp):
"""Initialize the thermostat."""
self.device = device
self._current_temperature = None
self._target_temperature = None
self._name = 'round connected'
self._id = zone_id
self._master = master
self._is_dhw = False
self._away_temp = away_temp
self._away = False
self.update()
@property
def name(self):
"""Return the name of the honeywell, if any."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self._is_dhw:
return None
return self._target_temperature
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
self.device.set_temperature(self._name, temperature)
@property
def current_operation(self: ClimateDevice) -> str:
"""Get the current operation of the system."""
return getattr(self.device, ATTR_SYSTEM_MODE, None)
@property
def is_away_mode_on(self):
"""Return true if away mode is on."""
return self._away
def set_operation_mode(self: ClimateDevice, operation_mode: str) -> None:
"""Set the HVAC mode for the thermostat."""
if hasattr(self.device, ATTR_SYSTEM_MODE):
self.device.system_mode = operation_mode
def turn_away_mode_on(self):
"""Turn away on.
Honeywell does have a proprietary away mode, but it doesn't really work
the way it should. For example: If you set a temperature manually
it doesn't get overwritten when away mode is switched on.
"""
self._away = True
self.device.set_temperature(self._name, self._away_temp)
def turn_away_mode_off(self):
"""Turn away off."""
self._away = False
self.device.cancel_temp_override(self._name)
def update(self):
"""Get the latest date."""
try:
# Only refresh if this is the "master" device,
# others will pick up the cache
for val in self.device.temperatures(force_refresh=self._master):
if val['id'] == self._id:
data = val
except StopIteration:
_LOGGER.error("Did not receive any temperature data from the "
"evohomeclient API.")
return
self._current_temperature = data['temp']
self._target_temperature = data['setpoint']
if data['thermostat'] == 'DOMESTIC_HOT_WATER':
self._name = 'Hot Water'
self._is_dhw = True
else:
self._name = data['name']
self._is_dhw = False
class HoneywellUSThermostat(ClimateDevice):
"""Representation of a Honeywell US Thermostat."""
def __init__(self, client, device, cool_away_temp,
heat_away_temp, username, password):
"""Initialize the thermostat."""
self._client = client
self._device = device
self._cool_away_temp = cool_away_temp
self._heat_away_temp = heat_away_temp
self._away = False
self._username = username
self._password = password
@property
def is_fan_on(self):
"""Return true if fan is on."""
return self._device.fan_running
@property
def name(self):
"""Return the name of the honeywell, if any."""
return self._device.name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return (TEMP_CELSIUS if self._device.temperature_unit == 'C'
else TEMP_FAHRENHEIT)
@property
def current_temperature(self):
"""Return the current temperature."""
return self._device.current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self._device.system_mode == 'cool':
return self._device.setpoint_cool
else:
return self._device.setpoint_heat
@property
def current_operation(self: ClimateDevice) -> str:
"""Return current operation ie. heat, cool, idle."""
oper = getattr(self._device, ATTR_CURRENT_OPERATION, None)
if oper == "off":
oper = "idle"
return oper
def set_temperature(self, **kwargs):
"""Set target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
import somecomfort
try:
# Get current mode
mode = self._device.system_mode
# Set hold if this is not the case
if getattr(self._device, "hold_{}".format(mode)) is False:
# Get next period key
next_period_key = '{}NextPeriod'.format(mode.capitalize())
# Get next period raw value
next_period = self._device.raw_ui_data.get(next_period_key)
# Get next period time
hour, minute = divmod(next_period * 15, 60)
# Set hold time
setattr(self._device,
"hold_{}".format(mode),
datetime.time(hour, minute))
# Set temperature
setattr(self._device,
"setpoint_{}".format(mode),
temperature)
except somecomfort.SomeComfortError:
_LOGGER.error('Temperature %.1f out of range', temperature)
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
import somecomfort
data = {
ATTR_FAN: (self.is_fan_on and 'running' or 'idle'),
ATTR_FAN_MODE: self._device.fan_mode,
ATTR_OPERATION_MODE: self._device.system_mode,
}
data[ATTR_FAN_LIST] = somecomfort.FAN_MODES
data[ATTR_OPERATION_LIST] = somecomfort.SYSTEM_MODES
return data
@property
def is_away_mode_on(self):
"""Return true if away mode is on."""
return self._away
def turn_away_mode_on(self):
"""Turn away on.
Somecomfort does have a proprietary away mode, but it doesn't really
work the way it should. For example: If you set a temperature manually
it doesn't get overwritten when away mode is switched on.
"""
self._away = True
import somecomfort
try:
# Get current mode
mode = self._device.system_mode
except somecomfort.SomeComfortError:
_LOGGER.error('Can not get system mode')
return
try:
# Set permanent hold
setattr(self._device,
"hold_{}".format(mode),
True)
# Set temperature
setattr(self._device,
"setpoint_{}".format(mode),
getattr(self, "_{}_away_temp".format(mode)))
except somecomfort.SomeComfortError:
_LOGGER.error('Temperature %.1f out of range',
getattr(self, "_{}_away_temp".format(mode)))
def turn_away_mode_off(self):
"""Turn away off."""
self._away = False
import somecomfort
try:
# Disabling all hold modes
self._device.hold_cool = False
self._device.hold_heat = False
except somecomfort.SomeComfortError:
_LOGGER.error('Can not stop hold mode')
def set_operation_mode(self: ClimateDevice, operation_mode: str) -> None:
"""Set the system mode (Cool, Heat, etc)."""
if hasattr(self._device, ATTR_SYSTEM_MODE):
self._device.system_mode = operation_mode
def update(self):
"""Update the state."""
import somecomfort
retries = 3
while retries > 0:
try:
self._device.refresh()
break
except (somecomfort.client.APIRateLimited, OSError,
requests.exceptions.ReadTimeout) as exp:
retries -= 1
if retries == 0:
raise exp
if not self._retry():
raise exp
_LOGGER.error("SomeComfort update failed, Retrying "
"- Error: %s", exp)
def _retry(self):
"""Recreate a new somecomfort client.
When we got an error, the best way to be sure that the next query
will succeed, is to recreate a new somecomfort client.
"""
import somecomfort
try:
self._client = somecomfort.SomeComfort(self._username,
self._password)
except somecomfort.AuthError:
_LOGGER.error('Failed to login to honeywell account %s',
self._username)
return False
except somecomfort.SomeComfortError as ex:
_LOGGER.error('Failed to initialize honeywell client: %s',
str(ex))
return False
devices = [device
for location in self._client.locations_by_id.values()
for device in location.devices_by_id.values()
if device.name == self._device.name]
if len(devices) != 1:
_LOGGER.error('Failed to find device %s', self._device.name)
return False
self._device = devices[0]
return True
|
|
import pytest
from flask import Response
from werkzeug.exceptions import Forbidden
from flask_allows import Allows
from flask_allows.additional import Additional, current_additions
from flask_allows.overrides import Override, current_overrides
def test_warns_about_request_deprecation_with_old_style_requirement(member):
import warnings
allows = Allows(identity_loader=lambda: member)
req = lambda u, p: True # noqa: E731
repred = repr(req)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DeprecationWarning)
allows.fulfill([req])
warnings.simplefilter("default", DeprecationWarning)
assert len(w) == 1
assert issubclass(w[0].category, DeprecationWarning)
assert str(w[0].message).startswith(repred)
assert "Passing request to requirements is now deprecated" in str(w[0].message)
def test_Allows_defaults():
allows = Allows()
assert allows._identity_loader is None and allows.throws is Forbidden
def test_Allows_config_with_app(app):
allows = Allows(app)
assert hasattr(app, "extensions") and allows is app.extensions["allows"]
def test_Allows_init_app(app):
allows = Allows()
assert app.extensions == {}
allows.init_app(app)
assert hasattr(app, "extensions") and allows is app.extensions["allows"]
def test_Allows_identity_loader_on_init(member):
ident = lambda: member # noqa
allows = Allows(identity_loader=ident)
assert allows._identity_loader is ident
def test_Allows_custom_throws():
myforbid = Forbidden()
allows = Allows(throws=myforbid)
assert allows.throws is myforbid
def test_Allows_identity_loader_func(member):
allows = Allows()
@allows.identity_loader
def ident():
return member
assert allows._identity_loader is ident and allows._identity_loader() is member
def test_Allows_fulfill_true(member, always):
allows = Allows(identity_loader=lambda: member)
assert allows.fulfill([always])
assert always.called_with == {"user": member}
def test_Allows_fulfill_false(member, never):
allows = Allows(identity_loader=lambda: member)
assert not allows.fulfill([never])
def test_Allows_fulfill_ident_override(member, guest, spy):
allows = Allows(identity_loader=lambda: guest)
allows.fulfill([spy], identity=member)
assert spy.called_with["user"] is member
def test_allows_requires(member, ismember):
allows = Allows(identity_loader=lambda: member)
@allows.requires(ismember)
def stub():
return True
assert stub()
def test_allows_requires_throws(member, atleastmod):
allows = Allows(identity_loader=lambda: member)
@allows.requires(atleastmod)
def stub():
return True
with pytest.raises(Forbidden) as excinfo:
stub()
assert excinfo.value.code == 403
def test_allows_requires_throws_override(member, atleastmod):
class MyForbid(Forbidden):
pass
allows = Allows(identity_loader=lambda: member)
@allows.requires(atleastmod, throws=MyForbid("Go away"))
def stub():
pass
with pytest.raises(MyForbid) as excinfo:
stub()
assert "Go away" == excinfo.value.description
def test_allows_on_fail(member, atleastmod):
allows = Allows(
identity_loader=lambda: member, on_fail=lambda *a, **k: "I've failed"
)
@allows.requires(atleastmod)
def stub():
pass
assert stub() == "I've failed"
def test_allows_makes_on_fail_callable(member, atleastmod):
allows = Allows(identity_loader=lambda: member, on_fail="I've failed")
@allows.requires(atleastmod)
def stub():
pass
assert stub() == "I've failed"
def test_allows_on_fail_override_at_decoration(member, atleastmod):
allows = Allows(identity_loader=lambda: member)
@allows.requires(atleastmod, on_fail=lambda *a, **k: "Overridden failure")
def stub():
pass
assert stub() == "Overridden failure"
def test_allows_on_fail_returning_none_raises(member, atleastmod):
allows = Allows(on_fail=lambda *a, **k: None, identity_loader=lambda: member)
@allows.requires(atleastmod)
def stub():
pass
with pytest.raises(Forbidden):
stub()
def test_allows_can_call_requirements_with_old_and_new_style_arguments(member):
allows = Allows(identity_loader=lambda: member)
def new_style(user):
return True
def old_style(user, request):
return True
assert allows.fulfill([new_style, old_style])
def test_fulfills_skips_overridden_requirements(member, never):
allows = Allows(identity_loader=lambda: member)
allows.overrides.push(Override(never))
assert allows.fulfill([never])
# be a good test denizen and clean up
allows.overrides.pop()
def test_allows_cleans_up_override_contexts_in_after_request(
app, member, never, always
):
allows = Allows(app, identity_loader=lambda: member)
# need to route a request for this test so the whole before/after request
# cycle is invoked
@app.route("/")
def index():
assert allows.overrides.current.is_overridden(never)
assert allows.additional.current.is_added(always)
return Response("...")
@app.before_request
def disable_never(*a, **k):
current_overrides.add(never)
current_additions.add(always)
with app.test_request_context("/"):
app.preprocess_request()
result = index()
app.process_response(result)
assert allows.overrides.current is None
assert allows.additional.current is None
def test_fulfill_calls_additional_requirements(member, always):
allows = Allows(identity_loader=lambda: member)
allows.additional.push(Additional(always))
assert allows.fulfill([])
assert always.called
def test_req_not_called_when_both_added_and_overridden(member, never):
allows = Allows(identity_loader=lambda: member)
allows.additional.push(Additional(never))
allows.overrides.push(Override(never))
assert allows.fulfill([])
def test_req_called_once_even_if_added_multiple_times(member, counter):
allows = Allows(identity_loader=lambda: member)
allows.additional.push(Additional(counter, counter))
assert allows.fulfill([])
assert counter.count == 1
|
|
"""
Form classes
"""
from __future__ import absolute_import, unicode_literals
import copy
import warnings
from django.core.exceptions import ValidationError
from django.forms.fields import Field, FileField
from django.forms.util import flatatt, ErrorDict, ErrorList
from django.forms.widgets import Media, media_property, TextInput, Textarea
from django.utils.datastructures import SortedDict
from django.utils.html import conditional_escape, format_html
from django.utils.encoding import smart_text, force_text, python_2_unicode_compatible
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils import six
__all__ = ('BaseForm', 'Form')
NON_FIELD_ERRORS = '__all__'
def pretty_name(name):
"""Converts 'first_name' to 'First name'"""
if not name:
return ''
return name.replace('_', ' ').capitalize()
def get_declared_fields(bases, attrs, with_base_fields=True):
"""
Create a list of form field instances from the passed in 'attrs', plus any
similar fields on the base classes (in 'bases'). This is used by both the
Form and ModelForm metaclasses.
If 'with_base_fields' is True, all fields from the bases are used.
Otherwise, only fields in the 'declared_fields' attribute on the bases are
used. The distinction is useful in ModelForm subclassing.
Also integrates any additional media definitions.
"""
fields = [(field_name, attrs.pop(field_name)) for field_name, obj in list(six.iteritems(attrs)) if isinstance(obj, Field)]
fields.sort(key=lambda x: x[1].creation_counter)
# If this class is subclassing another Form, add that Form's fields.
# Note that we loop over the bases in *reverse*. This is necessary in
# order to preserve the correct order of fields.
if with_base_fields:
for base in bases[::-1]:
if hasattr(base, 'base_fields'):
fields = list(six.iteritems(base.base_fields)) + fields
else:
for base in bases[::-1]:
if hasattr(base, 'declared_fields'):
fields = list(six.iteritems(base.declared_fields)) + fields
return SortedDict(fields)
class DeclarativeFieldsMetaclass(type):
"""
Metaclass that converts Field attributes to a dictionary called
'base_fields', taking into account parent class 'base_fields' as well.
"""
def __new__(cls, name, bases, attrs):
attrs['base_fields'] = get_declared_fields(bases, attrs)
new_class = super(DeclarativeFieldsMetaclass,
cls).__new__(cls, name, bases, attrs)
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
@python_2_unicode_compatible
class BaseForm(object):
# This is the main implementation of all the Form logic. Note that this
# class is different than Form. See the comments by the Form class for more
# information. Any improvements to the form API should be made to *this*
# class, not to the Form class.
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList, label_suffix=':',
empty_permitted=False):
self.is_bound = data is not None or files is not None
self.data = data or {}
self.files = files or {}
self.auto_id = auto_id
self.prefix = prefix
self.initial = initial or {}
self.error_class = error_class
self.label_suffix = label_suffix
self.empty_permitted = empty_permitted
self._errors = None # Stores the errors after clean() has been called.
self._changed_data = None
# The base_fields class attribute is the *class-wide* definition of
# fields. Because a particular *instance* of the class might want to
# alter self.fields, we create self.fields here by copying base_fields.
# Instances should always modify self.fields; they should not modify
# self.base_fields.
self.fields = copy.deepcopy(self.base_fields)
def __str__(self):
return self.as_table()
def __iter__(self):
for name in self.fields:
yield self[name]
def __getitem__(self, name):
"Returns a BoundField with the given name."
try:
field = self.fields[name]
except KeyError:
raise KeyError('Key %r not found in Form' % name)
return BoundField(self, field, name)
@property
def errors(self):
"Returns an ErrorDict for the data provided for the form"
if self._errors is None:
self.full_clean()
return self._errors
def is_valid(self):
"""
Returns True if the form has no errors. Otherwise, False. If errors are
being ignored, returns False.
"""
return self.is_bound and not bool(self.errors)
def add_prefix(self, field_name):
"""
Returns the field name with a prefix appended, if this Form has a
prefix set.
Subclasses may wish to override.
"""
return self.prefix and ('%s-%s' % (self.prefix, field_name)) or field_name
def add_initial_prefix(self, field_name):
"""
Add an 'initial' prefix for checking dynamic initial values
"""
return 'initial-%s' % self.add_prefix(field_name)
def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):
"Helper function for outputting HTML. Used by as_table(), as_ul(), as_p()."
top_errors = self.non_field_errors() # Errors that should be displayed above all fields.
output, hidden_fields = [], []
for name, field in self.fields.items():
html_class_attr = ''
bf = self[name]
# Escape and cache in local variable.
bf_errors = self.error_class([conditional_escape(error) for error in bf.errors])
if bf.is_hidden:
if bf_errors:
top_errors.extend(
[_('(Hidden field %(name)s) %(error)s') % {'name': name, 'error': force_text(e)}
for e in bf_errors])
hidden_fields.append(six.text_type(bf))
else:
# Create a 'class="..."' atribute if the row should have any
# CSS classes applied.
css_classes = bf.css_classes()
if css_classes:
html_class_attr = ' class="%s"' % css_classes
if errors_on_separate_row and bf_errors:
output.append(error_row % force_text(bf_errors))
if bf.label:
label = conditional_escape(force_text(bf.label))
# Only add the suffix if the label does not end in
# punctuation.
if self.label_suffix:
if label[-1] not in ':?.!':
label = format_html('{0}{1}', label, self.label_suffix)
label = bf.label_tag(label) or ''
else:
label = ''
if field.help_text:
help_text = help_text_html % force_text(field.help_text)
else:
help_text = ''
output.append(normal_row % {
'errors': force_text(bf_errors),
'label': force_text(label),
'field': six.text_type(bf),
'help_text': help_text,
'html_class_attr': html_class_attr
})
if top_errors:
output.insert(0, error_row % force_text(top_errors))
if hidden_fields: # Insert any hidden fields in the last row.
str_hidden = ''.join(hidden_fields)
if output:
last_row = output[-1]
# Chop off the trailing row_ender (e.g. '</td></tr>') and
# insert the hidden fields.
if not last_row.endswith(row_ender):
# This can happen in the as_p() case (and possibly others
# that users write): if there are only top errors, we may
# not be able to conscript the last row for our purposes,
# so insert a new, empty row.
last_row = (normal_row % {'errors': '', 'label': '',
'field': '', 'help_text':'',
'html_class_attr': html_class_attr})
output.append(last_row)
output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender
else:
# If there aren't any rows in the output, just append the
# hidden fields.
output.append(str_hidden)
return mark_safe('\n'.join(output))
def as_table(self):
"Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
return self._html_output(
normal_row = '<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>',
error_row = '<tr><td colspan="2">%s</td></tr>',
row_ender = '</td></tr>',
help_text_html = '<br /><span class="helptext">%s</span>',
errors_on_separate_row = False)
def as_ul(self):
"Returns this form rendered as HTML <li>s -- excluding the <ul></ul>."
return self._html_output(
normal_row = '<li%(html_class_attr)s>%(errors)s%(label)s %(field)s%(help_text)s</li>',
error_row = '<li>%s</li>',
row_ender = '</li>',
help_text_html = ' <span class="helptext">%s</span>',
errors_on_separate_row = False)
def as_p(self):
"Returns this form rendered as HTML <p>s."
return self._html_output(
normal_row = '<p%(html_class_attr)s>%(label)s %(field)s%(help_text)s</p>',
error_row = '%s',
row_ender = '</p>',
help_text_html = ' <span class="helptext">%s</span>',
errors_on_separate_row = True)
def non_field_errors(self):
"""
Returns an ErrorList of errors that aren't associated with a particular
field -- i.e., from Form.clean(). Returns an empty ErrorList if there
are none.
"""
return self.errors.get(NON_FIELD_ERRORS, self.error_class())
def _raw_value(self, fieldname):
"""
Returns the raw_value for a particular field name. This is just a
convenient wrapper around widget.value_from_datadict.
"""
field = self.fields[fieldname]
prefix = self.add_prefix(fieldname)
return field.widget.value_from_datadict(self.data, self.files, prefix)
def full_clean(self):
"""
Cleans all of self.data and populates self._errors and
self.cleaned_data.
"""
self._errors = ErrorDict()
if not self.is_bound: # Stop further processing.
return
self.cleaned_data = {}
# If the form is permitted to be empty, and none of the form data has
# changed from the initial data, short circuit any validation.
if self.empty_permitted and not self.has_changed():
return
self._clean_fields()
self._clean_form()
self._post_clean()
def _clean_fields(self):
for name, field in self.fields.items():
# value_from_datadict() gets the data from the data dictionaries.
# Each widget type knows how to retrieve its own data, because some
# widgets split data over several HTML fields.
value = field.widget.value_from_datadict(self.data, self.files, self.add_prefix(name))
try:
if isinstance(field, FileField):
initial = self.initial.get(name, field.initial)
value = field.clean(value, initial)
else:
value = field.clean(value)
self.cleaned_data[name] = value
if hasattr(self, 'clean_%s' % name):
value = getattr(self, 'clean_%s' % name)()
self.cleaned_data[name] = value
except ValidationError as e:
self._errors[name] = self.error_class(e.messages)
if name in self.cleaned_data:
del self.cleaned_data[name]
def _clean_form(self):
try:
self.cleaned_data = self.clean()
except ValidationError as e:
self._errors[NON_FIELD_ERRORS] = self.error_class(e.messages)
def _post_clean(self):
"""
An internal hook for performing additional cleaning after form cleaning
is complete. Used for model validation in model forms.
"""
pass
def clean(self):
"""
Hook for doing any extra form-wide cleaning after Field.clean() has been
called on every field. Any ValidationError raised by this method will
not be associated with a particular field; it will have a special-case
association with the field named '__all__'.
"""
return self.cleaned_data
def has_changed(self):
"""
Returns True if data differs from initial.
"""
return bool(self.changed_data)
@property
def changed_data(self):
if self._changed_data is None:
self._changed_data = []
# XXX: For now we're asking the individual widgets whether or not the
# data has changed. It would probably be more efficient to hash the
# initial data, store it in a hidden field, and compare a hash of the
# submitted data, but we'd need a way to easily get the string value
# for a given field. Right now, that logic is embedded in the render
# method of each widget.
for name, field in self.fields.items():
prefixed_name = self.add_prefix(name)
data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name)
if not field.show_hidden_initial:
initial_value = self.initial.get(name, field.initial)
else:
initial_prefixed_name = self.add_initial_prefix(name)
hidden_widget = field.hidden_widget()
try:
initial_value = field.to_python(hidden_widget.value_from_datadict(
self.data, self.files, initial_prefixed_name))
except ValidationError:
# Always assume data has changed if validation fails.
self._changed_data.append(name)
continue
if hasattr(field.widget, '_has_changed'):
warnings.warn("The _has_changed method on widgets is deprecated,"
" define it at field level instead.",
PendingDeprecationWarning, stacklevel=2)
if field.widget._has_changed(initial_value, data_value):
self._changed_data.append(name)
elif field._has_changed(initial_value, data_value):
self._changed_data.append(name)
return self._changed_data
@property
def media(self):
"""
Provide a description of all media required to render the widgets on this form
"""
media = Media()
for field in self.fields.values():
media = media + field.widget.media
return media
def is_multipart(self):
"""
Returns True if the form needs to be multipart-encoded, i.e. it has
FileInput. Otherwise, False.
"""
for field in self.fields.values():
if field.widget.needs_multipart_form:
return True
return False
def hidden_fields(self):
"""
Returns a list of all the BoundField objects that are hidden fields.
Useful for manual form layout in templates.
"""
return [field for field in self if field.is_hidden]
def visible_fields(self):
"""
Returns a list of BoundField objects that aren't hidden fields.
The opposite of the hidden_fields() method.
"""
return [field for field in self if not field.is_hidden]
class Form(six.with_metaclass(DeclarativeFieldsMetaclass, BaseForm)):
"A collection of Fields, plus their associated data."
# This is a separate class from BaseForm in order to abstract the way
# self.fields is specified. This class (Form) is the one that does the
# fancy metaclass stuff purely for the semantic sugar -- it allows one
# to define a form using declarative syntax.
# BaseForm itself has no way of designating self.fields.
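# Illustrative declarative usage (a sketch; assumes CharField and EmailField
# are imported from django.forms.fields):
#
#   class ContactForm(Form):
#       subject = CharField(max_length=100)
#       sender = EmailField()
#
# DeclarativeFieldsMetaclass collects these Field attributes into
# ContactForm.base_fields, preserving declaration order via creation_counter.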
@python_2_unicode_compatible
class BoundField(object):
"A Field plus data"
def __init__(self, form, field, name):
self.form = form
self.field = field
self.name = name
self.html_name = form.add_prefix(name)
self.html_initial_name = form.add_initial_prefix(name)
self.html_initial_id = form.add_initial_prefix(self.auto_id)
if self.field.label is None:
self.label = pretty_name(name)
else:
self.label = self.field.label
self.help_text = field.help_text or ''
def __str__(self):
"""Renders this field as an HTML widget."""
if self.field.show_hidden_initial:
return self.as_widget() + self.as_hidden(only_initial=True)
return self.as_widget()
def __iter__(self):
"""
Yields rendered strings that comprise all widgets in this BoundField.
This really is only useful for RadioSelect widgets, so that you can
iterate over individual radio buttons in a template.
"""
for subwidget in self.field.widget.subwidgets(self.html_name, self.value()):
yield subwidget
def __len__(self):
return len(list(self.__iter__()))
def __getitem__(self, idx):
return list(self.__iter__())[idx]
@property
def errors(self):
"""
Returns an ErrorList for this field. Returns an empty ErrorList
if there are none.
"""
return self.form.errors.get(self.name, self.form.error_class())
def as_widget(self, widget=None, attrs=None, only_initial=False):
"""
Renders the field by rendering the passed widget, adding any HTML
attributes passed as attrs. If no widget is specified, then the
field's default widget will be used.
"""
if not widget:
widget = self.field.widget
attrs = attrs or {}
auto_id = self.auto_id
if auto_id and 'id' not in attrs and 'id' not in widget.attrs:
if not only_initial:
attrs['id'] = auto_id
else:
attrs['id'] = self.html_initial_id
if not only_initial:
name = self.html_name
else:
name = self.html_initial_name
return widget.render(name, self.value(), attrs=attrs)
def as_text(self, attrs=None, **kwargs):
"""
Returns a string of HTML for representing this as an <input type="text">.
"""
return self.as_widget(TextInput(), attrs, **kwargs)
def as_textarea(self, attrs=None, **kwargs):
"Returns a string of HTML for representing this as a <textarea>."
return self.as_widget(Textarea(), attrs, **kwargs)
def as_hidden(self, attrs=None, **kwargs):
"""
Returns a string of HTML for representing this as an <input type="hidden">.
"""
return self.as_widget(self.field.hidden_widget(), attrs, **kwargs)
@property
def data(self):
"""
Returns the data for this BoundField, or None if it wasn't given.
"""
return self.field.widget.value_from_datadict(self.form.data, self.form.files, self.html_name)
def value(self):
"""
Returns the value for this BoundField, using the initial value if
the form is not bound or the data otherwise.
"""
if not self.form.is_bound:
data = self.form.initial.get(self.name, self.field.initial)
if callable(data):
data = data()
else:
data = self.field.bound_data(
self.data, self.form.initial.get(self.name, self.field.initial)
)
return self.field.prepare_value(data)
def label_tag(self, contents=None, attrs=None):
"""
Wraps the given contents in a <label>, if the field has an ID attribute.
contents should be 'mark_safe'd to avoid HTML escaping. If contents
aren't given, uses the field's HTML-escaped label.
If attrs are given, they're used as HTML attributes on the <label> tag.
"""
contents = contents or conditional_escape(self.label)
widget = self.field.widget
id_ = widget.attrs.get('id') or self.auto_id
if id_:
attrs = attrs and flatatt(attrs) or ''
contents = format_html('<label for="{0}"{1}>{2}</label>',
widget.id_for_label(id_), attrs, contents
)
return mark_safe(contents)
def css_classes(self, extra_classes=None):
"""
Returns a string of space-separated CSS classes for this field.
"""
if hasattr(extra_classes, 'split'):
extra_classes = extra_classes.split()
extra_classes = set(extra_classes or [])
if self.errors and hasattr(self.form, 'error_css_class'):
extra_classes.add(self.form.error_css_class)
if self.field.required and hasattr(self.form, 'required_css_class'):
extra_classes.add(self.form.required_css_class)
return ' '.join(extra_classes)
@property
def is_hidden(self):
"Returns True if this BoundField's widget is hidden."
return self.field.widget.is_hidden
@property
def auto_id(self):
"""
Calculates and returns the ID attribute for this BoundField, if the
associated Form has specified auto_id. Returns an empty string otherwise.
"""
auto_id = self.form.auto_id
if auto_id and '%s' in smart_text(auto_id):
return smart_text(auto_id) % self.html_name
elif auto_id:
return self.html_name
return ''
@property
def id_for_label(self):
"""
Wrapper around the field widget's `id_for_label` method.
Useful, for example, for focusing on this field regardless of whether
it has a single widget or a MultiWidget.
"""
widget = self.field.widget
id_ = widget.attrs.get('id') or self.auto_id
return widget.id_for_label(id_)
|
|
"""
Interface and implementation of a Kademlia routing table.
Classes:
RoutingTable -- Interface
OptimizedTreeRoutingTable -- Implementation
"""
from abc import ABCMeta, abstractmethod
import logging
import time
from node import constants, guid, kbucket
class RoutingTable(object):
"""
Interface for routing table implementations.
Classes inheriting from this should provide a suitable routing table
for a parent Node object (i.e. the local entity in the Kademlia
network).
"""
__metaclass__ = ABCMeta
def __init__(self, parent_node_id, market_id):
"""
Initialize a new RoutingTable.
@param parent_node_id: The node ID of the node to which this
routing table belongs.
@type parent_node_id: guid.GUIDMixin or str or unicode
@param market_id: FILLME
@type: int
"""
self.market_id = market_id
self.parent_node_id = parent_node_id
self.log = logging.getLogger(
'[%s] %s' % (self.market_id, self.__class__.__name__)
)
@abstractmethod
def add_contact(self, node_id):
"""
Add the given node to the correct KBucket; if it already
exists, update its status.
@param node_id: The node ID of the contact to add to this node's KBuckets
@type node_id: guid.GUIDMixin or str or unicode
"""
pass
@staticmethod
def distance(node_id1, node_id2):
"""
Calculate the XOR result between two string variables.
@param node_id1: The ID of the first node.
@type node_id1: guid.GUIDMixin or str or unicode
@param node_id2: The ID of the second node.
@type node_id2: guid.GUIDMixin or str or unicode
@return: XOR result of two long variables
@rtype: long
@raises: ValueError: The strings have improper lengths for IDs.
"""
if isinstance(node_id1, guid.GUIDMixin):
key1 = node_id1.guid
else:
key1 = node_id1
if isinstance(node_id2, guid.GUIDMixin):
key2 = node_id2.guid
else:
key2 = node_id2
if len(key1) != constants.HEX_NODE_ID_LEN:
raise ValueError(
"node_id1 has invalid length %d; must be %d" % (
len(key1),
constants.HEX_NODE_ID_LEN
)
)
if len(key2) != constants.HEX_NODE_ID_LEN:
raise ValueError(
"node_id2 has invalid length %d; must be %d" % (
len(key2),
constants.HEX_NODE_ID_LEN
)
)
val_key1 = int(key1, base=16)
val_key2 = int(key2, base=16)
return val_key1 ^ val_key2
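# Worked example (IDs shortened for readability; real IDs must be
# constants.HEX_NODE_ID_LEN hex characters): for keys '0f0f' and 'f00f',
# the distance is 0x0f0f ^ 0xf00f == 0xff00.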
@staticmethod
def num_to_id(node_num):
"""
Converts an integer to a node ID.
It is the caller's responsibility to ensure the resulting
node ID falls in the ID space.
@param node_num: The integer to convert.
@type node_num: int
@return: A node ID (hex) corresponding to the number given.
@rtype: str
"""
# Convert to hex string.
node_id = hex(node_num)
# Strip '0x' prefix and 'L' suffix.
bare_node_id = node_id.lstrip("0x").rstrip("L")
# Pad to proper length and return.
return bare_node_id.rjust(constants.HEX_NODE_ID_LEN, '0')
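# For example, num_to_id(0xff00) returns 'ff00' left-padded with '0' up to
# constants.HEX_NODE_ID_LEN characters.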
@abstractmethod
def find_close_nodes(self, node_id, count, rpc_node_id=None):
"""
Find a number of known nodes closest to the node/value with the
specified ID.
@param node_id: The node ID to search for
@type node_id: guid.GUIDMixin or str or unicode
@param count: The amount of contacts to return
@type count: int
@param rpc_node_id: Used during RPC, this is the sender's node ID.
The ID passed as parameter is excluded from
the list of returned contacts.
@type rpc_node_id: guid.GUIDMixin or str or unicode
@return: A list of nodes closest to the specified key.
This method will return constants.K (or count, if
specified) contacts if at all possible; it will only
return fewer if the node is returning all of the
contacts that it knows of.
@rtype: list of guid.GUIDMixin
"""
pass
@abstractmethod
def get_contact(self, node_id):
"""
Return the known node with the specified ID, None if not found.
@param: node_id: The ID of the node to search for.
@type: guid.GUIDMixin or str or unicode
@return: The node with the specified ID or None
@rtype: guid.GUIDMixin or NoneType
"""
pass
@abstractmethod
def get_refresh_list(self, start_index=0, force=False):
"""
Find all KBuckets that need refreshing, starting at the KBucket
with the specified index, and return IDs to be searched for in
order to refresh those KBuckets.
@param start_index: The index of the bucket to start refreshing
at; this bucket and those further away from
it will be refreshed. For example, when
joining the network, this node will set this
to the index of the bucket after the one
containing its closest neighbour.
@type start_index: int
@param force: If this is True, all buckets in the specified
range will be refreshed, regardless of the time
they were last accessed.
@type force: bool
@return: A list of node IDs that the parent node should search for
in order to refresh the routing Table.
@rtype: list of guid.GUIDMixin
"""
pass
@abstractmethod
def remove_contact(self, node_id):
"""
Remove the node with the specified ID from the routing table.
@param node_id: The ID of the node to remove.
@type node_id: guid.GUIDMixin or str or unicode
"""
pass
@abstractmethod
def touch_kbucket(self, node_id, timestamp=None):
"""
Update the "last accessed" timestamp of the KBucket which covers
the range containing the specified key in the key/ID space.
@param node_id: A key in the range of the target KBucket
@type node_id: guid.GUIDMixin or str or unicode
@param timestamp: The timestamp to set on the bucket.
If None, it will be set to int(time.time()).
@type timestamp: int
"""
pass
class OptimizedTreeRoutingTable(RoutingTable):
"""
This class implements a routing table used by a Node class.
The Kademlia routing table is a binary tree whose leaves are KBuckets,
where each KBucket contains nodes with some common prefix of their IDs.
This prefix is the KBucket's position in the binary tree; it therefore
covers some range of ID values, and together all of the KBuckets cover
the entire ID space, without any overlaps.
Note: This implementation adds nodes in the tree (the KBuckets) in
an on-demand fashion, as described in section 2.4 of the 13-page
version of the Kademlia paper[1]. It also uses the contact accounting
optimization specified in section 4.1 of the said paper (optimized
node accounting without PINGs). This results in much less network
traffic, at the expense of some memory.
[1]: http://pdos.csail.mit.edu/~petar/papers/maymounkov-kademlia-lncs.pdf
"""
def __init__(self, parent_node_id, market_id):
"""
Initialize a new OptimizedTreeRoutingTable.
For details, see RoutingTable documentation.
"""
super(OptimizedTreeRoutingTable, self).__init__(
parent_node_id, market_id
)
# Cache containing nodes eligible to replace stale KBucket entries
self.replacement_cache = {}
self.buckets = [
kbucket.KBucket(
range_min=0,
range_max=2**constants.BIT_NODE_ID_LEN,
market_id=market_id
)
]
def add_contact(self, contact):
"""
Add the given contact to the correct KBucket; if it already
exists, update its status.
For details, see RoutingTable documentation.
"""
if not contact.guid:
self.log.error('No guid specified')
return
if contact.guid == self.parent_node_id:
self.log.info('Trying to add yourself. Leaving.')
return
bucket_index = self.kbucket_index(contact.guid)
old_contact = self.buckets[bucket_index].get_contact(contact.guid)
if not old_contact:
try:
self.buckets[bucket_index].add_contact(contact)
except kbucket.BucketFull:
# The bucket is full; see if it can be split (by checking if
# its range includes the host node's id)
if self.buckets[bucket_index].key_in_range(self.parent_node_id):
self.split_bucket(bucket_index)
# Retry the insertion attempt
self.add_contact(contact)
else:
# We can't split the KBucket
# NOTE: This implementation follows section 4.1 of the 13
# page version of the Kademlia paper (optimized contact
# accounting without PINGs - results in much less network
# traffic, at the expense of some memory)
# Put the new contact in our replacement cache for the
# corresponding KBucket (or update its position if it
# exists already)
if bucket_index not in self.replacement_cache:
self.replacement_cache[bucket_index] = []
if contact in self.replacement_cache[bucket_index]:
self.replacement_cache[bucket_index].remove(contact)
# TODO: Using k to limit the size of the contact
# replacement cache - maybe define a separate value for
# this in constants.py?
elif len(self.replacement_cache[bucket_index]) >= constants.K:
# Drop the oldest cached contact for this bucket.
self.replacement_cache[bucket_index].pop(0)
self.replacement_cache[bucket_index].append(contact)
elif old_contact.address != contact.address:
self.log.info('Remove contact')
self.remove_contact(contact.guid)
try:
self.buckets[bucket_index].add_contact(contact)
except kbucket.BucketFull:
# The bucket is full; see if it can be split (by checking
# if its range includes the host node's id)
if self.buckets[bucket_index].key_in_range(self.parent_node_id):
self.split_bucket(bucket_index)
# Retry the insertion attempt
self.add_contact(contact)
else:
# We can't split the KBucket
# NOTE: This implementation follows section 4.1 of the
# 13 page version of the Kademlia paper (optimized
# contact accounting without PINGs - results in much
# less network traffic, at the expense of some memory)
# Put the new contact in our replacement cache for the
# corresponding KBucket (or update its position if
# it exists already)
if bucket_index not in self.replacement_cache:
self.replacement_cache[bucket_index] = []
if contact in self.replacement_cache[bucket_index]:
self.replacement_cache[bucket_index].remove(contact)
# TODO: Using k to limit the size of the contact
# replacement cache - maybe define a separate value
# for this in constants.py?
elif len(self.replacement_cache[bucket_index]) >= constants.K:
# Drop the oldest cached contact for this bucket.
self.replacement_cache[bucket_index].pop(0)
self.replacement_cache[bucket_index].append(contact)
def find_close_nodes(self, key, count, node_id=None):
"""
Find a number of known nodes closest to the node/value with the
specified key.
@param key: The key (i.e. the node or value ID) to search for.
@type key: str
@param count: the amount of contacts to return
@type count: int
@param node_id: Used during RPC, this is the sender's node ID.
The ID passed in this parameter is excluded from
the list of contacts returned.
@type node_id: str
@return: A list of node contacts (C{guid.GUIDMixin instances})
closest to the specified key.
This method will return C{k} (or C{count}, if specified)
contacts if at all possible; it will only return fewer if the
node is returning all of the contacts that it knows of.
@rtype: list
"""
bucket_index = self.kbucket_index(key)
bucket = self.buckets[bucket_index]
closest_nodes = bucket.get_contacts(constants.K, node_id)
# This method must return k contacts (even if we have the node with
# the specified key as node ID), unless there is less than k remote
# nodes in the routing table.
i = 1
can_go_lower = bucket_index - i >= 0
can_go_higher = bucket_index + i < len(self.buckets)
# Fill up the node list to k nodes, starting with the closest
# neighbouring nodes known.
while len(closest_nodes) < constants.K and (can_go_lower or can_go_higher):
# TODO: this may need to be optimized
if can_go_lower:
bucket = self.buckets[bucket_index - i]
closest_nodes.extend(
bucket.get_contacts(
constants.K - len(closest_nodes), node_id
)
)
can_go_lower = bucket_index - (i + 1) >= 0
if can_go_higher:
bucket = self.buckets[bucket_index + i]
closest_nodes.extend(
bucket.get_contacts(
constants.K - len(closest_nodes), node_id
)
)
can_go_higher = bucket_index + (i + 1) < len(self.buckets)
i += 1
self.log.datadump('Closest Nodes: %s', closest_nodes)
return closest_nodes
def get_contact(self, node_id):
"""
Return the known node with the specified ID, None if not found.
For details, see RoutingTable documentation.
"""
bucket_index = self.kbucket_index(node_id)
return self.buckets[bucket_index].get_contact(node_id)
def get_refresh_list(self, start_index=0, force=False):
"""
Find all KBuckets that need refreshing, starting at the
KBucket with the specified index, and return IDs to be searched for
in order to refresh those KBuckets.
For details, see RoutingTable documentation.
"""
if force:
# Refresh every bucket in range regardless of last access time;
# as below, return a representative ID per bucket.
return [self.num_to_id(bucket.range_min)
for bucket in self.buckets[start_index:]]
now = int(time.time())
timeout = constants.REFRESH_TIMEOUT
return [
# Since range_min is always in the KBucket's range
# return that as a representative.
self.num_to_id(bucket.range_min)
for bucket in self.buckets[start_index:]
if now - bucket.last_accessed >= timeout
]
def remove_contact(self, node_id):
"""
Remove the node with the specified ID from the routing table.
For details, see RoutingTable documentation.
"""
bucket_index = self.kbucket_index(node_id)
try:
self.buckets[bucket_index].remove_contact(node_id)
except ValueError:
self.log.error("Attempted to remove absent contact %s.", node_id)
else:
# Replace this stale contact with one from our replacement
# cache, if available.
try:
cached = self.replacement_cache[bucket_index].pop()
except KeyError:
# No replacement cache for this bucket.
pass
except IndexError:
# No cached contact for this bucket.
pass
else:
self.buckets[bucket_index].add_contact(cached)
finally:
self.log.datadump('Contacts: %s', self.buckets[bucket_index].contacts)
def touch_kbucket(self, node_id, timestamp=None):
"""
Update the "last accessed" timestamp of the KBucket which covers
the range containing the specified key in the key/ID space.
For details, see RoutingTable documentation.
"""
if timestamp is None:
timestamp = int(time.time())
bucket_index = self.kbucket_index(node_id)
self.buckets[bucket_index].last_accessed = timestamp
def kbucket_index(self, node_id):
"""
Calculate the index of the KBucket which is responsible for the
specified key (or ID).
@param node_id: The key (node ID) for which to find the responsible KBucket
@type node_id: guid.GUIDMixin or str or unicode
@raises: KeyError: No KBucket is responsible for the key.
RuntimeError: More than one KBucket is responsible for the key;
invariants have been violated.
ValueError: The key is badly encoded.
@return: The index of the KBucket responsible for the specified key
@rtype: int
"""
if isinstance(node_id, guid.GUIDMixin):
key = node_id.guid
else:
key = node_id
# TODO: Since we are using monotonic node ID spaces,
# this *begs* to be done with binary search.
indexes = [
i
for i, bucket in enumerate(self.buckets)
if bucket.key_in_range(key)
]
if not indexes:
raise KeyError("No KBucket responsible for key %s." % key)
elif len(indexes) > 1:
raise RuntimeError(
"Many KBuckets responsible for key %s." % key
)
return indexes[0]
def split_bucket(self, old_bucket_index):
"""
Split the specified KBucket into two new buckets which together cover
the same range in the key/ID space.
@param old_bucket_index: The index of KBucket to split (in this table's
list of KBuckets)
@type old_bucket_index: int
"""
# Halve the range of the current (old) KBucket.
old_bucket = self.buckets[old_bucket_index]
split_point = (old_bucket.range_max -
(old_bucket.range_max - old_bucket.range_min) // 2)
# Create a new KBucket to cover the range split off from the old one.
new_bucket = kbucket.KBucket(
split_point, old_bucket.range_max, self.market_id
)
old_bucket.range_max = split_point
# Now, add the new bucket into the routing table tree
self.buckets.insert(old_bucket_index + 1, new_bucket)
# Finally, copy all nodes that belong to the new KBucket into it...
for contact in old_bucket.contacts:
if new_bucket.key_in_range(contact.guid):
new_bucket.add_contact(contact)
# ...and remove them from the old bucket
for contact in new_bucket.contacts:
old_bucket.remove_contact(contact)
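# Worked example: splitting a bucket with range_min=0 and range_max=16 gives
# split_point = 16 - (16 - 0) // 2 = 8; the old bucket keeps 0..8 (its
# range_max becomes 8), the new bucket covers 8..16, and contacts whose IDs
# fall in the new bucket's range are moved into it.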
|
|
# Copyright (c) 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
import cookielib as cookie_lib
except ImportError:
import http.cookiejar as cookie_lib
import socket
import requests
from requests import exceptions
import six
from cinder.i18n import _
from cinder.volume.drivers.coprhd.helpers import commoncoprhdapi as common
class Authentication(common.CoprHDResource):
# Commonly used URIs for the 'Authentication' module
URI_SERVICES_BASE = ''
URI_AUTHENTICATION = '/login'
HEADERS = {'Content-Type': 'application/json',
'ACCEPT': 'application/json', 'X-EMC-REST-CLIENT': 'TRUE'}
def authenticate_user(self, username, password):
"""Makes REST API call to generate the authentication token.
Authentication token is generated for the specified user after
validation
:param username: Name of the user
:param password: Password for the user
:returns: The authtoken
"""
SEC_REDIRECT = 302
SEC_AUTHTOKEN_HEADER = 'X-SDS-AUTH-TOKEN'
LB_API_PORT = 4443
# Port on which load-balancer/reverse-proxy listens to all incoming
# requests for CoprHD REST APIs
APISVC_PORT = 8443 # Port on which apisvc listens to incoming requests
cookiejar = cookie_lib.LWPCookieJar()
url = ('https://%(ip)s:%(port)d%(uri)s' %
{'ip': self.ipaddr, 'port': self.port,
'uri': self.URI_AUTHENTICATION})
try:
if self.port == APISVC_PORT:
login_response = requests.get(
url, headers=self.HEADERS, verify=False,
auth=(username, password), cookies=cookiejar,
allow_redirects=False, timeout=common.TIMEOUT_SEC)
if login_response.status_code == SEC_REDIRECT:
location = login_response.headers['Location']
if not location:
raise common.CoprHdError(
common.CoprHdError.HTTP_ERR, (_("The redirect"
" location of the"
" authentication"
" service is not"
" provided")))
# Make the second request
login_response = requests.get(
location, headers=self.HEADERS, verify=False,
cookies=cookiejar, allow_redirects=False,
timeout=common.TIMEOUT_SEC)
if (login_response.status_code !=
requests.codes['unauthorized']):
raise common.CoprHdError(
common.CoprHdError.HTTP_ERR, (_("The"
" authentication"
" service failed"
" to reply with"
" 401")))
# Now provide the credentials
login_response = requests.get(
location, headers=self.HEADERS,
auth=(username, password), verify=False,
cookies=cookiejar, allow_redirects=False,
timeout=common.TIMEOUT_SEC)
if login_response.status_code != SEC_REDIRECT:
raise common.CoprHdError(
common.CoprHdError.HTTP_ERR,
(_("Access forbidden: Authentication required")))
location = login_response.headers['Location']
if not location:
raise common.CoprHdError(
common.CoprHdError.HTTP_ERR,
(_("The"
" authentication service failed to provide the"
" location of the service URI when redirecting"
" back")))
authtoken = login_response.headers[SEC_AUTHTOKEN_HEADER]
if not authtoken:
details_str = self.extract_error_detail(login_response)
raise common.CoprHdError(common.CoprHdError.HTTP_ERR,
(_("The token is not"
" generated by"
" authentication service."
"%s") %
details_str))
# Make the final call to get the page with the token
new_headers = self.HEADERS
new_headers[SEC_AUTHTOKEN_HEADER] = authtoken
login_response = requests.get(
location, headers=new_headers, verify=False,
cookies=cookiejar, allow_redirects=False,
timeout=common.TIMEOUT_SEC)
if login_response.status_code != requests.codes['ok']:
raise common.CoprHdError(
common.CoprHdError.HTTP_ERR, (_(
"Login failure code: "
"%(statuscode)s Error: %(responsetext)s") %
{'statuscode': six.text_type(
login_response.status_code),
'responsetext': login_response.text}))
elif self.port == LB_API_PORT:
login_response = requests.get(
url, headers=self.HEADERS, verify=False,
cookies=cookiejar, allow_redirects=False)
if(login_response.status_code ==
requests.codes['unauthorized']):
# Now provide the credentials
login_response = requests.get(
url, headers=self.HEADERS, auth=(username, password),
verify=False, cookies=cookiejar, allow_redirects=False)
authtoken = None
if SEC_AUTHTOKEN_HEADER in login_response.headers:
authtoken = login_response.headers[SEC_AUTHTOKEN_HEADER]
else:
raise common.CoprHdError(
common.CoprHdError.HTTP_ERR,
(_("Incorrect port number. Load balanced port is: "
"%(lb_api_port)s, api service port is: "
"%(apisvc_port)s") %
{'lb_api_port': LB_API_PORT,
'apisvc_port': APISVC_PORT}))
if not authtoken:
details_str = self.extract_error_detail(login_response)
raise common.CoprHdError(
common.CoprHdError.HTTP_ERR,
(_("The token is not generated by authentication service."
" %s") % details_str))
if login_response.status_code != requests.codes['ok']:
error_msg = None
if login_response.status_code == 401:
error_msg = _("Access forbidden: Authentication required")
elif login_response.status_code == 403:
error_msg = _("Access forbidden: You don't have"
" sufficient privileges to perform"
" this operation")
elif login_response.status_code == 500:
error_msg = _("Bourne internal server error")
elif login_response.status_code == 404:
error_msg = _(
"Requested resource is currently unavailable")
elif login_response.status_code == 405:
error_msg = (_("GET method is not supported by resource:"
" %s"),
url)
elif login_response.status_code == 503:
error_msg = _("Service temporarily unavailable:"
" The server is temporarily unable"
" to service your request")
else:
error_msg = login_response.text
raise common.CoprHdError(common.CoprHdError.HTTP_ERR,
(_("HTTP code: %(status_code)s"
", response: %(reason)s"
" [%(error_msg)s]") % {
'status_code': six.text_type(
login_response.status_code),
'reason': six.text_type(
login_response.reason),
'error_msg': six.text_type(
error_msg)
}))
except (exceptions.SSLError, socket.error, exceptions.ConnectionError,
exceptions.Timeout) as e:
raise common.CoprHdError(
common.CoprHdError.HTTP_ERR, six.text_type(e))
return authtoken
def extract_error_detail(self, login_response):
details_str = ""
try:
if login_response.content:
json_object = common.json_decode(login_response.content)
if 'details' in json_object:
details_str = json_object['details']
return details_str
except common.CoprHdError:
return details_str
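# Illustrative usage (a sketch; assumes the instance is constructed like the
# other CoprHDResource helpers, with the CoprHD host and API port):
#
#   auth = Authentication(coprhd_host, 4443)
#   token = auth.authenticate_user('proxyuser', 'password')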
|
|
#!/usr/bin/env python3
"""Implement configuration file parsing."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import re
from faucet import config_parser_util
from faucet.acl import ACL
from faucet.conf import test_config_condition, InvalidConfigError
from faucet.dp import DP
from faucet.meter import Meter
from faucet.port import Port
from faucet.router import Router
from faucet.vlan import VLAN
from faucet.watcher_conf import WatcherConf
V2_TOP_CONFS = (
'acls',
'dps',
'meters',
'routers',
'vlans')
def dp_parser(config_file, logname, meta_dp_state=None):
"""Parse a config file into DP configuration objects with hashes of config include/files."""
conf, _ = config_parser_util.read_config(config_file, logname)
config_hashes = None
dps = None
test_config_condition(conf is None, 'Config file is empty')
test_config_condition(
not isinstance(conf, dict),
'Config file does not have valid syntax')
version = conf.pop('version', 2)
test_config_condition(version != 2, 'Only config version 2 is supported')
config_hashes, config_contents, dps, top_conf = _config_parser_v2(
config_file, logname, meta_dp_state)
test_config_condition(dps is None, 'DPs are not defined')
return config_hashes, config_contents, dps, top_conf
def _get_vlan_by_key(dp_id, vlan_key, vlans):
try:
if vlan_key in vlans:
return vlans[vlan_key]
except TypeError as err:
raise InvalidConfigError(err) from err
for vlan in vlans.values():
if vlan_key == vlan.vid:
return vlan
test_config_condition(not isinstance(vlan_key, int), (
'Implicitly created VLAN %s must be an int (not %s)' % (
vlan_key, type(vlan_key))))
# Create VLAN with VID, if not defined.
return vlans.setdefault(vlan_key, VLAN(vlan_key, dp_id))
def _dp_parse_port(dp_id, port_key, port_conf, vlans):
def _dp_parse_native_port_vlan():
if port.native_vlan is not None:
vlan = _get_vlan_by_key(dp_id, port.native_vlan, vlans)
port.native_vlan = vlan
def _dp_parse_tagged_port_vlans():
if port.tagged_vlans:
port_tagged_vlans = [
_get_vlan_by_key(dp_id, vlan_key, vlans) for vlan_key in port.tagged_vlans]
port.tagged_vlans = port_tagged_vlans
port = Port(port_key, dp_id, port_conf)
test_config_condition(str(port_key) not in (str(port.number), port.name), (
'Port key %s must match port name or port number' % port_key))
_dp_parse_native_port_vlan()
_dp_parse_tagged_port_vlans()
return port
def _dp_add_ports(dp, dp_conf, dp_id, vlans):
ports_conf = dp_conf.get('interfaces', {})
port_ranges_conf = dp_conf.get('interface_ranges', {})
# Users can configure a port's VLAN by VLAN name, so we store the VID in
# the Port instance instead of the VLAN name, for data consistency.
test_config_condition(not isinstance(ports_conf, dict), (
'Invalid syntax in interface config'))
test_config_condition(not isinstance(port_ranges_conf, dict), (
'Invalid syntax in interface ranges config'))
def _map_port_num_to_port(ports_conf):
port_num_to_port_conf = {}
for port_key, port_conf in ports_conf.items():
test_config_condition(not isinstance(port_conf, dict), 'Invalid syntax in port config')
port_num = port_conf.get('number', port_key)
try:
port_num_to_port_conf[port_num] = (port_key, port_conf)
except TypeError as type_error:
raise InvalidConfigError('Invalid syntax in port config') from type_error
return port_num_to_port_conf
def _parse_port_ranges(port_ranges_conf, port_num_to_port_conf):
all_port_nums = set()
for port_range, port_conf in port_ranges_conf.items():
# port range format: 1-6 OR 1-6,8-9 OR 1-3,5,7-9
test_config_condition(not isinstance(port_conf, dict), 'Invalid syntax in port config')
port_nums = set()
if 'number' in port_conf:
del port_conf['number']
for range_ in re.findall(r'(\d+-\d+)', str(port_range)):
start_num, end_num = [int(num) for num in range_.split('-')]
test_config_condition(start_num >= end_num, (
'Incorrect port range (%d - %d)' % (start_num, end_num)))
port_nums.update(range(start_num, end_num + 1))
port_range = re.sub(range_, '', port_range)
other_nums = [int(p) for p in re.findall(r'\d+', str(port_range))]
port_nums.update(other_nums)
test_config_condition(
not port_nums, 'interface_ranges contain invalid config')
test_config_condition(
port_nums.intersection(all_port_nums), 'interface_ranges cannot overlap')
all_port_nums.update(port_nums)
for port_num in port_nums:
if port_num in port_num_to_port_conf:
# port range config has lower priority than individual port config
for attr, value in port_conf.items():
port_num_to_port_conf[port_num][1].setdefault(attr, value)
else:
port_num_to_port_conf[port_num] = (port_num, port_conf)
port_num_to_port_conf = _map_port_num_to_port(ports_conf)
_parse_port_ranges(port_ranges_conf, port_num_to_port_conf)
for port_num, port_conf in port_num_to_port_conf.values():
port = _dp_parse_port(dp_id, port_num, port_conf, vlans)
dp.add_port(port)
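# Illustrative sketch of the interface_ranges expansion used above: the same
# regex strategy turns a range string like '1-3,5,7-9' into individual port
# numbers (standalone example only; the real logic lives in _parse_port_ranges).
def _example_expand_port_range(port_range='1-3,5,7-9'):
    port_nums = set()
    for range_ in re.findall(r'(\d+-\d+)', str(port_range)):
        start_num, end_num = [int(num) for num in range_.split('-')]
        port_nums.update(range(start_num, end_num + 1))
        port_range = re.sub(range_, '', port_range)
    port_nums.update(int(p) for p in re.findall(r'\d+', str(port_range)))
    return sorted(port_nums)  # -> [1, 2, 3, 5, 7, 8, 9]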
def _parse_acls(dp, acls_conf):
for acl_key, acl_conf in acls_conf.items():
acl = ACL(acl_key, dp.dp_id, acl_conf)
dp.add_acl(acl_key, acl)
def _parse_routers(dp, routers_conf):
for router_key, router_conf in routers_conf.items():
router = Router(router_key, dp.dp_id, router_conf)
dp.add_router(router_key, router)
def _parse_meters(dp, meters_conf):
for meter_key, meter_conf in meters_conf.items():
meter = Meter(meter_key, dp.dp_id, meter_conf)
dp.meters[meter_key] = meter
def _parse_dp(dp_key, dp_conf, acls_conf, meters_conf, routers_conf, vlans_conf):
test_config_condition(not isinstance(dp_conf, dict), 'DP config must be dict')
dp = DP(dp_key, dp_conf.get('dp_id', None), dp_conf)
test_config_condition(dp.name != dp_key, (
'DP key %s and DP name must match' % dp_key))
vlans = {}
vids = set()
for vlan_key, vlan_conf in vlans_conf.items():
vlan = VLAN(vlan_key, dp.dp_id, vlan_conf)
test_config_condition(str(vlan_key) not in (str(vlan.vid), vlan.name), (
'VLAN %s key must match VLAN name or VLAN VID' % vlan_key))
test_config_condition(not isinstance(vlan_key, (str, int)), (
'VLAN %s key must not be type %s' % (vlan_key, type(vlan_key))))
test_config_condition(vlan.vid in vids, (
'VLAN VID %u multiply configured' % vlan.vid))
vlans[vlan_key] = vlan
vids.add(vlan.vid)
_parse_acls(dp, acls_conf)
_parse_routers(dp, routers_conf)
_parse_meters(dp, meters_conf)
_dp_add_ports(dp, dp_conf, dp.dp_id, vlans)
return (dp, vlans)
def _dp_parser_v2(dps_conf, acls_conf, meters_conf,
routers_conf, vlans_conf, meta_dp_state):
# pylint: disable=invalid-name
dp_vlans = []
for dp_key, dp_conf in dps_conf.items():
try:
dp, vlans = _parse_dp(
dp_key, dp_conf, acls_conf, meters_conf, routers_conf, vlans_conf)
dp_vlans.append((dp, vlans))
except InvalidConfigError as err:
raise InvalidConfigError('DP %s: %s' % (dp_key, err)) from err
# Some VLANs are created implicitly just by referencing them in tagged/native,
# so we must make them available to all DPs.
implicit_vids = set()
for dp, vlans in dp_vlans:
implicit_vids.update(set(vlans.keys()) - set(vlans_conf.keys()))
dps = []
for dp, vlans in dp_vlans:
for vlan_key in implicit_vids:
if vlan_key not in vlans:
vlans[vlan_key] = VLAN(vlan_key, dp.dp_id)
dp.reset_refs(vlans=vlans)
dps.append(dp)
for dp in dps:
dp.finalize_config(dps)
for dp in dps:
dp.resolve_stack_topology(dps, meta_dp_state)
for dp in dps:
dp.finalize()
dpid_refs = set()
for dp in dps:
test_config_condition(dp.dp_id in dpid_refs, (
'DPID %u is duplicated' % dp.dp_id))
dpid_refs.add(dp.dp_id)
routers_referenced = set()
for dp in dps:
routers_referenced.update(dp.routers.keys())
for router in routers_conf:
test_config_condition(router not in routers_referenced, (
'router %s configured but not used by any DP' % router))
return dps
def dp_preparsed_parser(top_confs, meta_dp_state):
"""Parse a preparsed (after include files have been applied) FAUCET config."""
local_top_confs = copy.deepcopy(top_confs)
return _dp_parser_v2(
local_top_confs.get('dps', {}),
local_top_confs.get('acls', {}),
local_top_confs.get('meters', {}),
local_top_confs.get('routers', {}),
local_top_confs.get('vlans', {}),
meta_dp_state)
def _config_parser_v2(config_file, logname, meta_dp_state):
config_path = config_parser_util.dp_config_path(config_file)
top_confs = {top_conf: {} for top_conf in V2_TOP_CONFS}
config_hashes = {}
config_contents = {}
dps = None
if not config_parser_util.dp_include(
config_hashes, config_contents, config_path, logname, top_confs):
raise InvalidConfigError('Error found while loading config file: %s' % config_path)
if not top_confs['dps']:
raise InvalidConfigError('DPs not configured in file: %s' % config_path)
dps = dp_preparsed_parser(top_confs, meta_dp_state)
return (config_hashes, config_contents, dps, top_confs)
def watcher_parser(config_file, logname, prom_client):
"""Return Watcher instances from config."""
conf, _ = config_parser_util.read_config(config_file, logname)
conf_hash = config_parser_util.config_file_hash(config_file)
faucet_config_files, faucet_conf_hashes, result = _watcher_parser_v2(
conf, logname, prom_client)
return conf_hash, faucet_config_files, faucet_conf_hashes, result
def _parse_dps_for_watchers(conf, logname, meta_dp_state=None):
all_dps_list = []
faucet_conf_hashes = {}
if not isinstance(conf, dict):
raise InvalidConfigError('Gauge config not valid')
faucet_config_files = conf.get('faucet_configs', [])
for faucet_config_file in faucet_config_files:
conf_hashes, _, dp_list, _ = dp_parser(faucet_config_file, logname)
if dp_list:
faucet_conf_hashes[faucet_config_file] = conf_hashes
all_dps_list.extend(dp_list)
faucet_config = conf.get('faucet', None)
if faucet_config:
all_dps_list.extend(dp_preparsed_parser(faucet_config, meta_dp_state))
dps = {dp.name: dp for dp in all_dps_list}
if not dps:
raise InvalidConfigError(
'Gauge configured without any FAUCET configuration')
return faucet_config_files, faucet_conf_hashes, dps
def _watcher_parser_v2(conf, logname, prom_client):
logger = config_parser_util.get_logger(logname)
if conf is None:
conf = {}
faucet_config_files, faucet_conf_hashes, dps = _parse_dps_for_watchers(
conf, logname)
dbs = conf.pop('dbs')
result = []
for watcher_name, watcher_conf in conf['watchers'].items():
if watcher_conf.get('all_dps', False):
watcher_dps = dps.keys()
else:
watcher_dps = watcher_conf['dps']
# Watcher config has a list of DPs, but actually a WatcherConf is
# created for each DP.
# TODO: refactor watcher_conf as a container.
for dp_name in watcher_dps:
if dp_name not in dps:
logger.error('DP %s in Gauge but not configured in FAUCET', dp_name)
continue
dp = dps[dp_name]
if 'dbs' in watcher_conf:
watcher_dbs = watcher_conf['dbs']
elif 'db' in watcher_conf:
watcher_dbs = [watcher_conf['db']]
else:
raise InvalidConfigError('Watcher configured without DB')
for db in watcher_dbs:
watcher = WatcherConf(watcher_name, dp.dp_id, watcher_conf, prom_client)
watcher.add_db(dbs[db])
watcher.add_dp(dp)
result.append(watcher)
return faucet_config_files, faucet_conf_hashes, result
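# Illustrative sketch of the Gauge configuration shape consumed by
# _watcher_parser_v2() above. Only keys read by this module are shown; the
# file path and the watcher/db names are made up for the example.
_EXAMPLE_GAUGE_CONF = {
    # FAUCET config files to load DPs from (see _parse_dps_for_watchers)
    'faucet_configs': ['/etc/faucet/faucet.yaml'],
    # db definitions, passed through to WatcherConf.add_db()
    'dbs': {'prometheus': {}},
    # one WatcherConf is created per watched DP and per db
    'watchers': {
        'port_stats_poller': {'all_dps': True, 'db': 'prometheus'},
    },
}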
def get_config_for_api(valves):
"""Return config as dict for all DPs."""
config = {i: {} for i in V2_TOP_CONFS}
for valve in valves.values():
valve_conf = valve.get_config_dict()
for i in V2_TOP_CONFS:
if i in valve_conf:
config[i].update(valve_conf[i]) # pytype: disable=attribute-error
return config
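# Minimal sketch of get_config_for_api(): any mapping of names to objects
# exposing get_config_dict() works, so a stand-in valve is enough to show the
# merge (the _FakeValve class below is an assumption for this example).
def _example_get_config_for_api():
    class _FakeValve(object):
        def get_config_dict(self):
            return {'dps': {'sw1': {'dp_id': 1}},
                    'vlans': {'office': {'vid': 100}}}
    return get_config_for_api({'sw1': _FakeValve()})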
|
|
# -*- coding: utf-8 -*-
import mock
import pytest
import urlparse
from django.db import connection, transaction
from django.test import TransactionTestCase
from django.test.utils import CaptureQueriesContext
from osf.models import QuickFilesNode
from website import util as website_utils
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
AuthUserFactory,
CollectionFactory,
ProjectFactory,
)
from website.util.sanitize import strip_html
from website.views import find_bookmark_collection
@pytest.mark.django_db
class TestUserDetail:
@pytest.fixture()
def user_one(self):
user_one = AuthUserFactory()
user_one.social['twitter'] = 'rheisendennis'
user_one.save()
return user_one
@pytest.fixture()
def user_two(self):
return AuthUserFactory()
def test_get(self, app, user_one, user_two):
# test_gets_200
url = '/{}users/{}/'.format(API_BASE, user_one._id)
res = app.get(url)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
# test_get_correct_pk_user
url = '/{}users/{}/'.format(API_BASE, user_one._id)
res = app.get(url)
user_json = res.json['data']
assert user_json['attributes']['full_name'] == user_one.fullname
assert user_one.social['twitter'] in user_json['attributes']['social']['twitter']
# test_get_incorrect_pk_user_not_logged_in
url = '/{}users/{}/'.format(API_BASE, user_two._id)
res = app.get(url)
user_json = res.json['data']
assert user_json['attributes']['full_name'] != user_one.fullname
# test_returns_timezone_and_locale
url = '/{}users/{}/'.format(API_BASE, user_one._id)
res = app.get(url)
attributes = res.json['data']['attributes']
assert attributes['timezone'] == user_one.timezone
assert attributes['locale'] == user_one.locale
# test_get_new_users
url = '/{}users/{}/'.format(API_BASE, user_two._id)
res = app.get(url)
assert res.status_code == 200
assert res.json['data']['attributes']['full_name'] == user_two.fullname
assert res.json['data']['attributes']['social'] == {}
# test_get_incorrect_pk_user_logged_in
url = '/{}users/{}/'.format(API_BASE, user_two._id)
res = app.get(url, auth=user_one.auth)
user_json = res.json['data']
assert user_json['attributes']['full_name'] != user_one.fullname
assert user_json['attributes']['full_name'] == user_two.fullname
# test_user_detail_takes_profile_image_size_param
size = 42
url = '/{}users/{}/?profile_image_size={}'.format(API_BASE, user_one._id, size)
res = app.get(url)
user_json = res.json['data']
profile_image_url = user_json['links']['profile_image']
query_dict = urlparse.parse_qs(urlparse.urlparse(profile_image_url).query)
assert int(query_dict.get('s')[0]) == size
# test_profile_image_in_links
url = '/{}users/{}/'.format(API_BASE, user_one._id)
res = app.get(url)
user_json = res.json['data']
assert 'profile_image' in user_json['links']
def test_files_relationship_upload(self, app, user_one):
url = "/{}users/{}/".format(API_BASE, user_one._id)
res = app.get(url, auth=user_one)
quickfiles = QuickFilesNode.objects.get(creator=user_one)
user_json = res.json['data']
upload_url = user_json['relationships']['quickfiles']['links']['upload']['href']
waterbutler_upload = website_utils.waterbutler_api_url_for(quickfiles._id, 'osfstorage')
assert upload_url == waterbutler_upload
def test_preprint_relationship(self, app, user_one):
url = "/{}users/{}/".format(API_BASE, user_one._id)
preprint_url = "/{}users/{}/preprints/".format(API_BASE, user_one._id)
res = app.get(url, auth=user_one)
user_json = res.json['data']
href_url = user_json['relationships']['preprints']['links']['related']['href']
assert preprint_url in href_url
def test_registrations_relationship(self, app, user_one):
url = "/{}users/{}/".format(API_BASE, user_one._id)
registration_url = "/{}users/{}/registrations/".format(API_BASE, user_one._id)
res = app.get(url, auth=user_one)
user_json = res.json['data']
href_url = user_json['relationships']['registrations']['links']['related']['href']
assert registration_url in href_url
def test_nodes_relationship_is_absent(self, app, user_one):
url = "/{}users/{}/".format(API_BASE, user_one._id)
res = app.get(url, auth=user_one)
assert 'node' not in res.json['data']['relationships'].keys()
# Regression test for https://openscience.atlassian.net/browse/OSF-8966
def test_browsable_api_for_user_detail(self, app, user_one):
url = "/{}users/{}/?format=api".format(API_BASE, user_one._id)
res = app.get(url, auth=user_one.auth)
assert res.status_code == 200
@pytest.mark.django_db
class TestUserRoutesNodeRoutes:
@pytest.fixture()
def user_one(self):
user_one = AuthUserFactory()
user_one.social['twitter'] = 'rheisendennis'
user_one.save()
return user_one
@pytest.fixture()
def user_two(self):
return AuthUserFactory()
@pytest.fixture()
def project_public_user_one(self, user_one):
return ProjectFactory(title='Public Project User One', is_public=True, creator=user_one)
@pytest.fixture()
def project_private_user_one(self, user_one):
return ProjectFactory(title='Private Project User One', is_public=False, creator=user_one)
@pytest.fixture()
def project_deleted_user_one(self, user_one):
return CollectionFactory(title='Deleted Project User One', is_public=False, creator=user_one, is_deleted=True)
@pytest.fixture()
def project_public_user_two(self, user_two):
return ProjectFactory(title='Public Project User Two', is_public=True, creator=user_two)
@pytest.fixture()
def project_private_user_two(self, user_two):
return ProjectFactory(title='Private Project User Two', is_public=False, creator=user_two)
@pytest.fixture()
def folder(self):
return CollectionFactory()
@pytest.fixture()
def folder_deleted(self, user_one):
return CollectionFactory(title='Deleted Folder User One', is_public=False, creator=user_one, is_deleted=True)
@pytest.fixture()
def bookmark_collection(self, user_one):
return find_bookmark_collection(user_one)
def test_get_200_responses(self, app, user_one, user_two, project_public_user_one, project_public_user_two, project_private_user_one, project_private_user_two, project_deleted_user_one, folder, folder_deleted, bookmark_collection):
# test_get_200_path_users_me_userone_logged_in
url = '/{}users/me/'.format(API_BASE)
res = app.get(url, auth=user_one.auth)
assert res.status_code == 200
# test_get_200_path_users_me_usertwo_logged_in
url = '/{}users/me/'.format(API_BASE)
res = app.get(url, auth=user_two.auth)
assert res.status_code == 200
# test_get_200_path_users_user_id_user_logged_in
url = '/{}users/{}/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_one.auth)
assert res.status_code == 200
# test_get_200_path_users_user_id_no_user
url = '/{}users/{}/'.format(API_BASE, user_two._id)
res = app.get(url)
assert res.status_code == 200
# test_get_200_path_users_user_id_unauthorized_user
url = '/{}users/{}/'.format(API_BASE, user_two._id)
res = app.get(url, auth=user_one.auth)
assert res.status_code == 200
assert res.json['data']['id'] == user_two._id
# test_get_200_path_users_me_nodes_user_logged_in
url = '/{}users/me/nodes/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_one.auth)
assert res.status_code == 200
ids = {each['id'] for each in res.json['data']}
assert project_public_user_one._id in ids
assert project_private_user_one._id in ids
assert project_public_user_two._id not in ids
assert project_private_user_two._id not in ids
assert folder._id not in ids
assert folder_deleted._id not in ids
assert project_deleted_user_one._id not in ids
# test_get_200_path_users_user_id_nodes_user_logged_in
url = '/{}users/{}/nodes/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_one.auth)
assert res.status_code == 200
ids = {each['id'] for each in res.json['data']}
assert project_public_user_one._id in ids
assert project_private_user_one._id in ids
assert project_public_user_two._id not in ids
assert project_private_user_two._id not in ids
assert folder._id not in ids
assert folder_deleted._id not in ids
assert project_deleted_user_one._id not in ids
# test_get_200_path_users_user_id_nodes_no_user
url = '/{}users/{}/nodes/'.format(API_BASE, user_one._id)
res = app.get(url)
assert res.status_code == 200
# an anonymous/unauthorized user can only see the public projects user_one contributes to.
ids = {each['id'] for each in res.json['data']}
assert project_public_user_one._id in ids
assert project_private_user_one._id not in ids
assert project_public_user_two._id not in ids
assert project_private_user_two._id not in ids
assert folder._id not in ids
assert folder_deleted._id not in ids
assert project_deleted_user_one._id not in ids
# test_get_200_path_users_user_id_nodes_unauthorized_user
url = '/{}users/{}/nodes/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_two.auth)
assert res.status_code == 200
# an anonymous/unauthorized user can only see the public projects user_one contributes to.
ids = {each['id'] for each in res.json['data']}
assert project_public_user_one._id in ids
assert project_private_user_one._id not in ids
assert project_public_user_two._id not in ids
assert project_private_user_two._id not in ids
assert folder._id not in ids
assert folder_deleted._id not in ids
assert project_deleted_user_one._id not in ids
def test_get_400_responses(self, app, user_one, user_two):
# test_get_403_path_users_me_nodes_no_user
# TODO: change expected exception from 403 to 401 for unauthorized users
url = '/{}users/me/nodes/'.format(API_BASE)
res = app.get(url, expect_errors=True)
assert res.status_code == 401
# test_get_403_path_users_me_no_user
# TODO: change expected exception from 403 to 401 for unauthorized users
url = '/{}users/me/'.format(API_BASE)
res = app.get(url, expect_errors=True)
assert res.status_code == 401
# test_get_404_path_users_user_id_me_user_logged_in
url = '/{}users/{}/me/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_one.auth, expect_errors=True)
assert res.status_code == 404
# test_get_404_path_users_user_id_me_no_user
url = '/{}users/{}/me/'.format(API_BASE, user_one._id)
res = app.get(url, expect_errors=True)
assert res.status_code == 404
# test_get_404_path_users_user_id_me_unauthorized_user
url = '/{}users/{}/me/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_two.auth, expect_errors=True)
assert res.status_code == 404
# test_get_404_path_users_user_id_nodes_me_user_logged_in
url = '/{}users/{}/nodes/me/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_one.auth, expect_errors=True)
assert res.status_code == 404
# test_get_404_path_users_user_id_nodes_me_unauthorized_user
url = '/{}users/{}/nodes/me/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_two.auth, expect_errors=True)
assert res.status_code == 404
# test_get_404_path_users_user_id_nodes_me_no_user
url = '/{}users/{}/nodes/me/'.format(API_BASE, user_one._id)
res = app.get(url, expect_errors=True)
assert res.status_code == 404
# test_get_404_path_nodes_me_user_logged_in
url = '/{}nodes/me/'.format(API_BASE)
res = app.get(url, auth=user_one.auth, expect_errors=True)
assert res.status_code == 404
# test_get_404_path_nodes_me_no_user
url = '/{}nodes/me/'.format(API_BASE)
res = app.get(url, expect_errors=True)
assert res.status_code == 404
# test_get_404_path_nodes_user_id_user_logged_in
url = '/{}nodes/{}/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_one.auth, expect_errors=True)
assert res.status_code == 404
# test_get_404_path_nodes_user_id_unauthorized_user
url = '/{}nodes/{}/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_two.auth, expect_errors=True)
assert res.status_code == 404
# test_get_404_path_nodes_user_id_no_user
url = '/{}nodes/{}/'.format(API_BASE, user_one._id)
res = app.get(url, expect_errors=True)
assert res.status_code == 404
@pytest.mark.django_db
class TestUserUpdate:
@pytest.fixture()
def user_one(self):
user_one = AuthUserFactory.build(
fullname='Martin Luther King Jr.',
given_name='Martin',
family_name='King',
suffix='Jr.',
social=dict(
github='userOneGithub',
scholar='userOneScholar',
profileWebsites=['http://www.useronepersonalwebsite.com'],
twitter='userOneTwitter',
linkedIn='userOneLinkedIn',
impactStory='userOneImpactStory',
orcid='userOneOrcid',
researcherId='userOneResearcherId'
)
)
user_one.save()
return user_one
@pytest.fixture()
def user_two(self):
return AuthUserFactory()
@pytest.fixture()
def url_user_one(self, user_one):
return '/v2/users/{}/'.format(user_one._id)
@pytest.fixture()
def data_new_user_one(self, user_one):
return {
'data': {
'type': 'users',
'id': user_one._id,
'attributes': {
'full_name': 'el-Hajj Malik el-Shabazz',
'given_name': 'Malcolm',
'middle_names': 'Malik el-Shabazz',
'family_name': 'X',
'suffix': 'Sr.',
'social': {
'github': ['http://github.com/even_newer_github/'],
'scholar': ['http://scholar.google.com/citations?user=newScholar'],
'profileWebsites': ['http://www.newpersonalwebsite.com'],
'twitter': ['http://twitter.com/newtwitter'],
'linkedIn': ['https://www.linkedin.com/newLinkedIn'],
'impactStory': ['https://impactstory.org/newImpactStory'],
'orcid': ['http://orcid.org/newOrcid'],
'researcherId': ['http://researcherid.com/rid/newResearcherId'],
}
},
}
}
@pytest.fixture()
def data_missing_id(self):
return {
'data': {
'type': 'users',
'attributes': {
'full_name': 'el-Hajj Malik el-Shabazz',
'family_name': 'Z',
}
}
}
@pytest.fixture()
def data_missing_type(self, user_one):
return {
'data': {
'id': user_one._id,
'attributes': {
'fullname': 'el-Hajj Malik el-Shabazz',
'family_name': 'Z',
}
}
}
@pytest.fixture()
def data_incorrect_id(self):
return {
'data': {
'id': '12345',
'type': 'users',
'attributes': {
'full_name': 'el-Hajj Malik el-Shabazz',
'family_name': 'Z',
}
}
}
@pytest.fixture()
def data_incorrect_type(self, user_one):
return {
'data': {
'id': user_one._id,
'type': 'Wrong type.',
'attributes': {
'full_name': 'el-Hajj Malik el-Shabazz',
'family_name': 'Z',
}
}
}
@pytest.fixture()
def data_blank_but_not_empty_full_name(self, user_one):
return {
'data': {
'id': user_one._id,
'type': 'users',
'attributes': {
'full_name': ' '
}
}
}
def test_select_for_update(self, app, user_one, url_user_one, data_new_user_one):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
res = app.patch_json_api(url_user_one, {
'data': {
'id': user_one._id,
'type': 'users',
'attributes': {
'family_name': data_new_user_one['data']['attributes']['family_name'],
}
}
}, auth=user_one.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['family_name'] == data_new_user_one['data']['attributes']['family_name']
for_update_sql = connection.ops.for_update_sql()
assert any(for_update_sql in query['sql'] for query in ctx.captured_queries)
@mock.patch('osf.utils.requests.settings.SELECT_FOR_UPDATE_ENABLED', False)
def test_select_for_update_disabled(self, app, user_one, url_user_one, data_new_user_one):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
res = app.patch_json_api(url_user_one, {
'data': {
'id': user_one._id,
'type': 'users',
'attributes': {
'family_name': data_new_user_one['data']['attributes']['family_name'],
}
}
}, auth=user_one.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['family_name'] == data_new_user_one['data']['attributes']['family_name']
for_update_sql = connection.ops.for_update_sql()
assert not any(for_update_sql in query['sql'] for query in ctx.captured_queries)
def test_update_patch_errors(self, app, user_one, user_two, data_new_user_one, data_incorrect_type, data_incorrect_id, data_missing_type, data_missing_id, data_blank_but_not_empty_full_name, url_user_one):
# test_update_user_blank_but_not_empty_full_name
res = app.put_json_api(url_user_one, data_blank_but_not_empty_full_name, auth=user_one.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be blank.'
# test_partial_update_user_blank_but_not_empty_full_name
res = app.patch_json_api(url_user_one, data_blank_but_not_empty_full_name, auth=user_one.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be blank.'
# test_patch_user_incorrect_type
res = app.put_json_api(url_user_one, data_incorrect_type, auth=user_one.auth, expect_errors=True)
assert res.status_code == 409
# test_patch_user_incorrect_id
res = app.put_json_api(url_user_one, data_incorrect_id, auth=user_one.auth, expect_errors=True)
assert res.status_code == 409
# test_patch_user_no_type
res = app.put_json_api(url_user_one, data_missing_type, auth=user_one.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
# test_patch_user_no_id
res = app.put_json_api(url_user_one, data_missing_id, auth=user_one.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'This field may not be null.'
# test_partial_patch_user_incorrect_type
res = app.patch_json_api(url_user_one, data_incorrect_type, auth=user_one.auth, expect_errors=True)
assert res.status_code == 409
# test_partial_patch_user_incorrect_id
res = app.patch_json_api(url_user_one, data_incorrect_id, auth=user_one.auth, expect_errors=True)
assert res.status_code == 409
# test_partial_patch_user_no_type
res = app.patch_json_api(url_user_one, data_missing_type, auth=user_one.auth, expect_errors=True)
assert res.status_code == 400
# test_partial_patch_user_no_id
res = app.patch_json_api(url_user_one, data_missing_id, auth=user_one.auth, expect_errors=True)
assert res.status_code == 400
# test_patch_fields_not_nested
res = app.put_json_api(url_user_one, {'data': {'id': user_one._id, 'type': 'users', 'full_name': 'New name'}}, auth=user_one.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Request must include /data/attributes.'
# test_partial_patch_fields_not_nested
res = app.patch_json_api(url_user_one, {'data': {'id': user_one._id, 'type': 'users', 'full_name': 'New name'}}, auth=user_one.auth, expect_errors=True)
assert res.status_code == 400
# test_patch_user_logged_out
res = app.patch_json_api(url_user_one, {
'data': {
'id': user_one._id,
'type': 'users',
'attributes': {
'full_name': data_new_user_one['data']['attributes']['full_name'],
}
}
}, expect_errors=True)
assert res.status_code == 401
# test_put_user_without_required_field
# PUT requires all required fields
res = app.put_json_api(url_user_one, {
'data': {
'id': user_one._id,
'type': 'users',
'attributes': {
'family_name': data_new_user_one['data']['attributes']['family_name'],
}
}
}, auth=user_one.auth, expect_errors=True)
assert res.status_code == 400
# test_put_user_logged_out
res = app.put_json_api(url_user_one, data_new_user_one, expect_errors=True)
assert res.status_code == 401
# test_put_wrong_user
# User tries to update someone else's user information via put
res = app.put_json_api(url_user_one, data_new_user_one, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
# test_patch_wrong_user
# User tries to update someone else's user information via patch
res = app.patch_json_api(url_user_one, {
'data': {
'id': user_one._id,
'type': 'users',
'attributes': {
'full_name': data_new_user_one['data']['attributes']['full_name'],
}
}
}, auth=user_two.auth, expect_errors=True)
assert res.status_code == 403
user_one.reload()
assert user_one.fullname != data_new_user_one['data']['attributes']['full_name']
# test_update_user_social_with_invalid_value
"""update the social key which is not profileWebsites with more than one value should throw an error"""
res = app.patch_json_api(url_user_one, {
'data': {
'id': user_one._id,
'type': 'users',
'attributes': {
'full_name': 'new_fullname',
'suffix': 'The Millionth',
'social': {
'github': ['even_newer_github', 'bad_github'],
}
},
}
}, auth=user_one.auth, expect_errors=True)
assert res.status_code == 400
assert 'github only accept a list of one single value' == res.json['errors'][0]['detail']
def test_patch_user_without_required_field(self, app, user_one, data_new_user_one, url_user_one):
# PATCH does not require required fields
res = app.patch_json_api(url_user_one, {
'data': {
'id': user_one._id,
'type': 'users',
'attributes': {
'family_name': data_new_user_one['data']['attributes']['family_name'],
}
}
}, auth=user_one.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['family_name'] == data_new_user_one['data']['attributes']['family_name']
user_one.reload()
assert user_one.family_name == data_new_user_one['data']['attributes']['family_name']
def test_partial_patch_user_logged_in(self, app, user_one, url_user_one):
# Test to make sure new fields are patched and old fields stay the same
res = app.patch_json_api(url_user_one, {
'data': {
'id': user_one._id,
'type': 'users',
'attributes': {
'full_name': 'new_fullname',
'suffix': 'The Millionth',
'social': {
'github': ['even_newer_github'],
}
},
}}, auth=user_one.auth)
user_one.reload()
assert res.status_code == 200
assert res.json['data']['attributes']['full_name'] == 'new_fullname'
assert res.json['data']['attributes']['suffix'] == 'The Millionth'
social = res.json['data']['attributes']['social']
assert 'even_newer_github' in social['github'][0]
assert res.json['data']['attributes']['given_name'] == user_one.given_name
assert res.json['data']['attributes']['middle_names'] == user_one.middle_names
assert res.json['data']['attributes']['family_name'] == user_one.family_name
assert user_one.social['profileWebsites'] == social['profileWebsites']
assert user_one.social['twitter'] in social['twitter'][0]
assert user_one.social['linkedIn'] in social['linkedIn'][0]
assert user_one.social['impactStory'] in social['impactStory'][0]
assert user_one.social['orcid'] in social['orcid'][0]
assert user_one.social['researcherId'] in social['researcherId'][0]
assert user_one.fullname == 'new_fullname'
assert user_one.suffix == 'The Millionth'
assert user_one.social['github'] == 'even_newer_github'
def test_partial_patch_user_logged_in_no_social_fields(self, app, user_one, url_user_one):
# Test to make sure new fields are patched and old fields stay the same
res = app.patch_json_api(url_user_one, {
'data': {
'id': user_one._id,
'type': 'users',
'attributes': {
'full_name': 'new_fullname',
'suffix': 'The Millionth',
},
}
}, auth=user_one.auth)
user_one.reload()
assert res.status_code == 200
assert res.json['data']['attributes']['full_name'] == 'new_fullname'
assert res.json['data']['attributes']['suffix'] == 'The Millionth'
social = res.json['data']['attributes']['social']
assert user_one.social['github'] in social['github'][0]
assert res.json['data']['attributes']['given_name'] == user_one.given_name
assert res.json['data']['attributes']['middle_names'] == user_one.middle_names
assert res.json['data']['attributes']['family_name'] == user_one.family_name
assert user_one.social['profileWebsites'] == social['profileWebsites']
assert user_one.social['twitter'] in social['twitter'][0]
assert user_one.social['linkedIn'] in social['linkedIn'][0]
assert user_one.social['impactStory'] in social['impactStory'][0]
assert user_one.social['orcid'] in social['orcid'][0]
assert user_one.social['researcherId'] in social['researcherId'][0]
assert user_one.fullname == 'new_fullname'
assert user_one.suffix == 'The Millionth'
assert user_one.social['github'] == 'userOneGithub'  # unchanged by the patch
def test_partial_put_user_logged_in(self, app, user_one, url_user_one):
# Test to make sure new fields are patched and old fields stay the same
res = app.put_json_api(url_user_one, {
'data': {
'id': user_one._id,
'type': 'users',
'attributes': {
'full_name': 'new_fullname',
'suffix': 'The Millionth',
'social': {
'github': ['even_newer_github'],
}
},
}
}, auth=user_one.auth)
user_one.reload()
assert res.status_code == 200
assert res.json['data']['attributes']['full_name'] == 'new_fullname'
assert res.json['data']['attributes']['suffix'] == 'The Millionth'
assert 'even_newer_github' in res.json['data']['attributes']['social']['github'][0]
assert res.json['data']['attributes']['given_name'] == user_one.given_name
assert res.json['data']['attributes']['middle_names'] == user_one.middle_names
assert res.json['data']['attributes']['family_name'] == user_one.family_name
assert user_one.fullname == 'new_fullname'
assert user_one.suffix == 'The Millionth'
assert user_one.social['github'] == 'even_newer_github'
def test_put_user_logged_in(self, app, user_one, data_new_user_one, url_user_one):
# Logged in user updates their user information via put
res = app.put_json_api(url_user_one, data_new_user_one, auth=user_one.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['full_name'] == data_new_user_one['data']['attributes']['full_name']
assert res.json['data']['attributes']['given_name'] == data_new_user_one['data']['attributes']['given_name']
assert res.json['data']['attributes']['middle_names'] == data_new_user_one['data']['attributes']['middle_names']
assert res.json['data']['attributes']['family_name'] == data_new_user_one['data']['attributes']['family_name']
assert res.json['data']['attributes']['suffix'] == data_new_user_one['data']['attributes']['suffix']
social = res.json['data']['attributes']['social']
assert 'even_newer_github' in social['github'][0]
assert 'http://www.newpersonalwebsite.com' in social['profileWebsites'][0]
assert 'newtwitter' in social['twitter'][0]
assert 'newLinkedIn' in social['linkedIn'][0]
assert 'newImpactStory' in social['impactStory'][0]
assert 'newOrcid' in social['orcid'][0]
assert 'newResearcherId' in social['researcherId'][0]
user_one.reload()
assert user_one.fullname == data_new_user_one['data']['attributes']['full_name']
assert user_one.given_name == data_new_user_one['data']['attributes']['given_name']
assert user_one.middle_names == data_new_user_one['data']['attributes']['middle_names']
assert user_one.family_name == data_new_user_one['data']['attributes']['family_name']
assert user_one.suffix == data_new_user_one['data']['attributes']['suffix']
assert 'even_newer_github' in social['github'][0]
assert 'http://www.newpersonalwebsite.com' in social['profileWebsites'][0]
assert 'newtwitter' in social['twitter'][0]
assert 'newLinkedIn' in social['linkedIn'][0]
assert 'newImpactStory' in social['impactStory'][0]
assert 'newOrcid' in social['orcid'][0]
assert 'newResearcherId' in social['researcherId'][0]
def test_update_user_sanitizes_html_properly(self, app, user_one, url_user_one):
"""Post request should update resource, and any HTML in fields should be stripped"""
bad_fullname = 'Malcolm <strong>X</strong>'
bad_family_name = 'X <script>alert("is")</script> a cool name'
res = app.patch_json_api(url_user_one, {
'data': {
'id': user_one._id,
'type': 'users',
'attributes': {
'full_name': bad_fullname,
'family_name': bad_family_name,
}
}
}, auth=user_one.auth)
assert res.status_code == 200
assert res.json['data']['attributes']['full_name'] == strip_html(bad_fullname)
assert res.json['data']['attributes']['family_name'] == strip_html(bad_family_name)
@pytest.mark.django_db
class TestDeactivatedUser:
@pytest.fixture()
def user_one(self):
return AuthUserFactory()
@pytest.fixture()
def user_two(self):
return AuthUserFactory()
def test_requesting_as_deactivated_user_returns_400_response(self, app, user_one):
url = '/{}users/{}/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_one.auth, expect_errors=True)
assert res.status_code == 200
user_one.is_disabled = True
user_one.save()
res = app.get(url, auth=user_one.auth, expect_errors=True)
assert res.status_code == 400
assert res.json['errors'][0]['detail'] == 'Making API requests with credentials associated with a deactivated account is not allowed.'
def test_unconfirmed_users_return_entire_user_object(self, app, user_one, user_two):
url = '/{}users/{}/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_two.auth, expect_errors=True)
assert res.status_code == 200
user_one.is_registered = False
user_one.save()
res = app.get(url, expect_errors=True)
assert res.status_code == 200
attr = res.json['data']['attributes']
assert attr['active'] is False
assert res.json['data']['id'] == user_one._id
def test_requesting_deactivated_user_returns_410_response_and_meta_info(self, app, user_one, user_two):
url = '/{}users/{}/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_two.auth, expect_errors=True)
assert res.status_code == 200
user_one.is_disabled = True
user_one.save()
res = app.get(url, expect_errors=True)
assert res.status_code == 410
assert res.json['errors'][0]['meta']['family_name'] == user_one.family_name
assert res.json['errors'][0]['meta']['given_name'] == user_one.given_name
assert res.json['errors'][0]['meta']['middle_names'] == user_one.middle_names
assert res.json['errors'][0]['meta']['full_name'] == user_one.fullname
assert urlparse.urlparse(res.json['errors'][0]['meta']['profile_image']).netloc == 'secure.gravatar.com'
assert res.json['errors'][0]['detail'] == 'The requested user is no longer available.'
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vispy: gallery 30
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import argparse
import numpy as np
from vispy import gloo
from vispy import app
from vispy.util.transforms import perspective, translate, rotate
# Manual galaxy creation
#imported argparse to add statement below
parser = argparse.ArgumentParser()
parser.add_argument("num_arms", type=int)
parser.add_argument('--debug', '-d', action='store_true')
args = parser.parse_args()
if args.debug:
DEBUG = True
else:
DEBUG = False
if args.num_arms:
num_arms = int(args.num_arms)
else:
num_arms = 3
def make_arm(n, angle):
R = np.linspace(10, 450 + 50 * np.random.uniform(.5, 1.), n)
R += 40 * np.random.normal(0, 2., n) * np.linspace(1, .1, n)
#printed variables
if DEBUG:
print("The value of variable R is {0}".format(R))
T = angle + np.linspace(0, 2.5 * np.pi, n) + \
np.pi / 6 * np.random.normal(0, .5, n)
if DEBUG:
print("The value of variable T is {0}".format(T))
S = 8 + 2 * np.abs(np.random.normal(0, 1, n))
S *= np.linspace(1, .85, n)
if DEBUG:
print("The value of variable S is {0}".format(S))
P = np.zeros((n, 3), dtype=np.float32)  # one (x, y, z) position per star
if DEBUG:
print("The value of variable P is {0}".format(P))
X, Y, Z = P[:, 0], P[:, 1], P[:, 2]
X[...] = R * np.cos(T)
if DEBUG:
print("The value of variable X is {0}".format(X))
Y[...] = R * np.sin(T) * 1.1
if DEBUG:
print("The value of variable Y is {0}".format(Y))
D = np.sqrt(X * X + Y * Y)
if DEBUG:
print("Th e value of variable D is {0}".format(D))
Z[...] = 8 * np.random.normal(0, 2 - D / 512., n)
if DEBUG:
print("The value of variable Z is {0}".format(Z))
X += (D * np.random.uniform(0, 1, n) > 250) * \
(.05 * D * np.random.uniform(-1, 1, n))
Y += (D * np.random.uniform(0, 1, n) > 250) * \
(.05 * D * np.random.uniform(-1, 1, n))
Z += (D * np.random.uniform(0, 1, n) > 250) * \
(.05 * D * np.random.uniform(-1, 1, n))
D = (D - D.min()) / (D.max() - D.min())
return P / 256, S / 2, D
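# Quick sanity sketch (not used by the demo): make_arm() returns one
# (x, y, z) position, one point size and one normalized distance per star.
def _example_make_arm_shapes(n_stars=100):
    P, S, D = make_arm(n_stars, 0.0)
    assert P.shape == (n_stars, 3)
    assert S.shape == (n_stars,) and D.shape == (n_stars,)
    return P, S, D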
p = 1000
n = num_arms * p
# Very simple colormap
cmap = np.array([[255, 124, 0], [255, 163, 76],
[255, 192, 130], [255, 214, 173],
[255, 232, 212], [246, 238, 237],
[237, 240, 253], [217, 228, 255],
[202, 219, 255], [191, 212, 255],
[182, 206, 255], [174, 202, 255],
[168, 198, 255], [162, 195, 255],
[158, 192, 255], [155, 189, 255],
[151, 187, 255], [148, 185, 255],
[145, 183, 255], [143, 182, 255],
[141, 181, 255], [140, 179, 255],
[139, 179, 255],
[137, 177, 255]], dtype=np.uint8).reshape(1, 24, 3)
VERT_SHADER = """
#version 120
// Uniforms
// ------------------------------------
uniform mat4 u_model;
uniform mat4 u_view;
uniform mat4 u_projection;
uniform float u_size;
// Attributes
// ------------------------------------
attribute vec3 a_position;
attribute float a_size;
attribute float a_dist;
// Varyings
// ------------------------------------
varying float v_size;
varying float v_dist;
void main (void) {
v_size = a_size*u_size*.75;
v_dist = a_dist;
gl_Position = u_projection * u_view * u_model * vec4(a_position,1.0);
gl_PointSize = v_size;
}
"""
FRAG_SHADER = """
#version 120
// Uniforms
// ------------------------------------
uniform sampler2D u_colormap;
// Varyings
// ------------------------------------
varying float v_size;
varying float v_dist;
// Main
// ------------------------------------
void main()
{
float a = 2*(length(gl_PointCoord.xy - vec2(0.5,0.5)) / sqrt(2.0));
vec3 color = texture2D(u_colormap, vec2(v_dist,.5)).rgb;
gl_FragColor = vec4(color,(1-a)*.25);
}
"""
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, keys='interactive', size=(800, 600))
ps = self.pixel_scale
self.title = "Deysha's Galaxy"
data = np.zeros(n, [('a_position', np.float32, 3),
('a_size', np.float32, 1),
('a_dist', np.float32, 1)])
# changed the loop count from 3 to num_arms so the galaxy has more arms.
for i in range(num_arms):
P, S, D = make_arm(p, i * 2 * np.pi / num_arms)
data['a_dist'][(i + 0) * p:(i + 1) * p] = D
data['a_position'][(i + 0) * p:(i + 1) * p] = P
data['a_size'][(i + 0) * p:(i + 1) * p] = S*ps
self.program = gloo.Program(VERT_SHADER, FRAG_SHADER)
self.model = np.eye(4, dtype=np.float32)
self.projection = np.eye(4, dtype=np.float32)
self.theta, self.phi = 0, 0
self.translate = 5
self.view = translate((0, 0, -self.translate))
self.program.bind(gloo.VertexBuffer(data))
self.program['u_colormap'] = gloo.Texture2D(cmap)
self.program['u_size'] = 5. / self.translate
self.program['u_model'] = self.model
self.program['u_view'] = self.view
self.apply_zoom()
gloo.set_state(depth_test=False, blend=True,
blend_func=('src_alpha', 'one'), clear_color='black')
# Start the timer upon initialization.
self.timer = app.Timer('auto', connect=self.on_timer)
self.timer.start()
self.show()
def on_key_press(self, event):
if event.text == ' ':
if self.timer.running:
self.timer.stop()
else:
self.timer.start()
def on_timer(self, event):
self.theta += .11
self.phi += .13
self.model = np.dot(rotate(self.theta, (0, 0, 1)),
rotate(self.phi, (0, 1, 0)))
self.program['u_model'] = self.model
self.update()
def on_resize(self, event):
self.apply_zoom()
def on_mouse_wheel(self, event):
self.translate -= event.delta[1]
self.translate = max(2, self.translate)
self.view = translate((0, 0, -self.translate))
self.program['u_view'] = self.view
self.program['u_size'] = 5 / self.translate
self.update()
def on_draw(self, event):
gloo.clear()
self.program.draw('points')
def apply_zoom(self):
gloo.set_viewport(0, 0, self.physical_size[0], self.physical_size[1])
self.projection = perspective(45.0, self.size[0] /
float(self.size[1]), 1.0, 1000.0)
self.program['u_projection'] = self.projection
if __name__ == '__main__':
c = Canvas()
app.run()
|
|
"""Generate and tweet GIFs based on SDO imagery."""
import datetime
import json
import logging
import logging.handlers
import os
import tempfile
from time import sleep
import shutil
import subprocess
import time
import urllib.parse
import backoff
import click
import lxml.html
from PIL import Image
import requests
import twython
# =======
# Globals
# =======
logger = logging.getLogger(__name__)
start_time = datetime.datetime.utcnow() - datetime.timedelta(minutes=10)
SDO_URL_TEMPLATE = ("http://sdo.gsfc.nasa.gov/assets/img/browse/"
"{year:04d}/{month:02d}/{day:02d}/")
DEST_FILENAME_TEMPLATE = "{year:04d}_{month:02d}_{day:02d}_{hour:02d}.gif"
DELETION_LIMIT = 24 * 60 * 60
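# Illustrative expansion of the templates above (arbitrary example values;
# the real code formats them with `start_time`):
#
#   SDO_URL_TEMPLATE.format(year=2016, month=1, day=2)
#       -> 'http://sdo.gsfc.nasa.gov/assets/img/browse/2016/01/02/'
#   DEST_FILENAME_TEMPLATE.format(year=2016, month=1, day=2, hour=3)
#       -> '2016_01_02_03.gif'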
# ======================
# CLI callback functions
# ======================
def process_keyfile(ctx, param, value):
"""Read keyfile and load JSON."""
if value is not None:
try:
auth_info = json.load(value)['twitter']
except Exception:
click.echo('A valid JSON keyfile is required!')
raise
return auth_info
else:
return value
def validate_dirs(ctx, param, value):
"""Confirm that the work directory has the right subdirectories."""
if value is not None:
originals = os.path.join(value, 'originals')
gifs = os.path.join(value, 'gifs')
if not all([os.path.isdir(originals), os.path.isdir(gifs)]):
click.echo("Error: working directory requires "
"'originals' and 'gifs' subdirectories to exist!")
ctx.exit(1)
return value
def select_level(ctx, param, value):
"""Select logging level from accepted options."""
return {'debug': logging.DEBUG, 'info': logging.INFO}[value]
def oauth_dance(ctx, param, value):
"""Set up OAuth."""
if not value or ctx.resilient_parsing:
return
# set up
try:
auth_info = ctx.params['auth_info']
except KeyError:
click.echo("Error: --keyfile option is required to request access")
ctx.exit(1)
pre_auth_twitter = twython.Twython(auth_info['consumer_key'],
auth_info['consumer_secret'])
twitter_auth = pre_auth_twitter.get_authentication_tokens()
# prompt user to go to web and get verifier code
click.echo("Open: {}".format(twitter_auth['auth_url']))
verifier = click.prompt("Please enter the code provided by Twitter")
post_auth_twitter = twython.Twython(auth_info['consumer_key'],
auth_info['consumer_secret'],
twitter_auth['oauth_token'],
twitter_auth['oauth_token_secret'])
access_info = post_auth_twitter.get_authorized_tokens(verifier)
click.echo("")
click.echo("Access key: {}".format(access_info['oauth_token']))
click.echo("Access secret: {}".format(access_info['oauth_token_secret']))
ctx.exit()
# ======================
# Command-line interface
# ======================
@click.command(help=__doc__)
@click.argument('work_dir', required=True, callback=validate_dirs,
type=click.Path(exists=True, file_okay=False, dir_okay=True,
writable=True, readable=True,
resolve_path=True))
@click.option('--tweet/--no-tweet', default=True,
help='Generate a GIF and tweet or skip tweeting.')
@click.option('--keyfile', 'auth_info', type=click.File('r'), required=True,
callback=process_keyfile,
help='JSON file with Twitter keys and secrets.')
@click.option('--logfile', type=click.Path(writable=True), default=None)
@click.option('--loglevel', type=click.Choice(['debug', 'info']),
callback=select_level, default=None)
@click.option('--request-access', default=False, is_flag=True,
callback=oauth_dance, expose_value=False,
help='Request access key and secret.')
def cli(work_dir, tweet, auth_info, logfile, loglevel):
configure_logging(logfile, loglevel)
logger.debug("Command-line interface proccessed")
with open(make_sun_gif(work_dir), 'rb') as fp:
if not tweet:
logger.warning("--no-tweet option selected, not tweeting")
return
twitter = twython.Twython(auth_info['consumer_key'],
auth_info['consumer_secret'],
auth_info['access_key'],
auth_info['access_secret'])
attempts = 0
limit = 3
while True:
try:
attempts += 1
logger.debug("Tweeting (attempt %d of %d)", attempts, limit)
media_id = twitter.upload_media(media=fp)[u'media_id']
tweet_response = twitter.update_status(media_ids=[media_id])
logger.info("Tweeted http://twitter.com/starnearyou/status/%s",
tweet_response[u'id_str'])
clean_up(work_dir)
return
except twython.exceptions.TwythonError as err:
logger.exception("Tweeting failed: %r", err)
if attempts < limit:
time.sleep(1)
continue
else:
logger.critical("Tweeting failed %s times, aborting.",
attempts)
break
# =====================
# Logging configuration
# =====================
def configure_logging(filename=None, level=logging.INFO):
"""Configure logging.
The console handler always logs at the WARNING level; the optional log file
uses the requested level (INFO by default) and is only created when a
filename is given."""
logger.setLevel(min([logging.WARNING, level]))
# log to screen
console = logging.StreamHandler()
console_formatter = logging.Formatter("%(levelname)s - %(message)s")
console.setFormatter(console_formatter)
console.setLevel(logging.WARNING)
logger.addHandler(console)
logging.getLogger('backoff').addHandler(console)
logging.getLogger('backoff').setLevel(logging.INFO)
# log to file
if filename is not None:
logfile = logging.handlers.RotatingFileHandler(filename,
maxBytes=5 * 10 ** 6,
backupCount=4)
file_fmt = "%(asctime)s - %(levelname)s - %(message)s"
file_formatter = logging.Formatter(file_fmt)
logfile.setFormatter(file_formatter)
logfile.setLevel(level)
logger.addHandler(logfile)
logging.getLogger('backoff').addHandler(logfile)
# =======================
# GIF generating pipeline
# =======================
def make_sun_gif(work_dir):
"""Fetch and make the latest Sun GIF in `work_dir`."""
download_dir = os.path.join(work_dir, 'originals')
gifs_dir = os.path.join(work_dir, 'gifs')
urls = list(frame_urls())
downloaded_filenames = (download_frame(url, download_dir) for url in urls)
processed_images = (process_image(fname) for fname in downloaded_filenames)
try:
temp_dir = tempfile.mkdtemp()
temp_files = []
for image, url in zip(processed_images, urls):
temp_file = os.path.join(temp_dir, split_url(url))
image.save(temp_file)
temp_files.append(temp_file)
logger.info("%s frames processed", len(temp_files))
dest_filename = DEST_FILENAME_TEMPLATE.format(year=start_time.year,
month=start_time.month,
day=start_time.day,
hour=start_time.hour)
original_filename = os.path.join(temp_dir, dest_filename)
final_filename = os.path.join(gifs_dir, dest_filename)
convert_to_gif(temp_files, original_filename)
optimize_gif(original_filename, final_filename)
logger.info("Final GIF saved: %s", final_filename)
finally:
logger.debug("Cleaning up temporary files")
shutil.rmtree(temp_dir)
return final_filename
# ==============
# Image fetching
# ==============
@backoff.on_exception(backoff.expo,
requests.exceptions.RequestException,
max_tries=8)
def frame_urls(limit=32):
"""Yield the URLs of frames."""
sdo_url = SDO_URL_TEMPLATE.format(year=start_time.year,
month=start_time.month,
day=start_time.day,
hour=start_time.hour)
logger.info("Fetching frames index: %s", sdo_url)
response = requests.get(sdo_url, stream=True, timeout=5 * 60)
response.raw.decode_content = True
logger.debug("Frames index reponse: %s", response.status_code)
sdo_index = lxml.html.parse(response.raw, base_url=sdo_url).getroot()
sdo_index.make_links_absolute(sdo_url)
link_tags = sdo_index.xpath("//a[contains(@href, '_1024_0193.jpg')]")
logger.info("%s frame URLs found (limit: %s)", len(link_tags), limit)
for link in link_tags[-1 * limit:]:
yield link.get('href')
@backoff.on_exception(backoff.expo,
requests.exceptions.RequestException,
max_tries=8)
def download_frame(url, download_dir):
"""Download the URL to a given directory, if it doesn't already exist."""
filename = os.path.join(download_dir, split_url(url))
try:
with open(filename) as fp:
logger.debug("Skipping frame: %s", url)
logger.debug("File already exists: %s", filename)
logger.debug("Using existing frame: %s", url)
except IOError:
logger.debug("File does not exist: %s", filename)
logger.debug("Downloading frame: %s", url)
sleep(.250) # rate limit
response = requests.get(url, stream=True)
response.raw.decode_content = True
with open(filename, 'wb') as fp:
shutil.copyfileobj(response.raw, fp)
logger.debug("Frame saved: %s", filename)
logger.debug("Downloaded and saved: %s", url)
return filename
# ================
# Image processing
# ================
def process_image(filename):
"""Crop, rotate, and resize the image."""
logger.debug("Cropping, rotating, and resizing %s", filename)
with open(filename, 'rb') as fp:
image = Image.open(fp)
origin = 0
width = 1024
height = 1024
assert image.size == (width, height)
crop_box = (
origin, # left
origin + 72, # top, except the first 72 pixels
width - (width / 2), # right, except second half
height - 72, # bottom, except the last 72 pixels
)
image = image.crop(crop_box)
# rotate for a funkier presentation, since GIFs get too big with the
# full disk
image = image.rotate(-90, Image.NEAREST, expand=True)
# cut it down to near 440 x 220, which is optimal-ish for the Twitter
# timeline
# also, thumbnail works in place, rather than making a copy, for some
# reason
image.thumbnail((image.size[0] / 2, image.size[1] / 2), Image.LANCZOS)
logger.debug("Cropped, rotated, and resized %s", filename)
return image
# ==============================
# Generating and optimizing GIFs
# ==============================
def convert_to_gif(frame_filenames, dest_filename):
"""Convert `frame_filenames` to an animated gif at path `dest_filename`."""
logger.info("Converting %s frames to GIF", len(frame_filenames))
convert_cmd = ['convert', '-delay', '15'] + \
[f for f in frame_filenames] + \
[dest_filename]
subprocess.call(convert_cmd)
logger.debug("Preliminary GIF saved: %s", dest_filename)
def optimize_gif(source, dest):
"""Shrink GIF size."""
logger.debug("Optimizing file size of %s", source)
optimize_cmd = 'gifsicle --colors 256 --optimize=02 {0} > {1}'
subprocess.call(optimize_cmd.format(source, dest), shell=True)
logger.debug("Optimized GIF saved: %s", dest)
def clean_up(work_dir):
logger.debug("Cleaning up downloaded files")
download_dir = os.path.join(work_dir, 'originals')
deletion_count = 0
for dirpath, dirnames, filenames in os.walk(download_dir):
for filename in filenames:
fpath = os.path.join(dirpath, filename)
if is_file_too_old(fpath):
logger.debug('Deleting %s', fpath)
deletion_count += 1
os.remove(fpath)
logger.info('Deleted %s files', deletion_count)
# =========
# Utilities
# =========
def split_url(url):
"""Get the filename portion of a URL."""
return os.path.basename(urllib.parse.urlparse(url).path)
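# For example (hypothetical URL, path shape per SDO_URL_TEMPLATE):
#   split_url('http://sdo.gsfc.nasa.gov/assets/img/browse/2016/01/02/frame_1024_0193.jpg')
#   -> 'frame_1024_0193.jpg'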
def is_file_too_old(fpath):
"""Return whether the file is older than the `DELETION_LIMIT` threshhold in
seconds.
"""
mtime = os.path.getmtime(fpath)
return (time.time() - mtime) > DELETION_LIMIT
if __name__ == '__main__':
cli()
|
|
# Copyright (c) Hynek Schlawack, Richard Wall
# See LICENSE for details.
from __future__ import absolute_import, division, print_function
import getdns
from twisted.trial.unittest import SynchronousTestCase
from twisted.python.filepath import FilePath
from OpenSSL import crypto
from danex import _dane
class TLSADomainNameTests(SynchronousTestCase):
def test_tlsaDomainName(self):
"""
L{_dane.tlsaDomainName} returns the port, proto and parent domain as
labels of a new domain name string.
"""
self.assertEqual(
"_443._tcp.example.com",
_dane.tlsaDomainName('example.com', 443, 'tcp')
)
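# For reference: per RFC 6698 the TLSA owner name is
# "_<port>._<proto>.<parent domain>", so port 443 over TCP for example.com
# becomes "_443._tcp.example.com" (illustrative note; the implementation
# under test is _dane.tlsaDomainName).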
class GetdnsResponseErrorTests(SynchronousTestCase):
def test_errorText(self):
"""
L{_dane.GetdnsResponseError} has an C{errorText} attribute which is the name
of the corresponding L{getdns} constant.
"""
self.assertEqual(
"GETDNS_RESPSTATUS_NO_NAME",
_dane.GetdnsResponseError(getdns.GETDNS_RESPSTATUS_NO_NAME).errorText
)
class TLSARecordTests(SynchronousTestCase):
def test_matchesCertificateCertTrue(self):
"""
"""
serverCertBytes = FilePath(__file__).sibling('example_cert.bin').open().read()
serverCert = crypto.load_certificate(crypto.FILETYPE_ASN1, serverCertBytes)
self.assertEqual(
True,
_dane.TLSARecord(
payload=serverCertBytes,
usage=0,
selector=_dane.SELECTOR.CERT.value,
matchingType=_dane.MATCHING_TYPE.FULL.value
).matchesCertificate(serverCert)
)
def test_matchesCertificateCertFalse(self):
"""
"""
serverCertBytesOriginal = FilePath(__file__).sibling('example_cert.bin').open().read()
serverCert = crypto.load_certificate(crypto.FILETYPE_ASN1, serverCertBytesOriginal)
originalSerial = serverCert.get_serial_number()
serverCert.set_serial_number(100)
self.assertNotEqual(originalSerial, serverCert.get_serial_number())
serverCertBytesNew = crypto.dump_certificate(crypto.FILETYPE_ASN1, serverCert)
self.assertNotEqual(serverCertBytesOriginal, serverCertBytesNew)
self.assertEqual(
False,
_dane.TLSARecord(
payload=serverCertBytesNew,
usage=0,
selector=_dane.SELECTOR.CERT.value,
matchingType=_dane.MATCHING_TYPE.FULL.value
).matchesCertificate(serverCert)
)
test_matchesCertificateCertFalse.skip = True
def test_matchesCertificateSPKITrue(self):
"""
"""
serverCertBytesOriginal = FilePath(__file__).sibling('example_cert.bin').open().read()
serverCert = crypto.load_certificate(crypto.FILETYPE_ASN1, serverCertBytesOriginal)
serverCert.set_serial_number(100)
serverCertBytes = crypto.dump_certificate(crypto.FILETYPE_ASN1, serverCert)
self.assertEqual(serverCertBytesOriginal, serverCertBytes)
self.assertEqual(
False,
_dane.TLSARecord(
payload=serverCertBytes + b'xxx',
usage=0,
selector=_dane.SELECTOR.SPKI.value,
matchingType=_dane.MATCHING_TYPE.FULL.value
).matchesCertificate(serverCert)
)
test_matchesCertificateSPKITrue.skip = True
class FakeGetdns(object):
"""
    An in-memory fake of the getdns API for testing.
"""
def __init__(self, generalResult=None):
self._generalResult = generalResult
for k, v in getdns.__dict__.items():
if k.startswith('GETDNS_'):
setattr(self, k, v)
def context_create(self):
"""
"""
def general(self, context, name, request_type, extensions):
"""
"""
return self._generalResult
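# Illustrative sketch (not part of the original tests): the fake is handed to
# the code under test through its `getdns=` keyword argument, as the TLSATests
# below do, so no real DNS resolution or DNSSEC validation is involved:
#
#   fake = FakeGetdns(generalResult=createResults(
#       status=getdns.GETDNS_RESPSTATUS_GOOD,
#       selector=_dane.SELECTOR.CERT.value))
#   _dane.lookup_tlsa_records('example.com', 443, 'tcp', getdns=fake)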
class TLSATests(SynchronousTestCase):
def test_tlsaCert(self):
"""
L{_dane.lookup_tlsa_records} returns a L{_dane.TLSARecord} instance if
the domain name exists and a verified record is found and the record
selector type is CERT.
"""
fakeGetdns = FakeGetdns(
generalResult=createResults(status=getdns.GETDNS_RESPSTATUS_GOOD,
selector=_dane.SELECTOR.CERT.value,
certificate_association_data=b'FOOBAR'))
_, (res,) = _dane.lookup_tlsa_records(
'example.com', 443, 'tcp', getdns=fakeGetdns)
self.assertEqual(
(_dane.SELECTOR.CERT, b'FOOBAR'),
(res.selector, res.payload)
)
def test_tlsaSPKI(self):
"""
L{_dane.lookup_tlsa_records} returns a L{_dane.TLSARecord} instance if
        the domain name exists and a verified record is found and the record
selector type is SPKI.
"""
fakeGetdns = FakeGetdns(
generalResult=createResults(status=getdns.GETDNS_RESPSTATUS_GOOD,
selector=_dane.SELECTOR.SPKI.value,
certificate_association_data=b'FOOBAR'))
_, (res,) = _dane.lookup_tlsa_records(
'example.com', 443, 'tcp', getdns=fakeGetdns)
self.assertEqual(
(_dane.SELECTOR.SPKI, b'FOOBAR'),
(res.selector, res.payload)
)
def test_tlsaNoname(self):
"""
        L{_dane.lookup_tlsa_records} raises L{_dane.GetdnsResponseError} if the
        domain name does not exist.
"""
e = self.assertRaises(
_dane.GetdnsResponseError,
_dane.lookup_tlsa_records, 'example.com', 443, 'tcp',
getdns=FakeGetdns(
generalResult=createResults(
status=getdns.GETDNS_RESPSTATUS_NO_NAME
)
)
)
self.assertEqual(
getdns.GETDNS_RESPSTATUS_NO_NAME,
e.errorCode
)
def createResults(status=getdns.GETDNS_RESPSTATUS_GOOD,
selector=None,
certificate_association_data=b"",):
return {'answer_type': 800,
'canonical_name': '_443._tcp.getdnsapi.org.',
'just_address_answers': [],
# 'replies_full': [<read-only buffer ptr 0x7fe2e0029e80, size 636 at 0x7fe2e4e58fb0>],
'replies_tree': [{'answer': [{'class': 1,
'name': '_443._tcp.getdnsapi.org.',
'rdata': {
'certificate_association_data': certificate_association_data,
'certificate_usage': 3,
'matching_type': 1,
# 'rdata_raw': "",
'selector': selector
},
# 'ttl': 450,
'type': 52,
# {'class': 1,
# 'name': '_443._tcp.getdnsapi.org.',
# 'rdata': {'algorithm': 7,
# 'key_tag': 49262,
# 'labels': 4,
# 'original_ttl': 450,
# 'rdata_raw': <read-only buffer ptr 0x7fe2e0261b70, size 161 at 0x7fe2e4e60130>,
# 'signature': <read-only buffer ptr 0x7fe2e0254c40, size 128 at 0x7fe2e4e60170>,
# 'signature_expiration': 1399325172,
# 'signature_inception': 1398100703,
# 'signers_name': 'getdnsapi.org.',
# 'type_covered': 52},
# 'ttl': 450,
# 'type': 46
}
],
'answer_type': 800,
# 'authority': [{'class': 1,
# 'name': 'getdnsapi.org.',
# 'rdata': {'nsdname': 'ns.secret-wg.org.',
# 'rdata_raw': 'ns.secret-wg.org.'},
# 'ttl': 450,
# 'type': 2},
# {'class': 1,
# 'name': 'getdnsapi.org.',
# 'rdata': {'nsdname': 'mcvax.nlnetlabs.nl.',
# 'rdata_raw': 'mcvax.nlnetlabs.nl.'},
# 'ttl': 450,
# 'type': 2},
# {'class': 1,
# 'name': 'getdnsapi.org.',
# 'rdata': {'nsdname': 'open.nlnetlabs.nl.',
# 'rdata_raw': 'open.nlnetlabs.nl.'},
# 'ttl': 450,
# 'type': 2},
# {'class': 1,
# 'name': 'getdnsapi.org.',
# 'rdata': {'algorithm': 7,
# 'key_tag': 49262,
# 'labels': 2,
# 'original_ttl': 450,
# 'rdata_raw': <read-only buffer ptr 0x7fe2e0261f90, size 161 at 0x7fe2e4e601f0>,
# 'signature': <read-only buffer ptr 0x7fe2e0028120, size 128 at 0x7fe2e4e60230>,
# 'signature_expiration': 1399278072,
# 'signature_inception': 1398093503,
# 'signers_name': 'getdnsapi.org.',
# 'type_covered': 2},
# 'ttl': 450,
# 'type': 46}],
'canonical_name': '_443._tcp.getdnsapi.org.',
'dnssec_status': 400,
'header': {'aa': 0,
'ad': 1,
'ancount': 2,
'arcount': 0,
'cd': 0,
'id': 0,
'nscount': 4,
'opcode': 0,
'qdcount': 1,
'qr': 1,
'ra': 1,
'rcode': 0,
'rd': 1,
'tc': 0,
'z': 0},
'question': {'qclass': 1,
'qname': '_443._tcp.getdnsapi.org.',
'qtype': 52}}],
'status': status}
|
|
#!/usr/bin/env python3
"""
Rules for building C/API module with f2py2e.
Here is a skeleton of a new wrapper function (13Dec2001):
wrapper_function(args)
declarations
get_python_arguments, say, `a' and `b'
get_a_from_python
if (successful) {
get_b_from_python
if (successful) {
callfortran
if (successful) {
put_a_to_python
if (successful) {
put_b_to_python
if (successful) {
buildvalue = ...
}
}
}
}
cleanup_b
}
cleanup_a
return buildvalue
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/08/30 08:58:42 $
Pearu Peterson
"""
__version__ = "$Revision: 1.129 $"[10:-1]
from . import __version__
f2py_version = __version__.version
import os
import time
import copy
from .auxfuncs import (
applyrules, debugcapi, dictappend, errmess, gentitle, getargs2,
hascallstatement, hasexternals, hasinitvalue, hasnote, hasresultnote,
isarray, isarrayofstrings, iscomplex, iscomplexarray,
iscomplexfunction, iscomplexfunction_warn, isdummyroutine, isexternal,
isfunction, isfunction_wrap, isint1array, isintent_aux, isintent_c,
isintent_callback, isintent_copy, isintent_hide, isintent_inout,
isintent_nothide, isintent_out, isintent_overwrite, islogical,
islong_complex, islong_double, islong_doublefunction, islong_long,
islong_longfunction, ismoduleroutine, isoptional, isrequired, isscalar,
issigned_long_longarray, isstring, isstringarray, isstringfunction,
issubroutine, issubroutine_wrap, isthreadsafe, isunsigned,
isunsigned_char, isunsigned_chararray, isunsigned_long_long,
isunsigned_long_longarray, isunsigned_short, isunsigned_shortarray,
l_and, l_not, l_or, outmess, replace, stripcomma,
)
from . import capi_maps
from . import cfuncs
from . import common_rules
from . import use_rules
from . import f90mod_rules
from . import func2subr
options = {}
sepdict = {}
#for k in ['need_cfuncs']: sepdict[k]=','
for k in ['decl',
'frompyobj',
'cleanupfrompyobj',
'topyarr', 'method',
'pyobjfrom', 'closepyobjfrom',
'freemem',
'userincludes',
'includes0', 'includes', 'typedefs', 'typedefs_generated',
'cppmacros', 'cfuncs', 'callbacks',
'latexdoc',
'restdoc',
'routine_defs', 'externroutines',
'initf2pywraphooks',
'commonhooks', 'initcommonhooks',
'f90modhooks', 'initf90modhooks']:
sepdict[k] = '\n'
#################### Rules for C/API module #################
generationtime = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
module_rules = {
'modulebody': """\
/* File: #modulename#module.c
* This file is auto-generated with f2py (version:#f2py_version#).
* f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
* written by Pearu Peterson <[email protected]>.
* Generation date: """ + time.asctime(time.gmtime(generationtime)) + """
* Do not edit this file directly unless you know what you are doing!!!
*/
#ifdef __cplusplus
extern \"C\" {
#endif
""" + gentitle("See f2py2e/cfuncs.py: includes") + """
#includes#
#includes0#
""" + gentitle("See f2py2e/rules.py: mod_rules['modulebody']") + """
static PyObject *#modulename#_error;
static PyObject *#modulename#_module;
""" + gentitle("See f2py2e/cfuncs.py: typedefs") + """
#typedefs#
""" + gentitle("See f2py2e/cfuncs.py: typedefs_generated") + """
#typedefs_generated#
""" + gentitle("See f2py2e/cfuncs.py: cppmacros") + """
#cppmacros#
""" + gentitle("See f2py2e/cfuncs.py: cfuncs") + """
#cfuncs#
""" + gentitle("See f2py2e/cfuncs.py: userincludes") + """
#userincludes#
""" + gentitle("See f2py2e/capi_rules.py: usercode") + """
#usercode#
/* See f2py2e/rules.py */
#externroutines#
""" + gentitle("See f2py2e/capi_rules.py: usercode1") + """
#usercode1#
""" + gentitle("See f2py2e/cb_rules.py: buildcallback") + """
#callbacks#
""" + gentitle("See f2py2e/rules.py: buildapi") + """
#body#
""" + gentitle("See f2py2e/f90mod_rules.py: buildhooks") + """
#f90modhooks#
""" + gentitle("See f2py2e/rules.py: module_rules['modulebody']") + """
""" + gentitle("See f2py2e/common_rules.py: buildhooks") + """
#commonhooks#
""" + gentitle("See f2py2e/rules.py") + """
static FortranDataDef f2py_routine_defs[] = {
#routine_defs#
\t{NULL}
};
static PyMethodDef f2py_module_methods[] = {
#pymethoddef#
\t{NULL,NULL}
};
static struct PyModuleDef moduledef = {
\tPyModuleDef_HEAD_INIT,
\t"#modulename#",
\tNULL,
\t-1,
\tf2py_module_methods,
\tNULL,
\tNULL,
\tNULL,
\tNULL
};
PyMODINIT_FUNC PyInit_#modulename#(void) {
\tint i;
\tPyObject *m,*d, *s, *tmp;
\tm = #modulename#_module = PyModule_Create(&moduledef);
\tPy_TYPE(&PyFortran_Type) = &PyType_Type;
\timport_array();
\tif (PyErr_Occurred())
\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;}
\td = PyModule_GetDict(m);
\ts = PyString_FromString(\"$R""" + """evision: $\");
\tPyDict_SetItemString(d, \"__version__\", s);
\tPy_DECREF(s);
\ts = PyUnicode_FromString(
\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
\tPyDict_SetItemString(d, \"__doc__\", s);
\tPy_DECREF(s);
\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
\t/*
\t * Store the error object inside the dict, so that it could get deallocated.
\t * (in practice, this is a module, so it likely will not and cannot.)
\t */
\tPyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error);
\tPy_DECREF(#modulename#_error);
\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++) {
\t\ttmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]);
\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name, tmp);
\t\tPy_DECREF(tmp);
\t}
#initf2pywraphooks#
#initf90modhooks#
#initcommonhooks#
#interface_usercode#
#ifdef F2PY_REPORT_ATEXIT
\tif (! PyErr_Occurred())
\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\");
#endif
\treturn m;
}
#ifdef __cplusplus
}
#endif
""",
'separatorsfor': {'latexdoc': '\n\n',
'restdoc': '\n\n'},
'latexdoc': ['\\section{Module \\texttt{#texmodulename#}}\n',
'#modnote#\n',
'#latexdoc#'],
'restdoc': ['Module #modulename#\n' + '=' * 80,
'\n#restdoc#']
}
defmod_rules = [
{'body': '/*eof body*/',
'method': '/*eof method*/',
'externroutines': '/*eof externroutines*/',
'routine_defs': '/*eof routine_defs*/',
'initf90modhooks': '/*eof initf90modhooks*/',
'initf2pywraphooks': '/*eof initf2pywraphooks*/',
'initcommonhooks': '/*eof initcommonhooks*/',
'latexdoc': '',
'restdoc': '',
'modnote': {hasnote: '#note#', l_not(hasnote): ''},
}
]
routine_rules = {
'separatorsfor': sepdict,
'body': """
#begintitle#
static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\";
/* #declfortranroutine# */
static PyObject *#apiname#(const PyObject *capi_self,
PyObject *capi_args,
PyObject *capi_keywds,
#functype# (*f2py_func)(#callprotoargument#)) {
\tPyObject * volatile capi_buildvalue = NULL;
\tvolatile int f2py_success = 1;
#decl#
\tstatic char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL};
#usercode#
#routdebugenter#
#ifdef F2PY_REPORT_ATEXIT
f2py_start_clock();
#endif
\tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\
\t\t\"#argformat#|#keyformat##xaformat#:#pyname#\",\\
\t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL;
#frompyobj#
/*end of frompyobj*/
#ifdef F2PY_REPORT_ATEXIT
f2py_start_call_clock();
#endif
#callfortranroutine#
if (PyErr_Occurred())
f2py_success = 0;
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_call_clock();
#endif
/*end of callfortranroutine*/
\t\tif (f2py_success) {
#pyobjfrom#
/*end of pyobjfrom*/
\t\tCFUNCSMESS(\"Building return value.\\n\");
\t\tcapi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#);
/*closepyobjfrom*/
#closepyobjfrom#
\t\t} /*if (f2py_success) after callfortranroutine*/
/*cleanupfrompyobj*/
#cleanupfrompyobj#
\tif (capi_buildvalue == NULL) {
#routdebugfailure#
\t} else {
#routdebugleave#
\t}
\tCFUNCSMESS(\"Freeing memory.\\n\");
#freemem#
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_clock();
#endif
\treturn capi_buildvalue;
}
#endtitle#
""",
'routine_defs': '#routine_def#',
'initf2pywraphooks': '#initf2pywraphook#',
'externroutines': '#declfortranroutine#',
'doc': '#docreturn##name#(#docsignature#)',
'docshort': '#docreturn##name#(#docsignatureshort#)',
'docs': '"\t#docreturn##name#(#docsignature#)\\n"\n',
'need': ['arrayobject.h', 'CFUNCSMESS', 'MINMAX'],
'cppmacros': {debugcapi: '#define DEBUGCFUNCS'},
'latexdoc': ['\\subsection{Wrapper function \\texttt{#texname#}}\n',
"""
\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)}
#routnote#
#latexdocstrsigns#
"""],
'restdoc': ['Wrapped function ``#name#``\n' + '-' * 80,
]
}
################## Rules for C/API function ##############
rout_rules = [
{ # Init
'separatorsfor': {'callfortranroutine': '\n', 'routdebugenter': '\n', 'decl': '\n',
'routdebugleave': '\n', 'routdebugfailure': '\n',
'setjmpbuf': ' || ',
'docstrreq': '\n', 'docstropt': '\n', 'docstrout': '\n',
'docstrcbs': '\n', 'docstrsigns': '\\n"\n"',
'latexdocstrsigns': '\n',
'latexdocstrreq': '\n', 'latexdocstropt': '\n',
'latexdocstrout': '\n', 'latexdocstrcbs': '\n',
},
'kwlist': '', 'kwlistopt': '', 'callfortran': '', 'callfortranappend': '',
'docsign': '', 'docsignopt': '', 'decl': '/*decl*/',
'freemem': '/*freemem*/',
'docsignshort': '', 'docsignoptshort': '',
'docstrsigns': '', 'latexdocstrsigns': '',
'docstrreq': '\\nParameters\\n----------',
'docstropt': '\\nOther Parameters\\n----------------',
'docstrout': '\\nReturns\\n-------',
'docstrcbs': '\\nNotes\\n-----\\nCall-back functions::\\n',
'latexdocstrreq': '\\noindent Required arguments:',
'latexdocstropt': '\\noindent Optional arguments:',
'latexdocstrout': '\\noindent Return objects:',
'latexdocstrcbs': '\\noindent Call-back functions:',
'args_capi': '', 'keys_capi': '', 'functype': '',
'frompyobj': '/*frompyobj*/',
# this list will be reversed
'cleanupfrompyobj': ['/*end of cleanupfrompyobj*/'],
'pyobjfrom': '/*pyobjfrom*/',
# this list will be reversed
'closepyobjfrom': ['/*end of closepyobjfrom*/'],
'topyarr': '/*topyarr*/', 'routdebugleave': '/*routdebugleave*/',
'routdebugenter': '/*routdebugenter*/',
'routdebugfailure': '/*routdebugfailure*/',
'callfortranroutine': '/*callfortranroutine*/',
'argformat': '', 'keyformat': '', 'need_cfuncs': '',
'docreturn': '', 'return': '', 'returnformat': '', 'rformat': '',
'kwlistxa': '', 'keys_xa': '', 'xaformat': '', 'docsignxa': '', 'docsignxashort': '',
'initf2pywraphook': '',
'routnote': {hasnote: '--- #note#', l_not(hasnote): ''},
}, {
'apiname': 'f2py_rout_#modulename#_#name#',
'pyname': '#modulename#.#name#',
'decl': '',
'_check': l_not(ismoduleroutine)
}, {
'apiname': 'f2py_rout_#modulename#_#f90modulename#_#name#',
'pyname': '#modulename#.#f90modulename#.#name#',
'decl': '',
'_check': ismoduleroutine
}, { # Subroutine
'functype': 'void',
'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern void #fortranname#(#callprotoargument#);',
ismoduleroutine: '',
isdummyroutine: ''
},
'routine_def': {l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isdummyroutine): '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'F_FUNC'},
'callfortranroutine': [
{debugcapi: [
"""\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]},
{hasexternals: """\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
{isthreadsafe: '\t\t\tPy_BEGIN_ALLOW_THREADS'},
{hascallstatement: '''\t\t\t\t#callstatement#;
\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''},
{l_not(l_or(hascallstatement, isdummyroutine))
: '\t\t\t\t(*f2py_func)(#callfortran#);'},
{isthreadsafe: '\t\t\tPy_END_ALLOW_THREADS'},
{hasexternals: """\t\t}"""}
],
'_check': l_and(issubroutine, l_not(issubroutine_wrap)),
}, { # Wrapped function
'functype': 'void',
'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
isdummyroutine: '',
},
'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): '''
{
extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL);
PyObject_SetAttrString(o,"_cpointer", tmp);
Py_DECREF(tmp);
s = PyUnicode_FromString("#name#");
PyObject_SetAttrString(o,"__name__", s);
Py_DECREF(s);
}
'''},
'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']},
'callfortranroutine': [
{debugcapi: [
"""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals: """\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement, isdummyroutine))
: '\t(*f2py_func)(#callfortran#);'},
{hascallstatement:
'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
{isthreadsafe: '\tPy_END_ALLOW_THREADS'},
{hasexternals: '\t}'}
],
'_check': isfunction_wrap,
}, { # Wrapped subroutine
'functype': 'void',
'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
isdummyroutine: '',
},
'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): '''
{
extern void #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL);
PyObject_SetAttrString(o,"_cpointer", tmp);
Py_DECREF(tmp);
s = PyUnicode_FromString("#name#");
PyObject_SetAttrString(o,"__name__", s);
Py_DECREF(s);
}
'''},
'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']},
'callfortranroutine': [
{debugcapi: [
"""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals: """\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement, isdummyroutine))
: '\t(*f2py_func)(#callfortran#);'},
{hascallstatement:
'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
{isthreadsafe: '\tPy_END_ALLOW_THREADS'},
{hasexternals: '\t}'}
],
'_check': issubroutine_wrap,
}, { # Function
'functype': '#ctype#',
'docreturn': {l_not(isintent_hide): '#rname#,'},
'docstrout': '#pydocsignout#',
'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}',
{hasresultnote: '--- #resultnote#'}],
'callfortranroutine': [{l_and(debugcapi, isstringfunction): """\
#ifdef USESCOMPAQFORTRAN
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\");
#else
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
#endif
"""},
{l_and(debugcapi, l_not(isstringfunction)): """\
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
"""}
],
'_check': l_and(isfunction, l_not(isfunction_wrap))
}, { # Scalar function
'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern #ctype# #fortranname#(#callprotoargument#);',
isdummyroutine: ''
},
'routine_def': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'decl': [{iscomplexfunction_warn: '\t#ctype# #name#_return_value={0,0};',
l_not(iscomplexfunction): '\t#ctype# #name#_return_value=0;'},
{iscomplexfunction:
'\tPyObject *#name#_return_value_capi = Py_None;'}
],
'callfortranroutine': [
{hasexternals: """\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'},
{hascallstatement: '''\t#callstatement#;
/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/
'''},
{l_not(l_or(hascallstatement, isdummyroutine))
: '\t#name#_return_value = (*f2py_func)(#callfortran#);'},
{isthreadsafe: '\tPy_END_ALLOW_THREADS'},
{hasexternals: '\t}'},
{l_and(debugcapi, iscomplexfunction)
: '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'},
{l_and(debugcapi, l_not(iscomplexfunction)): '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}],
'pyobjfrom': {iscomplexfunction: '\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'},
'need': [{l_not(isdummyroutine): 'F_FUNC'},
{iscomplexfunction: 'pyobj_from_#ctype#1'},
{islong_longfunction: 'long_long'},
{islong_doublefunction: 'long_double'}],
'returnformat': {l_not(isintent_hide): '#rformat#'},
'return': {iscomplexfunction: ',#name#_return_value_capi',
l_not(l_or(iscomplexfunction, isintent_hide)): ',#name#_return_value'},
'_check': l_and(isfunction, l_not(isstringfunction), l_not(isfunction_wrap))
}, { # String function # in use for --no-wrap
'declfortranroutine': 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
'routine_def': {l_not(l_or(ismoduleroutine, isintent_c)):
'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isintent_c):
'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},'
},
'decl': ['\t#ctype# #name#_return_value = NULL;',
'\tint #name#_return_value_len = 0;'],
'callfortran':'#name#_return_value,#name#_return_value_len,',
'callfortranroutine':['\t#name#_return_value_len = #rlength#;',
'\tif ((#name#_return_value = (string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {',
'\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");',
'\t\tf2py_success = 0;',
'\t} else {',
"\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';",
'\t}',
'\tif (f2py_success) {',
{hasexternals: """\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
{isthreadsafe: '\t\tPy_BEGIN_ALLOW_THREADS'},
"""\
#ifdef USESCOMPAQFORTRAN
\t\t(*f2py_func)(#callcompaqfortran#);
#else
\t\t(*f2py_func)(#callfortran#);
#endif
""",
{isthreadsafe: '\t\tPy_END_ALLOW_THREADS'},
{hasexternals: '\t\t}'},
{debugcapi:
'\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'},
'\t} /* if (f2py_success) after (string)malloc */',
],
'returnformat': '#rformat#',
'return': ',#name#_return_value',
'freemem': '\tSTRINGFREE(#name#_return_value);',
'need': ['F_FUNC', '#ctype#', 'STRINGFREE'],
'_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete
},
{ # Debugging
'routdebugenter': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");',
'routdebugleave': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");',
'routdebugfailure': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");',
'_check': debugcapi
}
]
################ Rules for arguments ##################
typedef_need_dict = {islong_long: 'long_long',
islong_double: 'long_double',
islong_complex: 'complex_long_double',
isunsigned_char: 'unsigned_char',
isunsigned_short: 'unsigned_short',
isunsigned: 'unsigned',
isunsigned_long_long: 'unsigned_long_long',
isunsigned_chararray: 'unsigned_char',
isunsigned_shortarray: 'unsigned_short',
isunsigned_long_longarray: 'unsigned_long_long',
issigned_long_longarray: 'long_long',
}
aux_rules = [
{
'separatorsfor': sepdict
},
{ # Common
'frompyobj': ['\t/* Processing auxiliary variable #varname# */',
{debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ],
'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */',
'need': typedef_need_dict,
},
# Scalars (not complex)
{ # Common
'decl': '\t#ctype# #varname# = 0;',
'need': {hasinitvalue: 'math.h'},
'frompyobj': {hasinitvalue: '\t#varname# = #init#;'},
'_check': l_and(isscalar, l_not(iscomplex)),
},
{
'return': ',#varname#',
'docstrout': '#pydocsignout#',
'docreturn': '#outvarname#,',
'returnformat': '#varrformat#',
'_check': l_and(isscalar, l_not(iscomplex), isintent_out),
},
# Complex scalars
{ # Common
'decl': '\t#ctype# #varname#;',
'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check': iscomplex
},
# String
{ # Common
'decl': ['\t#ctype# #varname# = NULL;',
'\tint slen(#varname#);',
],
'need':['len..'],
'_check':isstring
},
# Array
{ # Common
'decl': ['\t#ctype# *#varname# = NULL;',
'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
'\tconst int #varname#_Rank = #rank#;',
],
'need':['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}],
'_check': isarray
},
# Scalararray
{ # Common
'_check': l_and(isarray, l_not(iscomplexarray))
}, { # Not hidden
'_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide)
},
# Integer*1 array
{'need': '#ctype#',
'_check': isint1array,
'_depend': ''
},
# Integer*-1 array
{'need': '#ctype#',
'_check': isunsigned_chararray,
'_depend': ''
},
# Integer*-2 array
{'need': '#ctype#',
'_check': isunsigned_shortarray,
'_depend': ''
},
# Integer*-8 array
{'need': '#ctype#',
'_check': isunsigned_long_longarray,
'_depend': ''
},
# Complexarray
{'need': '#ctype#',
'_check': iscomplexarray,
'_depend': ''
},
# Stringarray
{
'callfortranappend': {isarrayofstrings: 'flen(#varname#),'},
'need': 'string',
'_check': isstringarray
}
]
arg_rules = [
{
'separatorsfor': sepdict
},
{ # Common
'frompyobj': ['\t/* Processing variable #varname# */',
{debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ],
'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */',
'_depend': '',
'need': typedef_need_dict,
},
# Doc signatures
{
'docstropt': {l_and(isoptional, isintent_nothide): '#pydocsign#'},
'docstrreq': {l_and(isrequired, isintent_nothide): '#pydocsign#'},
'docstrout': {isintent_out: '#pydocsignout#'},
'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote: '--- #note#'}]},
'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote: '--- #note#'}]},
'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}',
{l_and(hasnote, isintent_hide): '--- #note#',
l_and(hasnote, isintent_nothide): '--- See above.'}]},
'depend': ''
},
# Required/Optional arguments
{
'kwlist': '"#varname#",',
'docsign': '#varname#,',
'_check': l_and(isintent_nothide, l_not(isoptional))
},
{
'kwlistopt': '"#varname#",',
'docsignopt': '#varname#=#showinit#,',
'docsignoptshort': '#varname#,',
'_check': l_and(isintent_nothide, isoptional)
},
# Docstring/BuildValue
{
'docreturn': '#outvarname#,',
'returnformat': '#varrformat#',
'_check': isintent_out
},
# Externals (call-back functions)
{ # Common
'docsignxa': {isintent_nothide: '#varname#_extra_args=(),'},
'docsignxashort': {isintent_nothide: '#varname#_extra_args,'},
'docstropt': {isintent_nothide: '#varname#_extra_args : input tuple, optional\\n Default: ()'},
'docstrcbs': '#cbdocstr#',
'latexdocstrcbs': '\\item[] #cblatexdocstr#',
'latexdocstropt': {isintent_nothide: '\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'},
'decl': ['\tPyObject *#varname#_capi = Py_None;',
'\tPyTupleObject *#varname#_xa_capi = NULL;',
'\tPyTupleObject *#varname#_args_capi = NULL;',
'\tint #varname#_nofargs_capi = 0;',
{l_not(isintent_callback):
'\t#cbname#_typedef #varname#_cptr;'}
],
'kwlistxa': {isintent_nothide: '"#varname#_extra_args",'},
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'xaformat': {isintent_nothide: 'O!'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'keys_xa': ',&PyTuple_Type,&#varname#_xa_capi',
'setjmpbuf': '(setjmp(#cbname#_jmpbuf))',
'callfortran': {l_not(isintent_callback): '#varname#_cptr,'},
'need': ['#cbname#', 'setjmp.h'],
'_check':isexternal
},
{
'frompyobj': [{l_not(isintent_callback): """\
if(F2PyCapsule_Check(#varname#_capi)) {
#varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_capi);
} else {
#varname#_cptr = #cbname#;
}
"""}, {isintent_callback: """\
if (#varname#_capi==Py_None) {
#varname#_capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\");
if (#varname#_capi) {
if (#varname#_xa_capi==NULL) {
if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) {
PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\");
if (capi_tmp) {
#varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp);
Py_DECREF(capi_tmp);
}
else {
#varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\");
}
if (#varname#_xa_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\");
return NULL;
}
}
}
}
if (#varname#_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\");
return NULL;
}
}
"""},
"""\
\t#varname#_nofargs_capi = #cbname#_nofargs;
\tif (create_cb_arglist(#varname#_capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#cbname#_nofargs,&#varname#_args_capi,\"failed in processing argument list for call-back #varname#.\")) {
\t\tjmp_buf #varname#_jmpbuf;""",
{debugcapi: ["""\
\t\tfprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#cbname#_nofargs);
\t\tCFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""",
{l_not(isintent_callback): """\t\tfprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]},
"""\
\t\tCFUNCSMESS(\"Saving jmpbuf for `#varname#`.\\n\");
\t\tSWAP(#varname#_capi,#cbname#_capi,PyObject);
\t\tSWAP(#varname#_args_capi,#cbname#_args_capi,PyTupleObject);
\t\tmemcpy(&#varname#_jmpbuf,&#cbname#_jmpbuf,sizeof(jmp_buf));""",
],
'cleanupfrompyobj':
"""\
\t\tCFUNCSMESS(\"Restoring jmpbuf for `#varname#`.\\n\");
\t\t#cbname#_capi = #varname#_capi;
\t\tPy_DECREF(#cbname#_args_capi);
\t\t#cbname#_args_capi = #varname#_args_capi;
\t\t#cbname#_nofargs = #varname#_nofargs_capi;
\t\tmemcpy(&#cbname#_jmpbuf,&#varname#_jmpbuf,sizeof(jmp_buf));
\t}""",
'need': ['SWAP', 'create_cb_arglist'],
'_check':isexternal,
'_depend':''
},
# Scalars (not complex)
{ # Common
'decl': '\t#ctype# #varname# = 0;',
'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'},
'return': {isintent_out: ',#varname#'},
'_check': l_and(isscalar, l_not(iscomplex))
}, {
'need': {hasinitvalue: 'math.h'},
'_check': l_and(isscalar, l_not(iscomplex)),
}, { # Not hidden
'decl': '\tPyObject *#varname#_capi = Py_None;',
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'pyobjfrom': {isintent_inout: """\
\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
\tif (f2py_success) {"""},
'closepyobjfrom': {isintent_inout: "\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
'need': {isintent_inout: 'try_pyarr_from_#ctype#'},
'_check': l_and(isscalar, l_not(iscomplex), isintent_nothide)
}, {
'frompyobj': [
# hasinitvalue...
# if pyobj is None:
# varname = init
# else
# from_pyobj(varname)
#
# isoptional and noinitvalue...
# if pyobj is not None:
# from_pyobj(varname)
# else:
# varname is uninitialized
#
# ...
# from_pyobj(varname)
#
{hasinitvalue: '\tif (#varname#_capi == Py_None) #varname# = #init#; else',
'_depend': ''},
{l_and(isoptional, l_not(hasinitvalue)): '\tif (#varname#_capi != Py_None)',
'_depend': ''},
{l_not(islogical): '''\
\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");
\tif (f2py_success) {'''},
{islogical: '''\
\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi);
\t\tf2py_success = 1;
\tif (f2py_success) {'''},
],
'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname#*/',
'need': {l_not(islogical): '#ctype#_from_pyobj'},
'_check': l_and(isscalar, l_not(iscomplex), isintent_nothide),
'_depend': ''
}, { # Hidden
'frompyobj': {hasinitvalue: '\t#varname# = #init#;'},
'need': typedef_need_dict,
'_check': l_and(isscalar, l_not(iscomplex), isintent_hide),
'_depend': ''
}, { # Common
'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'_check': l_and(isscalar, l_not(iscomplex)),
'_depend': ''
},
# Complex scalars
{ # Common
'decl': '\t#ctype# #varname#;',
'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'},
'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'return': {isintent_out: ',#varname#_capi'},
'_check': iscomplex
}, { # Not hidden
'decl': '\tPyObject *#varname#_capi = Py_None;',
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'need': {isintent_inout: 'try_pyarr_from_#ctype#'},
'pyobjfrom': {isintent_inout: """\
\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
\t\tif (f2py_success) {"""},
'closepyobjfrom': {isintent_inout: "\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
'_check': l_and(iscomplex, isintent_nothide)
}, {
'frompyobj': [{hasinitvalue: '\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'},
{l_and(isoptional, l_not(hasinitvalue))
: '\tif (#varname#_capi != Py_None)'},
'\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");'
'\n\tif (f2py_success) {'],
'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname# frompyobj*/',
'need': ['#ctype#_from_pyobj'],
'_check': l_and(iscomplex, isintent_nothide),
'_depend': ''
}, { # Hidden
'decl': {isintent_out: '\tPyObject *#varname#_capi = Py_None;'},
'_check': l_and(iscomplex, isintent_hide)
}, {
'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check': l_and(iscomplex, isintent_hide),
'_depend': ''
}, { # Common
'pyobjfrom': {isintent_out: '\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'},
'need': ['pyobj_from_#ctype#1'],
'_check': iscomplex
}, {
'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'_check': iscomplex,
'_depend': ''
},
# String
{ # Common
'decl': ['\t#ctype# #varname# = NULL;',
'\tint slen(#varname#);',
'\tPyObject *#varname#_capi = Py_None;'],
'callfortran':'#varname#,',
'callfortranappend':'slen(#varname#),',
'pyobjfrom':{debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
'return': {isintent_out: ',#varname#'},
'need': ['len..'], # 'STRINGFREE'],
'_check':isstring
}, { # Common
'frompyobj': """\
\tslen(#varname#) = #length#;
\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\");
\tif (f2py_success) {""",
'cleanupfrompyobj': """\
\t\tSTRINGFREE(#varname#);
\t} /*if (f2py_success) of #varname#*/""",
'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE'],
'_check':isstring,
'_depend':''
}, { # Not hidden
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'pyobjfrom': {isintent_inout: '''\
\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#);
\tif (f2py_success) {'''},
'closepyobjfrom': {isintent_inout: '\t} /*if (f2py_success) of #varname# pyobjfrom*/'},
'need': {isintent_inout: 'try_pyarr_from_#ctype#'},
'_check': l_and(isstring, isintent_nothide)
}, { # Hidden
'_check': l_and(isstring, isintent_hide)
}, {
'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
'_check': isstring,
'_depend': ''
},
# Array
{ # Common
'decl': ['\t#ctype# *#varname# = NULL;',
'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
'\tconst int #varname#_Rank = #rank#;',
'\tPyArrayObject *capi_#varname#_tmp = NULL;',
'\tint capi_#varname#_intent = 0;',
],
'callfortran':'#varname#,',
'return':{isintent_out: ',capi_#varname#_tmp'},
'need': 'len..',
'_check': isarray
}, { # intent(overwrite) array
'decl': '\tint capi_overwrite_#varname# = 1;',
'kwlistxa': '"overwrite_#varname#",',
'xaformat': 'i',
'keys_xa': ',&capi_overwrite_#varname#',
'docsignxa': 'overwrite_#varname#=1,',
'docsignxashort': 'overwrite_#varname#,',
'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1',
'_check': l_and(isarray, isintent_overwrite),
}, {
'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check': l_and(isarray, isintent_overwrite),
'_depend': '',
},
{ # intent(copy) array
'decl': '\tint capi_overwrite_#varname# = 0;',
'kwlistxa': '"overwrite_#varname#",',
'xaformat': 'i',
'keys_xa': ',&capi_overwrite_#varname#',
'docsignxa': 'overwrite_#varname#=0,',
'docsignxashort': 'overwrite_#varname#,',
'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0',
'_check': l_and(isarray, isintent_copy),
}, {
'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check': l_and(isarray, isintent_copy),
'_depend': '',
}, {
'need': [{hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}],
'_check': isarray,
'_depend': ''
}, { # Not hidden
'decl': '\tPyObject *#varname#_capi = Py_None;',
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'_check': l_and(isarray, isintent_nothide)
}, {
'frompyobj': ['\t#setdims#;',
'\tcapi_#varname#_intent |= #intent#;',
{isintent_hide:
'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'},
{isintent_nothide:
'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'},
"""\
\tif (capi_#varname#_tmp == NULL) {
\t\tPyObject *exc, *val, *tb;
\t\tPyErr_Fetch(&exc, &val, &tb);
\t\tPyErr_SetString(exc ? exc : #modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" );
\t\tnpy_PyErr_ChainExceptionsCause(exc, val, tb);
\t} else {
\t\t#varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_tmp));
""",
{hasinitvalue: [
{isintent_nothide:
'\tif (#varname#_capi == Py_None) {'},
{isintent_hide: '\t{'},
{iscomplexarray: '\t\t#ctype# capi_c;'},
"""\
\t\tint *_i,capi_i=0;
\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\");
\t\tif (initforcomb(PyArray_DIMS(capi_#varname#_tmp),PyArray_NDIM(capi_#varname#_tmp),1)) {
\t\t\twhile ((_i = nextforcomb()))
\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */
\t\t} else {
\t\t\tPyObject *exc, *val, *tb;
\t\t\tPyErr_Fetch(&exc, &val, &tb);
\t\t\tPyErr_SetString(exc ? exc : #modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\");
\t\t\tnpy_PyErr_ChainExceptionsCause(exc, val, tb);
\t\t\tf2py_success = 0;
\t\t}
\t}
\tif (f2py_success) {"""]},
],
'cleanupfrompyobj': [ # note that this list will be reversed
'\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/',
{l_not(l_or(isintent_out, isintent_hide)): """\
\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) {
\t\tPy_XDECREF(capi_#varname#_tmp); }"""},
{l_and(isintent_hide, l_not(isintent_out))
: """\t\tPy_XDECREF(capi_#varname#_tmp);"""},
{hasinitvalue: '\t} /*if (f2py_success) of #varname# init*/'},
],
'_check': isarray,
'_depend': ''
},
# Scalararray
{ # Common
'_check': l_and(isarray, l_not(iscomplexarray))
}, { # Not hidden
'_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide)
},
# Integer*1 array
{'need': '#ctype#',
'_check': isint1array,
'_depend': ''
},
# Integer*-1 array
{'need': '#ctype#',
'_check': isunsigned_chararray,
'_depend': ''
},
# Integer*-2 array
{'need': '#ctype#',
'_check': isunsigned_shortarray,
'_depend': ''
},
# Integer*-8 array
{'need': '#ctype#',
'_check': isunsigned_long_longarray,
'_depend': ''
},
# Complexarray
{'need': '#ctype#',
'_check': iscomplexarray,
'_depend': ''
},
# Stringarray
{
'callfortranappend': {isarrayofstrings: 'flen(#varname#),'},
'need': 'string',
'_check': isstringarray
}
]
################# Rules for checking ###############
check_rules = [
{
'frompyobj': {debugcapi: '\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'},
'need': 'len..'
}, {
'frompyobj': '\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
'cleanupfrompyobj': '\t} /*CHECKSCALAR(#check#)*/',
'need': 'CHECKSCALAR',
'_check': l_and(isscalar, l_not(iscomplex)),
'_break': ''
}, {
'frompyobj': '\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
'cleanupfrompyobj': '\t} /*CHECKSTRING(#check#)*/',
'need': 'CHECKSTRING',
'_check': isstring,
'_break': ''
}, {
'need': 'CHECKARRAY',
'frompyobj': '\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {',
'cleanupfrompyobj': '\t} /*CHECKARRAY(#check#)*/',
'_check': isarray,
'_break': ''
}, {
'need': 'CHECKGENERIC',
'frompyobj': '\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {',
'cleanupfrompyobj': '\t} /*CHECKGENERIC(#check#)*/',
}
]
########## Applying the rules. No need to modify what follows #############
#################### Build C/API module #######################
def buildmodule(m, um):
"""
Return
"""
outmess('\tBuilding module "%s"...\n' % (m['name']))
ret = {}
mod_rules = defmod_rules[:]
vrd = capi_maps.modsign2map(m)
rd = dictappend({'f2py_version': f2py_version}, vrd)
funcwrappers = []
funcwrappers2 = [] # F90 codes
for n in m['interfaced']:
nb = None
for bi in m['body']:
if not bi['block'] == 'interface':
errmess('buildmodule: Expected interface block. Skipping.\n')
continue
for b in bi['body']:
if b['name'] == n:
nb = b
break
if not nb:
errmess(
                'buildmodule: Could not find the body of interfaced routine "%s". Skipping.\n' % (n))
continue
nb_list = [nb]
if 'entry' in nb:
for k, a in nb['entry'].items():
nb1 = copy.deepcopy(nb)
del nb1['entry']
nb1['name'] = k
nb1['args'] = a
nb_list.append(nb1)
for nb in nb_list:
api, wrap = buildapi(nb)
if wrap:
if ismoduleroutine(nb):
funcwrappers2.append(wrap)
else:
funcwrappers.append(wrap)
ar = applyrules(api, vrd)
rd = dictappend(rd, ar)
# Construct COMMON block support
cr, wrap = common_rules.buildhooks(m)
if wrap:
funcwrappers.append(wrap)
ar = applyrules(cr, vrd)
rd = dictappend(rd, ar)
# Construct F90 module support
mr, wrap = f90mod_rules.buildhooks(m)
if wrap:
funcwrappers2.append(wrap)
ar = applyrules(mr, vrd)
rd = dictappend(rd, ar)
for u in um:
ar = use_rules.buildusevars(u, m['use'][u['name']])
rd = dictappend(rd, ar)
needs = cfuncs.get_needs()
code = {}
for n in needs.keys():
code[n] = []
for k in needs[n]:
c = ''
if k in cfuncs.includes0:
c = cfuncs.includes0[k]
elif k in cfuncs.includes:
c = cfuncs.includes[k]
elif k in cfuncs.userincludes:
c = cfuncs.userincludes[k]
elif k in cfuncs.typedefs:
c = cfuncs.typedefs[k]
elif k in cfuncs.typedefs_generated:
c = cfuncs.typedefs_generated[k]
elif k in cfuncs.cppmacros:
c = cfuncs.cppmacros[k]
elif k in cfuncs.cfuncs:
c = cfuncs.cfuncs[k]
elif k in cfuncs.callbacks:
c = cfuncs.callbacks[k]
elif k in cfuncs.f90modhooks:
c = cfuncs.f90modhooks[k]
elif k in cfuncs.commonhooks:
c = cfuncs.commonhooks[k]
else:
errmess('buildmodule: unknown need %s.\n' % (repr(k)))
continue
code[n].append(c)
mod_rules.append(code)
for r in mod_rules:
if ('_check' in r and r['_check'](m)) or ('_check' not in r):
ar = applyrules(r, vrd, m)
rd = dictappend(rd, ar)
ar = applyrules(module_rules, rd)
fn = os.path.join(options['buildpath'], vrd['coutput'])
ret['csrc'] = fn
with open(fn, 'w') as f:
f.write(ar['modulebody'].replace('\t', 2 * ' '))
outmess('\tWrote C/API module "%s" to file "%s"\n' % (m['name'], fn))
if options['dorestdoc']:
fn = os.path.join(
options['buildpath'], vrd['modulename'] + 'module.rest')
with open(fn, 'w') as f:
f.write('.. -*- rest -*-\n')
f.write('\n'.join(ar['restdoc']))
outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n' %
(options['buildpath'], vrd['modulename']))
if options['dolatexdoc']:
fn = os.path.join(
options['buildpath'], vrd['modulename'] + 'module.tex')
ret['ltx'] = fn
with open(fn, 'w') as f:
f.write(
'%% This file is auto-generated with f2py (version:%s)\n' % (f2py_version))
if 'shortlatex' not in options:
f.write(
'\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n')
f.write('\n'.join(ar['latexdoc']))
if 'shortlatex' not in options:
f.write('\\end{document}')
outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n' %
(options['buildpath'], vrd['modulename']))
if funcwrappers:
wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output'])
ret['fsrc'] = wn
with open(wn, 'w') as f:
f.write('C -*- fortran -*-\n')
f.write(
'C This file is autogenerated with f2py (version:%s)\n' % (f2py_version))
f.write(
'C It contains Fortran 77 wrappers to fortran functions.\n')
lines = []
for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'):
if l and l[0] == ' ':
while len(l) >= 66:
lines.append(l[:66] + '\n &')
l = l[66:]
lines.append(l + '\n')
else:
lines.append(l + '\n')
lines = ''.join(lines).replace('\n &\n', '\n')
f.write(lines)
outmess('\tFortran 77 wrappers are saved to "%s"\n' % (wn))
if funcwrappers2:
wn = os.path.join(
options['buildpath'], '%s-f2pywrappers2.f90' % (vrd['modulename']))
ret['fsrc'] = wn
with open(wn, 'w') as f:
f.write('! -*- f90 -*-\n')
f.write(
'! This file is autogenerated with f2py (version:%s)\n' % (f2py_version))
f.write(
'! It contains Fortran 90 wrappers to fortran functions.\n')
lines = []
for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'):
if len(l) > 72 and l[0] == ' ':
lines.append(l[:72] + '&\n &')
l = l[72:]
while len(l) > 66:
lines.append(l[:66] + '&\n &')
l = l[66:]
lines.append(l + '\n')
else:
lines.append(l + '\n')
lines = ''.join(lines).replace('\n &\n', '\n')
f.write(lines)
outmess('\tFortran 90 wrappers are saved to "%s"\n' % (wn))
return ret
################## Build C/API function #############
stnd = {1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th',
6: 'th', 7: 'th', 8: 'th', 9: 'th', 0: 'th'}
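# The stnd table above supplies an English ordinal suffix keyed on the last
# decimal digit of an argument's position; buildapi below combines it as
# repr(nth) + stnd[nth % 10], e.g. 1 -> '1st argument', 22 -> '22nd keyword'.
# Because only the last digit is consulted, positions 11-13 render as
# '11st'/'12nd'/'13rd'; the result only feeds the #nth# placeholder in
# generated diagnostics.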
def buildapi(rout):
    """Apply the routine, argument and check rules to `rout`; return the
    resulting dictionary of generated code fragments together with any
    Fortran wrapper source."""
    rout, wrap = func2subr.assubr(rout)
args, depargs = getargs2(rout)
capi_maps.depargs = depargs
var = rout['vars']
if ismoduleroutine(rout):
outmess('\t\t\tConstructing wrapper function "%s.%s"...\n' %
(rout['modulename'], rout['name']))
else:
outmess('\t\tConstructing wrapper function "%s"...\n' % (rout['name']))
# Routine
vrd = capi_maps.routsign2map(rout)
rd = dictappend({}, vrd)
for r in rout_rules:
if ('_check' in r and r['_check'](rout)) or ('_check' not in r):
ar = applyrules(r, vrd, rout)
rd = dictappend(rd, ar)
# Args
nth, nthk = 0, 0
savevrd = {}
for a in args:
vrd = capi_maps.sign2map(a, var[a])
if isintent_aux(var[a]):
_rules = aux_rules
else:
_rules = arg_rules
if not isintent_hide(var[a]):
if not isoptional(var[a]):
nth = nth + 1
vrd['nth'] = repr(nth) + stnd[nth % 10] + ' argument'
else:
nthk = nthk + 1
vrd['nth'] = repr(nthk) + stnd[nthk % 10] + ' keyword'
else:
vrd['nth'] = 'hidden'
savevrd[a] = vrd
for r in _rules:
if '_depend' in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar = applyrules(r, vrd, var[a])
rd = dictappend(rd, ar)
if '_break' in r:
break
for a in depargs:
if isintent_aux(var[a]):
_rules = aux_rules
else:
_rules = arg_rules
vrd = savevrd[a]
for r in _rules:
if '_depend' not in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar = applyrules(r, vrd, var[a])
rd = dictappend(rd, ar)
if '_break' in r:
break
if 'check' in var[a]:
for c in var[a]['check']:
vrd['check'] = c
ar = applyrules(check_rules, vrd, var[a])
rd = dictappend(rd, ar)
if isinstance(rd['cleanupfrompyobj'], list):
rd['cleanupfrompyobj'].reverse()
if isinstance(rd['closepyobjfrom'], list):
rd['closepyobjfrom'].reverse()
rd['docsignature'] = stripcomma(replace('#docsign##docsignopt##docsignxa#',
{'docsign': rd['docsign'],
'docsignopt': rd['docsignopt'],
'docsignxa': rd['docsignxa']}))
optargs = stripcomma(replace('#docsignopt##docsignxa#',
{'docsignxa': rd['docsignxashort'],
'docsignopt': rd['docsignoptshort']}
))
if optargs == '':
rd['docsignatureshort'] = stripcomma(
replace('#docsign#', {'docsign': rd['docsign']}))
else:
rd['docsignatureshort'] = replace('#docsign#[#docsignopt#]',
{'docsign': rd['docsign'],
'docsignopt': optargs,
})
rd['latexdocsignatureshort'] = rd['docsignatureshort'].replace('_', '\\_')
rd['latexdocsignatureshort'] = rd[
'latexdocsignatureshort'].replace(',', ', ')
cfs = stripcomma(replace('#callfortran##callfortranappend#', {
'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']}))
if len(rd['callfortranappend']) > 1:
rd['callcompaqfortran'] = stripcomma(replace('#callfortran# 0,#callfortranappend#', {
'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']}))
else:
rd['callcompaqfortran'] = cfs
rd['callfortran'] = cfs
if isinstance(rd['docreturn'], list):
rd['docreturn'] = stripcomma(
replace('#docreturn#', {'docreturn': rd['docreturn']})) + ' = '
rd['docstrsigns'] = []
rd['latexdocstrsigns'] = []
for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']:
if k in rd and isinstance(rd[k], list):
rd['docstrsigns'] = rd['docstrsigns'] + rd[k]
k = 'latex' + k
if k in rd and isinstance(rd[k], list):
rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\
['\\begin{description}'] + rd[k][1:] +\
['\\end{description}']
ar = applyrules(routine_rules, rd)
if ismoduleroutine(rout):
outmess('\t\t\t %s\n' % (ar['docshort']))
else:
outmess('\t\t %s\n' % (ar['docshort']))
return ar, wrap
#################### EOF rules.py #######################
|
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, print_function
import contextlib
import itertools
import os
import platform
import random
import subprocess
import sys
from contextlib import contextmanager
from textwrap import dedent
from pex.common import (
atomic_directory,
open_zip,
safe_mkdir,
safe_mkdtemp,
safe_rmtree,
safe_sleep,
temporary_dir,
)
from pex.compatibility import to_unicode
from pex.executor import Executor
from pex.interpreter import PythonInterpreter
from pex.pex import PEX
from pex.pex_builder import PEXBuilder
from pex.pex_info import PexInfo
from pex.pip import get_pip
from pex.targets import LocalInterpreter
from pex.third_party.pkg_resources import Distribution
from pex.typing import TYPE_CHECKING
from pex.util import DistributionHelper, named_temporary_file
if TYPE_CHECKING:
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Mapping,
Optional,
Set,
Text,
Tuple,
Union,
)
import attr # vendor:skip
else:
from pex.third_party import attr
PY_VER = sys.version_info[:2]
IS_PYPY = hasattr(sys, "pypy_version_info")
IS_PYPY2 = IS_PYPY and sys.version_info[0] == 2
IS_PYPY3 = IS_PYPY and sys.version_info[0] == 3
NOT_CPYTHON27 = IS_PYPY or PY_VER != (2, 7)
IS_LINUX = platform.system() == "Linux"
IS_MAC = platform.system() == "Darwin"
IS_NOT_LINUX = not IS_LINUX
NOT_CPYTHON27_OR_OSX = NOT_CPYTHON27 or IS_NOT_LINUX
@contextlib.contextmanager
def temporary_filename():
# type: () -> Iterator[str]
"""Creates a temporary filename.
    This is useful when you need to pass a filename to an API. Windows requires
    that all handles to a file be closed before it can be deleted or renamed, so
    yielding a name whose handle is already closed makes that a bit simpler.
"""
with named_temporary_file() as fp:
fp.write(b"")
fp.close()
yield fp.name
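# Illustrative sketch (not part of the original helpers): the yielded path
# points at an existing, already-closed temporary file, so it can be handed to
# APIs that open the file themselves; the payload below is hypothetical.
#
#   with temporary_filename() as path:
#       with open(path, "wb") as fp:
#           fp.write(b"payload")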
def random_bytes(length):
# type: (int) -> bytes
return "".join(map(chr, (random.randint(ord("a"), ord("z")) for _ in range(length)))).encode(
"utf-8"
)
def get_dep_dist_names_from_pex(pex_path, match_prefix=""):
# type: (str, str) -> Set[str]
"""Given an on-disk pex, extract all of the unique first-level paths under `.deps`."""
with open_zip(pex_path) as pex_zip:
dep_gen = (f.split(os.sep)[1] for f in pex_zip.namelist() if f.startswith(".deps/"))
return set(item for item in dep_gen if item.startswith(match_prefix))
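# Illustrative sketch (not part of the original helpers): typically used in
# tests to assert that a given wheel landed under `.deps/` of a built PEX;
# the pex path and project prefix below are hypothetical.
#
#   dep_names = get_dep_dist_names_from_pex("app.pex", match_prefix="requests")
#   assert any(name.startswith("requests") for name in dep_names)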
@contextlib.contextmanager
def temporary_content(content_map, interp=None, seed=31337, perms=0o644):
# type: (Mapping[str, Union[int, str]], Optional[Dict[str, Any]], int, int) -> Iterator[str]
"""Write content to disk where content is map from string => (int, string).
If target is int, write int random bytes. Otherwise write contents of string.
"""
random.seed(seed)
interp = interp or {}
with temporary_dir() as td:
for filename, size_or_content in content_map.items():
dest = os.path.join(td, filename)
safe_mkdir(os.path.dirname(dest))
with open(dest, "wb") as fp:
if isinstance(size_or_content, int):
fp.write(random_bytes(size_or_content))
else:
fp.write((size_or_content % interp).encode("utf-8"))
os.chmod(dest, perms)
yield td
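# Illustrative sketch (not part of the original helpers): int values become
# that many random bytes, str values are %-interpolated against `interp` and
# written as UTF-8; the filenames below are hypothetical.
#
#   files = {"data.bin": 128, "greeting.txt": "hello %(who)s\n"}
#   with temporary_content(files, interp={"who": "world"}) as td:
#       assert os.path.getsize(os.path.join(td, "data.bin")) == 128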
@contextlib.contextmanager
def make_project(
name="my_project", # type: str
version="0.0.0", # type: str
zip_safe=True, # type: bool
install_reqs=None, # type: Optional[List[str]]
extras_require=None, # type: Optional[Dict[str, List[str]]]
entry_points=None, # type: Optional[Union[str, Dict[str, List[str]]]]
python_requires=None, # type: Optional[str]
universal=False, # type: bool
):
# type: (...) -> Iterator[str]
project_content = {
"setup.py": dedent(
"""
from setuptools import setup
setup(
name=%(project_name)r,
version=%(version)r,
zip_safe=%(zip_safe)r,
packages=[%(project_name)r],
scripts=[
'scripts/hello_world',
'scripts/shell_script',
],
package_data={%(project_name)r: ['package_data/*.dat']},
install_requires=%(install_requires)r,
extras_require=%(extras_require)r,
entry_points=%(entry_points)r,
python_requires=%(python_requires)r,
options={'bdist_wheel': {'universal': %(universal)r}},
)
"""
),
"scripts/hello_world": '#!/usr/bin/env python\nprint("hello world from py script!")\n',
"scripts/shell_script": "#!/usr/bin/env bash\necho hello world from shell script\n",
os.path.join(name, "__init__.py"): 0,
os.path.join(name, "my_module.py"): 'def do_something():\n print("hello world!")\n',
os.path.join(name, "package_data/resource1.dat"): 1000,
os.path.join(name, "package_data/resource2.dat"): 1000,
} # type: Dict[str, Union[str, int]]
interp = {
"project_name": name,
"version": version,
"zip_safe": zip_safe,
"install_requires": install_reqs or [],
"extras_require": extras_require or {},
"entry_points": entry_points or {},
"python_requires": python_requires,
"universal": universal,
}
with temporary_content(project_content, interp=interp) as td:
yield td
class WheelBuilder(object):
"""Create a wheel distribution from an unpacked setup.py-based project."""
class BuildFailure(Exception):
pass
def __init__(
self,
source_dir, # type: str
interpreter=None, # type: Optional[PythonInterpreter]
wheel_dir=None, # type: Optional[str]
verify=True, # type: bool
):
# type: (...) -> None
"""Create a wheel from an unpacked source distribution in source_dir."""
self._source_dir = source_dir
self._wheel_dir = wheel_dir or safe_mkdtemp()
self._interpreter = interpreter or PythonInterpreter.get()
self._verify = verify
def bdist(self):
# type: () -> str
get_pip(interpreter=self._interpreter).spawn_build_wheels(
distributions=[self._source_dir],
wheel_dir=self._wheel_dir,
interpreter=self._interpreter,
verify=self._verify,
).wait()
dists = os.listdir(self._wheel_dir)
if len(dists) == 0:
raise self.BuildFailure("No distributions were produced!")
if len(dists) > 1:
raise self.BuildFailure("Ambiguous source distributions found: %s" % (" ".join(dists)))
return os.path.join(self._wheel_dir, dists[0])
@contextlib.contextmanager
def built_wheel(
name="my_project", # type: str
version="0.0.0", # type: str
zip_safe=True, # type: bool
install_reqs=None, # type: Optional[List[str]]
extras_require=None, # type: Optional[Dict[str, List[str]]]
entry_points=None, # type: Optional[Union[str, Dict[str, List[str]]]]
interpreter=None, # type: Optional[PythonInterpreter]
python_requires=None, # type: Optional[str]
universal=False, # type: bool
**kwargs # type: Any
):
# type: (...) -> Iterator[str]
with make_project(
name=name,
version=version,
zip_safe=zip_safe,
install_reqs=install_reqs,
extras_require=extras_require,
entry_points=entry_points,
python_requires=python_requires,
universal=universal,
) as td:
builder = WheelBuilder(td, interpreter=interpreter, **kwargs)
yield builder.bdist()
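# Illustrative sketch (not part of the original helpers): built_wheel() composes make_project()
# and WheelBuilder and yields the path of the freshly built .whl file.
def _example_built_wheel_usage():
    # type: () -> str
    with built_wheel(name="example_dist", version="1.2.3") as whl:
        # `whl` is a path ending in something like example_dist-1.2.3-<tags>.whl.
        return os.path.basename(whl)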
@contextlib.contextmanager
def make_source_dir(
name="my_project", # type: str
version="0.0.0", # type: str
install_reqs=None, # type: Optional[List[str]]
extras_require=None, # type: Optional[Dict[str, List[str]]]
):
# type: (...) -> Iterator[str]
with make_project(
name=name, version=version, install_reqs=install_reqs, extras_require=extras_require
) as td:
yield td
@contextlib.contextmanager
def make_bdist(
name="my_project", # type: str
version="0.0.0", # type: str
zip_safe=True, # type: bool
interpreter=None, # type: Optional[PythonInterpreter]
**kwargs # type: Any
):
# type: (...) -> Iterator[Distribution]
with built_wheel(
name=name, version=version, zip_safe=zip_safe, interpreter=interpreter, **kwargs
) as dist_location:
install_dir = os.path.join(safe_mkdtemp(), os.path.basename(dist_location))
get_pip(interpreter=interpreter).spawn_install_wheel(
wheel=dist_location,
install_dir=install_dir,
target=LocalInterpreter.create(interpreter),
).wait()
dist = DistributionHelper.distribution_from_path(install_dir)
assert dist is not None
yield dist
COVERAGE_PREAMBLE = """
try:
from coverage import coverage
cov = coverage(auto_data=True, data_suffix=True)
cov.start()
except ImportError:
pass
"""
def write_simple_pex(
td, # type: str
exe_contents=None, # type: Optional[str]
dists=None, # type: Optional[Iterable[Distribution]]
sources=None, # type: Optional[Iterable[Tuple[str, str]]]
coverage=False, # type: bool
interpreter=None, # type: Optional[PythonInterpreter]
pex_info=None, # type: Optional[PexInfo]
):
# type: (...) -> PEXBuilder
"""Write a pex file that optionally contains an executable entry point.
:param td: temporary directory path
:param exe_contents: entry point python file
:param dists: distributions to include, typically sdists or bdists
:param sources: sources to include, as a list of pairs (env_filename, contents)
:param coverage: include coverage header
:param interpreter: a custom interpreter to use to build the pex
:param pex_info: a custom PexInfo to use to build the pex.
"""
dists = dists or []
sources = sources or []
safe_mkdir(td)
pb = PEXBuilder(
path=td,
preamble=COVERAGE_PREAMBLE if coverage else None,
interpreter=interpreter,
pex_info=pex_info,
)
for dist in dists:
pb.add_dist_location(dist.location if isinstance(dist, Distribution) else dist)
for env_filename, contents in sources:
src_path = os.path.join(td, env_filename)
safe_mkdir(os.path.dirname(src_path))
with open(src_path, "w") as fp:
fp.write(contents)
pb.add_source(src_path, env_filename)
if exe_contents:
with open(os.path.join(td, "exe.py"), "w") as fp:
fp.write(exe_contents)
pb.set_executable(os.path.join(td, "exe.py"))
pb.freeze()
return pb
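# Illustrative sketch (not part of the original helpers): assembling a tiny PEX with one source
# module and an entry point, then writing the .pex file out.
def _example_write_simple_pex():
    # type: () -> str
    pb = write_simple_pex(
        safe_mkdtemp(),
        exe_contents="import greeting\ngreeting.hello()\n",
        sources=[("greeting.py", 'def hello():\n    print("hi")\n')],
    )
    pex_file = os.path.join(safe_mkdtemp(), "app.pex")
    pb.build(pex_file)
    return pex_file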
@attr.s(frozen=True)
class IntegResults(object):
"""Convenience object to return integration run results."""
output = attr.ib() # type: Text
error = attr.ib() # type: Text
return_code = attr.ib() # type: int
def assert_success(self):
# type: () -> None
assert (
self.return_code == 0
), "integration test failed: return_code={}, output={}, error={}".format(
self.return_code, self.output, self.error
)
def assert_failure(self):
# type: () -> None
assert self.return_code != 0
def create_pex_command(
args=None, # type: Optional[Iterable[str]]
python=None, # type: Optional[str]
quiet=False, # type: bool
):
# type: (...) -> List[str]
cmd = [python or sys.executable, "-mpex"]
if not quiet:
cmd.append("-vvvvv")
if args:
cmd.extend(args)
return cmd
def run_pex_command(
args, # type: Iterable[str]
env=None, # type: Optional[Dict[str, str]]
python=None, # type: Optional[str]
quiet=False, # type: bool
):
# type: (...) -> IntegResults
"""Simulate running pex command for integration testing.
    This is different from run_simple_pex in that it calls the pex command rather than running a
    generated pex. This is useful for testing end-to-end runs with specific command line arguments
    or env options.
"""
cmd = create_pex_command(args, python=python, quiet=quiet)
process = Executor.open_process(
cmd=cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
output, error = process.communicate()
return IntegResults(output.decode("utf-8"), error.decode("utf-8"), process.returncode)
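# Illustrative sketch (not part of the original helpers): driving the pex CLI end-to-end and
# asserting on the IntegResults it returns. The `cowsay==4.0` requirement and `-c cowsay` console
# script are only example inputs and require network access to resolve.
def _example_run_pex_command(output_dir):
    # type: (str) -> str
    pex_file = os.path.join(output_dir, "cowsay.pex")
    results = run_pex_command(args=["cowsay==4.0", "-c", "cowsay", "-o", pex_file])
    results.assert_success()
    return pex_file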
def run_simple_pex(
pex, # type: str
args=(), # type: Iterable[str]
interpreter=None, # type: Optional[PythonInterpreter]
stdin=None, # type: Optional[bytes]
**kwargs # type: Any
):
# type: (...) -> Tuple[bytes, int]
p = PEX(pex, interpreter=interpreter)
process = p.run(
args=args,
blocking=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**kwargs
)
stdout, _ = process.communicate(input=stdin)
return stdout.replace(b"\r", b""), process.returncode
def run_simple_pex_test(
body, # type: str
args=(), # type: Iterable[str]
env=None, # type: Optional[Mapping[str, str]]
dists=None, # type: Optional[Iterable[Distribution]]
coverage=False, # type: bool
interpreter=None, # type: Optional[PythonInterpreter]
):
# type: (...) -> Tuple[bytes, int]
with temporary_dir() as td1, temporary_dir() as td2:
pb = write_simple_pex(td1, body, dists=dists, coverage=coverage, interpreter=interpreter)
pex = os.path.join(td2, "app.pex")
pb.build(pex)
return run_simple_pex(pex, args=args, env=env, interpreter=interpreter)
def bootstrap_python_installer(dest):
# type: (str) -> None
for _ in range(3):
try:
subprocess.check_call(["git", "clone", "https://github.com/pyenv/pyenv.git", dest])
except subprocess.CalledProcessError as e:
print("caught exception: %r" % e)
continue
else:
break
else:
raise RuntimeError("Helper method could not clone pyenv from git after 3 tries")
# NB: We keep the pool of bootstrapped interpreters as small as possible to avoid timeouts in CI
# otherwise encountered when fetching and building too many on a cache miss. In the past we had
# issues with the combination of 7 total unique interpreter versions and a Travis-CI timeout of 50
# minutes for a shard.
PY27 = "2.7.18"
PY37 = "3.7.11"
PY310 = "3.10.1"
ALL_PY_VERSIONS = (PY27, PY37, PY310)
_ALL_PY3_VERSIONS = (PY37, PY310)
def ensure_python_distribution(version):
# type: (str) -> Tuple[str, str, Callable[[Iterable[str]], Text]]
if version not in ALL_PY_VERSIONS:
raise ValueError("Please constrain version to one of {}".format(ALL_PY_VERSIONS))
pyenv_root = os.path.abspath(
os.path.join(
os.path.expanduser(os.environ.get("_PEX_TEST_PYENV_ROOT", "~/.pex_dev")),
"pyenv",
)
)
interpreter_location = os.path.join(pyenv_root, "versions", version)
pyenv = os.path.join(pyenv_root, "bin", "pyenv")
pyenv_env = os.environ.copy()
pyenv_env["PYENV_ROOT"] = pyenv_root
pip = os.path.join(interpreter_location, "bin", "pip")
with atomic_directory(target_dir=os.path.join(pyenv_root), exclusive=True) as target_dir:
if not target_dir.is_finalized():
bootstrap_python_installer(target_dir.work_dir)
with atomic_directory(
target_dir=interpreter_location, exclusive=True
) as interpreter_target_dir:
if not interpreter_target_dir.is_finalized():
subprocess.check_call(
[
"git",
"--git-dir={}".format(os.path.join(pyenv_root, ".git")),
"--work-tree={}".format(pyenv_root),
"pull",
"--ff-only",
"https://github.com/pyenv/pyenv.git",
]
)
env = pyenv_env.copy()
if sys.platform.lower().startswith("linux"):
env["CONFIGURE_OPTS"] = "--enable-shared"
# The pyenv builder detects `--enable-shared` and sets up `RPATH` via
# `LDFLAGS=-Wl,-rpath=... $LDFLAGS` to ensure the built python binary links the
                # correct libpython shared lib. Some compiler versions set `RUNPATH` instead,
                # which is searched _after_ the `LD_LIBRARY_PATH` environment variable. To
                # ensure an inopportune `LD_LIBRARY_PATH` doesn't fool the pyenv python binary
                # into linking the wrong libpython, force `RPATH`, which the linker searches
                # first, with `--disable-new-dtags`.
env["LDFLAGS"] = "-Wl,--disable-new-dtags"
subprocess.check_call([pyenv, "install", "--keep", version], env=env)
subprocess.check_call([pip, "install", "-U", "pip"])
major, minor = version.split(".")[:2]
python = os.path.join(
interpreter_location, "bin", "python{major}.{minor}".format(major=major, minor=minor)
)
def run_pyenv(args):
# type: (Iterable[str]) -> Text
return to_unicode(subprocess.check_output([pyenv] + list(args), env=pyenv_env))
return python, pip, run_pyenv
def ensure_python_venv(version, latest_pip=True, system_site_packages=False):
    # type: (str, bool, bool) -> Tuple[str, str]
python, pip, _ = ensure_python_distribution(version)
venv = safe_mkdtemp()
if version in _ALL_PY3_VERSIONS:
args = [python, "-m", "venv", venv]
if system_site_packages:
args.append("--system-site-packages")
subprocess.check_call(args=args)
else:
subprocess.check_call(args=[pip, "install", "virtualenv==16.7.10"])
args = [python, "-m", "virtualenv", venv, "-q"]
if system_site_packages:
args.append("--system-site-packages")
subprocess.check_call(args=args)
python, pip = tuple(os.path.join(venv, "bin", exe) for exe in ("python", "pip"))
if latest_pip:
subprocess.check_call(args=[pip, "install", "-U", "pip"])
return python, pip
def ensure_python_interpreter(version):
# type: (str) -> str
python, _, _ = ensure_python_distribution(version)
return python
@contextmanager
def environment_as(**kwargs):
# type: (**Any) -> Iterator[None]
existing = {key: os.environ.get(key) for key in kwargs}
def adjust_environment(mapping):
for key, value in mapping.items():
if value is not None:
os.environ[key] = str(value)
else:
os.environ.pop(key, None)
adjust_environment(kwargs)
try:
yield
finally:
adjust_environment(existing)
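# Illustrative sketch (not part of the original helpers): environment_as() applies the given
# variables for the duration of the block (a None value removes the variable) and restores the
# previous values afterwards, even if the block raises.
def _example_environment_as():
    # type: () -> Tuple[Optional[str], Optional[str]]
    with environment_as(PEX_VERBOSE="9", PEX_ROOT=None):
        return os.environ.get("PEX_VERBOSE"), os.environ.get("PEX_ROOT")  # ("9", None)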
@contextmanager
def pushd(directory):
# type: (str) -> Iterator[None]
cwd = os.getcwd()
try:
os.chdir(directory)
yield
finally:
os.chdir(cwd)
def make_env(**kwargs):
# type: (**Any) -> Dict[str, str]
"""Create a copy of the current environment with the given modifications.
The given kwargs add to or update the environment when they have a non-`None` value. When they
have a `None` value, the environment variable is removed from the environment.
    All non-`None` values are converted to strings by applying `str`.
"""
env = os.environ.copy()
env.update((k, str(v)) for k, v in kwargs.items() if v is not None)
for k, v in kwargs.items():
if v is None:
env.pop(k, None)
return env
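# Illustrative sketch (not part of the original helpers): make_env() copies os.environ,
# stringifies non-None values and drops any key passed as None.
def _example_make_env():
    # type: () -> Dict[str, str]
    env = make_env(PYTHONHASHSEED=42, PEX_PYTHON=None)
    assert env["PYTHONHASHSEED"] == "42"  # non-None values are converted with str()
    assert "PEX_PYTHON" not in env  # None values are removed (or simply never added)
    return env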
def run_commands_with_jitter(
commands, # type: Iterable[Iterable[str]]
path_argument, # type: str
extra_env=None, # type: Optional[Mapping[str, str]]
delay=2.0, # type: float
):
# type: (...) -> List[str]
"""Runs the commands with tactics that attempt to introduce randomness in outputs.
Each command will run against a clean Pex cache with a unique path injected as the value for
`path_argument`. A unique `PYTHONHASHSEED` is set in the environment for each execution as well.
    Additionally, a delay is inserted between executions. By default, this delay is 2s to ensure the
    zip format's 2-second timestamp precision is stressed. See:
    https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT.
"""
td = safe_mkdtemp()
pex_root = os.path.join(td, "pex_root")
paths = []
for index, command in enumerate(commands):
path = os.path.join(td, str(index))
cmd = list(command) + [path_argument, path]
        # Note that we vary `PYTHONHASHSEED` per run so that hash-seed-dependent behavior (e.g.,
        # dict and set iteration order) can actually differ between runs; Tox pins this value by
        # default. See:
        # https://tox.readthedocs.io/en/latest/example/basic.html#special-handling-of-pythonhashseed
env = make_env(PEX_ROOT=pex_root, PYTHONHASHSEED=(index * 497) + 4)
if extra_env:
env.update(extra_env)
if index > 0:
safe_sleep(delay)
# Ensure the PEX is fully rebuilt.
safe_rmtree(pex_root)
subprocess.check_call(args=cmd, env=env)
paths.append(path)
return paths
def run_command_with_jitter(
args, # type: Iterable[str]
path_argument, # type: str
extra_env=None, # type: Optional[Mapping[str, str]]
delay=2.0, # type: float
count=3, # type: int
):
# type: (...) -> List[str]
"""Runs the command `count` times in an attempt to introduce randomness.
Each run of the command will run against a clean Pex cache with a unique path injected as the
value for `path_argument`. A unique `PYTHONHASHSEED` is set in the environment for each
execution as well.
    Additionally, a delay is inserted between executions. By default, this delay is 2s to ensure the
    zip format's 2-second timestamp precision is stressed. See:
    https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT.
"""
return run_commands_with_jitter(
commands=list(itertools.repeat(list(args), count)),
path_argument=path_argument,
extra_env=extra_env,
delay=delay,
)
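# Illustrative sketch (not part of the original helpers): using the jitter helpers to check that
# repeated `pex` invocations produce byte-identical outputs despite fresh caches, varying
# PYTHONHASHSEED values and zip timestamp jitter.
def _example_check_reproducible_build():
    # type: () -> bool
    import hashlib
    paths = run_command_with_jitter(create_pex_command(), path_argument="-o", count=3)
    digests = set()
    for path in paths:
        with open(path, "rb") as fp:
            digests.add(hashlib.sha256(fp.read()).hexdigest())
    return len(digests) == 1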
def pex_project_dir():
# type: () -> str
return str(
subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).decode("ascii").strip()
)
|
|
from __future__ import absolute_import, print_function, division
import itertools
import operator
from collections import OrderedDict
from petl.compat import next, string_types, reduce, text_type
from petl.errors import ArgumentError
from petl.util.base import Table, iterpeek, rowgroupby
from petl.util.base import values
from petl.util.counting import nrows
from petl.transform.sorts import sort, mergesort
from petl.transform.basics import cut
from petl.transform.dedup import distinct
def rowreduce(table, key, reducer, header=None, presorted=False,
buffersize=None, tempdir=None, cache=True):
"""
Group rows under the given key then apply `reducer` to produce a single
output row for each input group of rows. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar'],
... ['a', 3],
... ['a', 7],
... ['b', 2],
... ['b', 1],
... ['b', 9],
... ['c', 4]]
>>> def sumbar(key, rows):
... return [key, sum(row[1] for row in rows)]
...
>>> table2 = etl.rowreduce(table1, key='foo', reducer=sumbar,
... header=['foo', 'barsum'])
>>> table2
+-----+--------+
| foo | barsum |
+=====+========+
| 'a' | 10 |
+-----+--------+
| 'b' | 12 |
+-----+--------+
| 'c' | 4 |
+-----+--------+
N.B., this is not strictly a "reduce" in the sense of the standard Python
:func:`reduce` function, i.e., the `reducer` function is *not* applied
    recursively to values within a group; rather, it is applied once to each row
group as a whole.
See also :func:`petl.transform.reductions.aggregate` and
:func:`petl.transform.reductions.fold`.
"""
return RowReduceView(table, key, reducer, header=header,
presorted=presorted,
buffersize=buffersize, tempdir=tempdir, cache=cache)
Table.rowreduce = rowreduce
class RowReduceView(Table):
def __init__(self, source, key, reducer, header=None,
presorted=False, buffersize=None, tempdir=None, cache=True):
if presorted:
self.source = source
else:
self.source = sort(source, key, buffersize=buffersize,
tempdir=tempdir, cache=cache)
self.key = key
self.header = header
self.reducer = reducer
def __iter__(self):
return iterrowreduce(self.source, self.key, self.reducer, self.header)
def iterrowreduce(source, key, reducer, header):
if header is None:
# output header from source
header, source = iterpeek(source)
yield tuple(header)
for key, rows in rowgroupby(source, key):
yield tuple(reducer(key, rows))
def aggregate(table, key, aggregation=None, value=None, presorted=False,
buffersize=None, tempdir=None, cache=True, field='value'):
"""Apply aggregation functions.
E.g.::
>>> import petl as etl
>>>
>>> table1 = [['foo', 'bar', 'baz'],
... ['a', 3, True],
... ['a', 7, False],
... ['b', 2, True],
... ['b', 2, False],
... ['b', 9, False],
... ['c', 4, True]]
>>> # aggregate whole rows
... table2 = etl.aggregate(table1, 'foo', len)
>>> table2
+-----+-------+
| foo | value |
+=====+=======+
| 'a' | 2 |
+-----+-------+
| 'b' | 3 |
+-----+-------+
| 'c' | 1 |
+-----+-------+
>>> # aggregate whole rows without a key
>>> etl.aggregate(table1, None, len)
+-------+
| value |
+=======+
| 6 |
+-------+
>>> # aggregate single field
... table3 = etl.aggregate(table1, 'foo', sum, 'bar')
>>> table3
+-----+-------+
| foo | value |
+=====+=======+
| 'a' | 10 |
+-----+-------+
| 'b' | 13 |
+-----+-------+
| 'c' | 4 |
+-----+-------+
>>> # aggregate single field without a key
>>> etl.aggregate(table1, None, sum, 'bar')
+-------+
| value |
+=======+
| 27 |
+-------+
>>> # alternative signature using keyword args
... table4 = etl.aggregate(table1, key=('foo', 'bar'),
... aggregation=list, value=('bar', 'baz'))
>>> table4
+-----+-----+-------------------------+
| foo | bar | value |
+=====+=====+=========================+
| 'a' | 3 | [(3, True)] |
+-----+-----+-------------------------+
| 'a' | 7 | [(7, False)] |
+-----+-----+-------------------------+
| 'b' | 2 | [(2, True), (2, False)] |
+-----+-----+-------------------------+
| 'b' | 9 | [(9, False)] |
+-----+-----+-------------------------+
| 'c' | 4 | [(4, True)] |
+-----+-----+-------------------------+
>>> # alternative signature using keyword args without a key
>>> etl.aggregate(table1, key=None,
... aggregation=list, value=('bar', 'baz'))
+-----------------------------------------------------------------------+
| value |
+=======================================================================+
| [(3, True), (7, False), (2, True), (2, False), (9, False), (4, True)] |
+-----------------------------------------------------------------------+
>>> # aggregate multiple fields
... from collections import OrderedDict
>>> import petl as etl
>>>
>>> aggregation = OrderedDict()
>>> aggregation['count'] = len
>>> aggregation['minbar'] = 'bar', min
>>> aggregation['maxbar'] = 'bar', max
>>> aggregation['sumbar'] = 'bar', sum
>>> # default aggregation function is list
... aggregation['listbar'] = 'bar'
>>> aggregation['listbarbaz'] = ('bar', 'baz'), list
>>> aggregation['bars'] = 'bar', etl.strjoin(', ')
>>> table5 = etl.aggregate(table1, 'foo', aggregation)
>>> table5
+-----+-------+--------+--------+--------+-----------+-------------------------------------+-----------+
| foo | count | minbar | maxbar | sumbar | listbar | listbarbaz | bars |
+=====+=======+========+========+========+===========+=====================================+===========+
| 'a' | 2 | 3 | 7 | 10 | [3, 7] | [(3, True), (7, False)] | '3, 7' |
+-----+-------+--------+--------+--------+-----------+-------------------------------------+-----------+
| 'b' | 3 | 2 | 9 | 13 | [2, 2, 9] | [(2, True), (2, False), (9, False)] | '2, 2, 9' |
+-----+-------+--------+--------+--------+-----------+-------------------------------------+-----------+
| 'c' | 1 | 4 | 4 | 4 | [4] | [(4, True)] | '4' |
+-----+-------+--------+--------+--------+-----------+-------------------------------------+-----------+
>>> # aggregate multiple fields without a key
>>> etl.aggregate(table1, None, aggregation)
+-------+--------+--------+--------+--------------------+-----------------------------------------------------------------------+--------------------+
| count | minbar | maxbar | sumbar | listbar | listbarbaz | bars |
+=======+========+========+========+====================+=======================================================================+====================+
| 6 | 2 | 9 | 27 | [3, 7, 2, 2, 9, 4] | [(3, True), (7, False), (2, True), (2, False), (9, False), (4, True)] | '3, 7, 2, 2, 9, 4' |
+-------+--------+--------+--------+--------------------+-----------------------------------------------------------------------+--------------------+
If `presorted` is True, it is assumed that the data are already sorted by
the given key, and the `buffersize`, `tempdir` and `cache` arguments are
ignored. Otherwise, the data are sorted, see also the discussion of the
`buffersize`, `tempdir` and `cache` arguments under the
:func:`petl.transform.sorts.sort` function.
If `key` is None, sorting is not necessary.
"""
if callable(aggregation):
return SimpleAggregateView(table, key, aggregation=aggregation,
value=value, presorted=presorted,
buffersize=buffersize, tempdir=tempdir,
cache=cache, field=field)
elif aggregation is None or isinstance(aggregation, (list, tuple, dict)):
# ignore value arg
return MultiAggregateView(table, key, aggregation=aggregation,
presorted=presorted, buffersize=buffersize,
tempdir=tempdir, cache=cache)
else:
        raise ArgumentError('expected aggregation to be a callable, list, tuple, '
                            'dict or None')
Table.aggregate = aggregate
class SimpleAggregateView(Table):
def __init__(self, table, key, aggregation=list, value=None,
presorted=False, buffersize=None, tempdir=None,
cache=True, field='value'):
if presorted or key is None:
self.table = table
else:
self.table = sort(table, key, buffersize=buffersize,
tempdir=tempdir, cache=cache)
self.key = key
self.aggregation = aggregation
self.value = value
self.field = field
def __iter__(self):
return itersimpleaggregate(self.table, self.key, self.aggregation,
self.value, self.field)
def itersimpleaggregate(table, key, aggregation, value, field):
# special case counting
if aggregation == len and key is not None:
aggregation = lambda g: sum(1 for _ in g) # count length of iterable
# special case where length of key is 1
if isinstance(key, (list, tuple)) and len(key) == 1:
key = key[0]
# determine output header
if isinstance(key, (list, tuple)):
outhdr = tuple(key) + (field,)
elif callable(key):
outhdr = ('key', field)
elif key is None:
outhdr = field,
else:
outhdr = (key, field)
yield outhdr
# generate data
if isinstance(key, (list, tuple)):
for k, grp in rowgroupby(table, key, value):
yield tuple(k) + (aggregation(grp),)
elif key is None:
# special case counting
if aggregation == len:
yield nrows(table),
else:
yield aggregation(values(table, value)),
else:
for k, grp in rowgroupby(table, key, value):
yield k, aggregation(grp)
class MultiAggregateView(Table):
def __init__(self, source, key, aggregation=None, presorted=False,
buffersize=None, tempdir=None, cache=True):
if presorted or key is None:
self.source = source
else:
self.source = sort(source, key, buffersize=buffersize,
tempdir=tempdir, cache=cache)
self.key = key
if aggregation is None:
self.aggregation = OrderedDict()
elif isinstance(aggregation, (list, tuple)):
self.aggregation = OrderedDict()
for t in aggregation:
self.aggregation[t[0]] = t[1:]
elif isinstance(aggregation, dict):
self.aggregation = aggregation
else:
raise ArgumentError(
                'expected aggregation to be None, a list, tuple or dict; found %r'
% aggregation
)
def __iter__(self):
return itermultiaggregate(self.source, self.key, self.aggregation)
def __setitem__(self, key, value):
self.aggregation[key] = value
def itermultiaggregate(source, key, aggregation):
aggregation = OrderedDict(aggregation.items()) # take a copy
it = iter(source)
hdr = next(it)
# push back header to ensure we iterate only once
it = itertools.chain([hdr], it)
# normalise aggregators
for outfld in aggregation:
agg = aggregation[outfld]
if callable(agg):
aggregation[outfld] = None, agg
elif isinstance(agg, string_types):
aggregation[outfld] = agg, list # list is default
elif len(agg) == 1 and isinstance(agg[0], string_types):
aggregation[outfld] = agg[0], list # list is default
elif len(agg) == 1 and callable(agg[0]):
aggregation[outfld] = None, agg[0] # aggregate whole rows
elif len(agg) == 2:
pass # no need to normalise
else:
raise ArgumentError('invalid aggregation: %r, %r' % (outfld, agg))
# determine output header
if isinstance(key, (list, tuple)):
outhdr = list(key)
elif callable(key):
outhdr = ['key']
elif key is None:
outhdr = []
else:
outhdr = [key]
for outfld in aggregation:
outhdr.append(outfld)
yield tuple(outhdr)
if key is None:
grouped = rowgroupby(it, lambda x: None)
else:
grouped = rowgroupby(it, key)
# generate data
for k, rows in grouped:
rows = list(rows) # may need to iterate over these more than once
# handle compound key
if isinstance(key, (list, tuple)):
outrow = list(k)
elif key is None:
outrow = []
else:
outrow = [k]
for outfld in aggregation:
srcfld, aggfun = aggregation[outfld]
if srcfld is None:
aggval = aggfun(rows)
outrow.append(aggval)
elif isinstance(srcfld, (list, tuple)):
idxs = [hdr.index(f) for f in srcfld]
valgetter = operator.itemgetter(*idxs)
vals = (valgetter(row) for row in rows)
aggval = aggfun(vals)
outrow.append(aggval)
else:
idx = hdr.index(srcfld)
                # use a generator expression to avoid materialising the values
vals = (row[idx] for row in rows)
aggval = aggfun(vals)
outrow.append(aggval)
yield tuple(outrow)
def groupcountdistinctvalues(table, key, value):
"""Group by the `key` field then count the number of distinct values in the
`value` field."""
s1 = cut(table, key, value)
s2 = distinct(s1)
s3 = aggregate(s2, key, len)
return s3
Table.groupcountdistinctvalues = groupcountdistinctvalues
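# Illustrative sketch (not part of petl): counting distinct `bar` values per `foo` group. For the
# table below this yields the header ('foo', 'value') followed by ('a', 2) and ('b', 1).
def _example_groupcountdistinctvalues():
    table1 = [['foo', 'bar'],
              ['a', 1],
              ['a', 1],
              ['a', 2],
              ['b', 7]]
    return list(groupcountdistinctvalues(table1, 'foo', 'bar'))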
def groupselectfirst(table, key, presorted=False, buffersize=None,
tempdir=None, cache=True):
"""Group by the `key` field then return the first row within each group.
E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar', 'baz'],
... ['A', 1, True],
... ['C', 7, False],
... ['B', 2, False],
... ['C', 9, True]]
>>> table2 = etl.groupselectfirst(table1, key='foo')
>>> table2
+-----+-----+-------+
| foo | bar | baz |
+=====+=====+=======+
| 'A' | 1 | True |
+-----+-----+-------+
| 'B' | 2 | False |
+-----+-----+-------+
| 'C' | 7 | False |
+-----+-----+-------+
See also :func:`petl.transform.reductions.groupselectlast`,
:func:`petl.transform.dedup.distinct`.
"""
def _reducer(k, rows):
return next(rows)
return rowreduce(table, key, reducer=_reducer, presorted=presorted,
buffersize=buffersize, tempdir=tempdir, cache=cache)
Table.groupselectfirst = groupselectfirst
def groupselectlast(table, key, presorted=False, buffersize=None,
tempdir=None, cache=True):
"""Group by the `key` field then return the last row within each group.
E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar', 'baz'],
... ['A', 1, True],
... ['C', 7, False],
... ['B', 2, False],
... ['C', 9, True]]
>>> table2 = etl.groupselectlast(table1, key='foo')
>>> table2
+-----+-----+-------+
| foo | bar | baz |
+=====+=====+=======+
| 'A' | 1 | True |
+-----+-----+-------+
| 'B' | 2 | False |
+-----+-----+-------+
| 'C' | 9 | True |
+-----+-----+-------+
See also :func:`petl.transform.reductions.groupselectfirst`,
:func:`petl.transform.dedup.distinct`.
.. versionadded:: 1.1.0
"""
def _reducer(k, rows):
row = None
for row in rows:
pass
return row
return rowreduce(table, key, reducer=_reducer, presorted=presorted,
buffersize=buffersize, tempdir=tempdir, cache=cache)
Table.groupselectlast = groupselectlast
def groupselectmin(table, key, value, presorted=False, buffersize=None,
tempdir=None, cache=True):
"""Group by the `key` field then return the row with the minimum of the
`value` field within each group. N.B., will only return one row for each
group, even if multiple rows have the same (minimum) value."""
return groupselectfirst(sort(table, value, reverse=False), key,
presorted=presorted, buffersize=buffersize,
tempdir=tempdir, cache=cache)
Table.groupselectmin = groupselectmin
def groupselectmax(table, key, value, presorted=False, buffersize=None,
tempdir=None, cache=True):
"""Group by the `key` field then return the row with the maximum of the
`value` field within each group. N.B., will only return one row for each
group, even if multiple rows have the same (maximum) value."""
return groupselectfirst(sort(table, value, reverse=True), key,
presorted=presorted, buffersize=buffersize,
tempdir=tempdir, cache=cache)
Table.groupselectmax = groupselectmax
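# Illustrative sketch (not part of petl): selecting the row with the extreme `bar` value within
# each `foo` group. Both calls yield the header ('foo', 'bar') first; groupselectmin then yields
# ('a', 2) and ('b', 4), while groupselectmax yields ('a', 9) and ('b', 4).
def _example_groupselect_extremes():
    table1 = [['foo', 'bar'],
              ['a', 2],
              ['a', 9],
              ['b', 4]]
    return (list(groupselectmin(table1, 'foo', 'bar')),
            list(groupselectmax(table1, 'foo', 'bar')))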
def mergeduplicates(table, key, missing=None, presorted=False, buffersize=None,
tempdir=None, cache=True):
"""
Merge duplicate rows under the given key. E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar', 'baz'],
... ['A', 1, 2.7],
... ['B', 2, None],
... ['D', 3, 9.4],
... ['B', None, 7.8],
... ['E', None, 42.],
... ['D', 3, 12.3],
... ['A', 2, None]]
>>> table2 = etl.mergeduplicates(table1, 'foo')
>>> table2
+-----+------------------+-----------------------+
| foo | bar | baz |
+=====+==================+=======================+
| 'A' | Conflict({1, 2}) | 2.7 |
+-----+------------------+-----------------------+
| 'B' | 2 | 7.8 |
+-----+------------------+-----------------------+
| 'D' | 3 | Conflict({9.4, 12.3}) |
+-----+------------------+-----------------------+
| 'E' | None | 42.0 |
+-----+------------------+-----------------------+
Missing values are overridden by non-missing values. Conflicting values are
reported as an instance of the Conflict class (sub-class of frozenset).
If `presorted` is True, it is assumed that the data are already sorted by
the given key, and the `buffersize`, `tempdir` and `cache` arguments are
ignored. Otherwise, the data are sorted, see also the discussion of the
`buffersize`, `tempdir` and `cache` arguments under the
:func:`petl.transform.sorts.sort` function.
See also :func:`petl.transform.dedup.conflicts`.
"""
return MergeDuplicatesView(table, key, missing=missing, presorted=presorted,
buffersize=buffersize, tempdir=tempdir,
cache=cache)
Table.mergeduplicates = mergeduplicates
class MergeDuplicatesView(Table):
def __init__(self, table, key, missing=None, presorted=False,
buffersize=None, tempdir=None, cache=True):
if presorted:
self.table = table
else:
self.table = sort(table, key, buffersize=buffersize,
tempdir=tempdir, cache=cache)
self.key = key
self.missing = missing
def __iter__(self):
return itermergeduplicates(self.table, self.key, self.missing)
def itermergeduplicates(table, key, missing):
it = iter(table)
hdr, it = iterpeek(it)
flds = list(map(text_type, hdr))
# determine output fields
if isinstance(key, string_types):
outhdr = [key]
keyflds = {key}
else:
outhdr = list(key)
keyflds = set(key)
valflds = [f for f in flds if f not in keyflds]
valfldidxs = [flds.index(f) for f in valflds]
outhdr.extend(valflds)
yield tuple(outhdr)
# do the work
for k, grp in rowgroupby(it, key):
grp = list(grp)
if isinstance(key, string_types):
outrow = [k]
else:
outrow = list(k)
mergedvals = [set(row[i] for row in grp
if len(row) > i and row[i] != missing)
for i in valfldidxs]
normedvals = [vals.pop() if len(vals) == 1
else missing if len(vals) == 0
else Conflict(vals)
for vals in mergedvals]
outrow.extend(normedvals)
yield tuple(outrow)
def merge(*tables, **kwargs):
"""
Convenience function to combine multiple tables (via
:func:`petl.transform.sorts.mergesort`) then combine duplicate rows by
merging under the given key (via
:func:`petl.transform.reductions.mergeduplicates`). E.g.::
>>> import petl as etl
>>> table1 = [['foo', 'bar', 'baz'],
... [1, 'A', True],
... [2, 'B', None],
... [4, 'C', True]]
>>> table2 = [['bar', 'baz', 'quux'],
... ['A', True, 42.0],
... ['B', False, 79.3],
... ['C', False, 12.4]]
>>> table3 = etl.merge(table1, table2, key='bar')
>>> table3
+-----+-----+-------------------------+------+
| bar | foo | baz | quux |
+=====+=====+=========================+======+
| 'A' | 1 | True | 42.0 |
+-----+-----+-------------------------+------+
| 'B' | 2 | False | 79.3 |
+-----+-----+-------------------------+------+
| 'C' | 4 | Conflict({False, True}) | 12.4 |
+-----+-----+-------------------------+------+
Keyword arguments are the same as for
:func:`petl.transform.sorts.mergesort`, except `key` is required.
"""
assert 'key' in kwargs, 'keyword argument "key" is required'
key = kwargs['key']
t1 = mergesort(*tables, **kwargs)
t2 = mergeduplicates(t1, key=key, presorted=True)
return t2
Table.merge = merge
class Conflict(frozenset):
def __new__(cls, items):
s = super(Conflict, cls).__new__(cls, items)
return s
def fold(table, key, f, value=None, presorted=False, buffersize=None,
tempdir=None, cache=True):
"""
Reduce rows recursively via the Python standard :func:`reduce` function.
E.g.::
>>> import petl as etl
>>> table1 = [['id', 'count'],
... [1, 3],
... [1, 5],
... [2, 4],
... [2, 8]]
>>> import operator
>>> table2 = etl.fold(table1, 'id', operator.add, 'count',
... presorted=True)
>>> table2
+-----+-------+
| key | value |
+=====+=======+
| 1 | 8 |
+-----+-------+
| 2 | 12 |
+-----+-------+
See also :func:`petl.transform.reductions.aggregate`,
:func:`petl.transform.reductions.rowreduce`.
"""
return FoldView(table, key, f, value=value, presorted=presorted,
buffersize=buffersize, tempdir=tempdir, cache=cache)
Table.fold = fold
class FoldView(Table):
def __init__(self, table, key, f, value=None, presorted=False,
buffersize=None, tempdir=None, cache=True):
if presorted:
self.table = table
else:
self.table = sort(table, key, buffersize=buffersize,
tempdir=tempdir, cache=cache)
self.key = key
self.f = f
self.value = value
def __iter__(self):
return iterfold(self.table, self.key, self.f, self.value)
def iterfold(table, key, f, value):
yield ('key', 'value')
for k, grp in rowgroupby(table, key, value):
yield k, reduce(f, grp)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
import inspect
import logging
from sqlalchemy import orm
from quantum.common import exceptions as exc
from quantum.db import db_base_plugin_v2
from quantum.db import models_v2
from quantum.openstack.common import importutils
from quantum.plugins.cisco.common import cisco_constants as const
from quantum.plugins.cisco.common import cisco_exceptions as cexc
from quantum.plugins.cisco.common import cisco_utils as cutil
from quantum.plugins.cisco.db import network_db_v2 as cdb
from quantum.plugins.cisco import l2network_plugin_configuration as conf
LOG = logging.getLogger(__name__)
class PluginV2(db_base_plugin_v2.QuantumDbPluginV2):
"""
Meta-Plugin with v2 API support for multiple sub-plugins.
"""
supported_extension_aliases = ["Cisco Credential", "Cisco qos"]
_methods_to_delegate = ['create_network',
'delete_network', 'update_network', 'get_network',
'get_networks',
'create_port', 'delete_port',
'update_port', 'get_port', 'get_ports',
'create_subnet',
'delete_subnet', 'update_subnet',
'get_subnet', 'get_subnets', ]
_master = True
def __init__(self):
"""
Loads the model class.
"""
self._model = importutils.import_object(conf.MODEL_CLASS)
if hasattr(self._model, "MANAGE_STATE") and self._model.MANAGE_STATE:
self._master = False
LOG.debug(_("Model %s manages state"), conf.MODEL_CLASS)
native_bulk_attr_name = ("_%s__native_bulk_support"
% self._model.__class__.__name__)
self.__native_bulk_support = getattr(self._model,
native_bulk_attr_name, False)
if hasattr(self._model, "supported_extension_aliases"):
self.supported_extension_aliases.extend(
self._model.supported_extension_aliases)
LOG.debug(_("Plugin initialization complete"))
def __getattribute__(self, name):
"""
When the configured model class offers to manage the state of the
logical resources, we delegate the core API calls directly to it.
Note: Bulking calls will be handled by this class, and turned into
non-bulking calls to be considered for delegation.
"""
master = object.__getattribute__(self, "_master")
methods = object.__getattribute__(self, "_methods_to_delegate")
if not master and name in methods:
return getattr(object.__getattribute__(self, "_model"),
name)
else:
return object.__getattribute__(self, name)
def __getattr__(self, name):
"""
This delegates the calls to the extensions explicitly implemented by
the model.
"""
if hasattr(self._model, name):
return getattr(self._model, name)
else:
# Must make sure we re-raise the error that led us here, since
            # otherwise getattr() and even hasattr() don't work correctly.
raise AttributeError("'%s' object has no attribute '%s'" %
(self._model, name))
"""
Core API implementation
"""
def create_network(self, context, network):
"""
Creates a new Virtual Network, and assigns it
a symbolic name.
"""
LOG.debug(_("create_network() called"))
new_network = super(PluginV2, self).create_network(context,
network)
try:
self._invoke_device_plugins(self._func_name(), [context,
new_network])
return new_network
except:
super(PluginV2, self).delete_network(context,
new_network['id'])
raise
def update_network(self, context, id, network):
"""
Updates the symbolic name belonging to a particular
Virtual Network.
"""
LOG.debug(_("update_network() called"))
upd_net_dict = super(PluginV2, self).update_network(context, id,
network)
self._invoke_device_plugins(self._func_name(), [context, id,
upd_net_dict])
return upd_net_dict
def delete_network(self, context, id):
"""
Deletes the network with the specified network identifier
belonging to the specified tenant.
"""
LOG.debug(_("delete_network() called"))
        # We first need to check if there are any ports on this network.
with context.session.begin():
network = self._get_network(context, id)
filter = {'network_id': [id]}
ports = self.get_ports(context, filters=filter)
# check if there are any tenant owned ports in-use
prefix = db_base_plugin_v2.AGENT_OWNER_PREFIX
only_svc = all(p['device_owner'].startswith(prefix) for p in ports)
if not only_svc:
raise exc.NetworkInUse(net_id=id)
context.session.close()
        # Network does not have any ports; we can proceed to delete it.
try:
network = self._get_network(context, id)
kwargs = {const.NETWORK: network,
const.BASE_PLUGIN_REF: self}
self._invoke_device_plugins(self._func_name(), [context, id,
kwargs])
return super(PluginV2, self).delete_network(context, id)
except:
raise
def get_network(self, context, id, fields=None):
"""
Gets a particular network
"""
LOG.debug(_("get_network() called"))
return super(PluginV2, self).get_network(context, id, fields)
def get_networks(self, context, filters=None, fields=None):
"""
Gets all networks
"""
LOG.debug(_("get_networks() called"))
return super(PluginV2, self).get_networks(context, filters, fields)
def create_port(self, context, port):
"""
Creates a port on the specified Virtual Network.
"""
LOG.debug(_("create_port() called"))
new_port = super(PluginV2, self).create_port(context, port)
try:
self._invoke_device_plugins(self._func_name(), [context, new_port])
return new_port
except:
super(PluginV2, self).delete_port(context, new_port['id'])
raise
def delete_port(self, context, id):
"""
Deletes a port
"""
LOG.debug(_("delete_port() called"))
port = self._get_port(context, id)
"""
TODO (Sumit): Disabling this check for now, check later
#Allow deleting a port only if the administrative state is down,
#and its operation status is also down
if port['admin_state_up'] or port['status'] == 'ACTIVE':
raise exc.PortInUse(port_id=id, net_id=port['network_id'],
att_id=port['device_id'])
"""
try:
kwargs = {const.PORT: port}
# TODO (Sumit): Might first need to check here if port is active
self._invoke_device_plugins(self._func_name(), [context, id,
kwargs])
return super(PluginV2, self).delete_port(context, id)
except:
raise
def update_port(self, context, id, port):
"""
Updates the state of a port and returns the updated port
"""
LOG.debug(_("update_port() called"))
try:
self._invoke_device_plugins(self._func_name(), [context, id,
port])
return super(PluginV2, self).update_port(context, id, port)
except:
raise
def create_subnet(self, context, subnet):
"""
Create a subnet, which represents a range of IP addresses
that can be allocated to devices.
"""
LOG.debug(_("create_subnet() called"))
new_subnet = super(PluginV2, self).create_subnet(context, subnet)
try:
self._invoke_device_plugins(self._func_name(), [context,
new_subnet])
return new_subnet
except:
super(PluginV2, self).delete_subnet(context, new_subnet['id'])
raise
def update_subnet(self, context, id, subnet):
"""
Updates the state of a subnet and returns the updated subnet
"""
LOG.debug(_("update_subnet() called"))
try:
self._invoke_device_plugins(self._func_name(), [context, id,
subnet])
return super(PluginV2, self).update_subnet(context, id, subnet)
except:
raise
def delete_subnet(self, context, id):
"""
Deletes a subnet
"""
LOG.debug(_("delete_subnet() called"))
with context.session.begin():
subnet = self._get_subnet(context, id)
# Check if ports are using this subnet
allocated_qry = context.session.query(models_v2.IPAllocation)
allocated_qry = allocated_qry.options(orm.joinedload('ports'))
allocated = allocated_qry.filter_by(subnet_id=id).all()
prefix = db_base_plugin_v2.AGENT_OWNER_PREFIX
if not all(not a.port_id or a.ports.device_owner.startswith(prefix)
for a in allocated):
raise exc.SubnetInUse(subnet_id=id)
context.session.close()
try:
kwargs = {const.SUBNET: subnet}
self._invoke_device_plugins(self._func_name(), [context, id,
kwargs])
return super(PluginV2, self).delete_subnet(context, id)
except:
raise
"""
Extension API implementation
"""
def get_all_qoss(self, tenant_id):
"""Get all QoS levels"""
LOG.debug(_("get_all_qoss() called"))
qoslist = cdb.get_all_qoss(tenant_id)
return qoslist
def get_qos_details(self, tenant_id, qos_id):
"""Get QoS Details"""
LOG.debug(_("get_qos_details() called"))
try:
qos_level = cdb.get_qos(tenant_id, qos_id)
except Exception:
raise cexc.QosNotFound(tenant_id=tenant_id,
qos_id=qos_id)
return qos_level
def create_qos(self, tenant_id, qos_name, qos_desc):
"""Create a QoS level"""
LOG.debug(_("create_qos() called"))
qos = cdb.add_qos(tenant_id, qos_name, str(qos_desc))
return qos
def delete_qos(self, tenant_id, qos_id):
"""Delete a QoS level"""
LOG.debug(_("delete_qos() called"))
try:
qos_level = cdb.get_qos(tenant_id, qos_id)
except Exception:
raise cexc.QosNotFound(tenant_id=tenant_id,
qos_id=qos_id)
return cdb.remove_qos(tenant_id, qos_id)
def rename_qos(self, tenant_id, qos_id, new_name):
"""Rename QoS level"""
LOG.debug(_("rename_qos() called"))
try:
qos_level = cdb.get_qos(tenant_id, qos_id)
except Exception:
raise cexc.QosNotFound(tenant_id=tenant_id,
qos_id=qos_id)
qos = cdb.update_qos(tenant_id, qos_id, new_name)
return qos
def get_all_credentials(self, tenant_id):
"""Get all credentials"""
LOG.debug(_("get_all_credentials() called"))
credential_list = cdb.get_all_credentials(tenant_id)
return credential_list
def get_credential_details(self, tenant_id, credential_id):
"""Get a particular credential"""
LOG.debug(_("get_credential_details() called"))
try:
credential = cdb.get_credential(tenant_id, credential_id)
except Exception:
raise cexc.CredentialNotFound(tenant_id=tenant_id,
credential_id=credential_id)
return credential
def create_credential(self, tenant_id, credential_name, user_name,
password):
"""Create a new credential"""
LOG.debug(_("create_credential() called"))
credential = cdb.add_credential(tenant_id, credential_name,
user_name, password)
return credential
def delete_credential(self, tenant_id, credential_id):
"""Delete a credential"""
LOG.debug(_("delete_credential() called"))
try:
credential = cdb.get_credential(tenant_id, credential_id)
except Exception:
raise cexc.CredentialNotFound(tenant_id=tenant_id,
credential_id=credential_id)
credential = cdb.remove_credential(tenant_id, credential_id)
return credential
def rename_credential(self, tenant_id, credential_id, new_name):
"""Rename the particular credential resource"""
LOG.debug(_("rename_credential() called"))
try:
credential = cdb.get_credential(tenant_id, credential_id)
except Exception:
raise cexc.CredentialNotFound(tenant_id=tenant_id,
credential_id=credential_id)
credential = cdb.update_credential(tenant_id, credential_id, new_name)
return credential
def schedule_host(self, tenant_id, instance_id, instance_desc):
"""Provides the hostname on which a dynamic vnic is reserved"""
LOG.debug(_("schedule_host() called"))
host_list = self._invoke_device_plugins(self._func_name(),
[tenant_id,
instance_id,
instance_desc])
return host_list
def associate_port(self, tenant_id, instance_id, instance_desc):
"""
Get the portprofile name and the device name for the dynamic vnic
"""
LOG.debug(_("associate_port() called"))
return self._invoke_device_plugins(self._func_name(), [tenant_id,
instance_id,
instance_desc])
def detach_port(self, tenant_id, instance_id, instance_desc):
"""
Remove the association of the VIF with the dynamic vnic
"""
LOG.debug(_("detach_port() called"))
return self._invoke_device_plugins(self._func_name(), [tenant_id,
instance_id,
instance_desc])
"""
Private functions
"""
def _invoke_device_plugins(self, function_name, args):
"""
Device-specific calls including core API and extensions are
delegated to the model.
"""
if hasattr(self._model, function_name):
return getattr(self._model, function_name)(*args)
def _func_name(self, offset=0):
"""Getting the name of the calling funciton"""
return inspect.stack()[1 + offset][3]
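# Illustrative sketch (not part of the original plugin): the delegation pattern used by
# PluginV2.__getattribute__ above, reduced to a minimal standalone example. When the wrapped
# model manages state, listed core-API methods are served by the model instead of the wrapper.
class _DelegationSketch(object):
    _methods_to_delegate = ['create_network']
    def __init__(self, model, model_manages_state):
        self._model = model
        self._master = not model_manages_state
    def __getattribute__(self, name):
        master = object.__getattribute__(self, "_master")
        methods = object.__getattribute__(self, "_methods_to_delegate")
        if not master and name in methods:
            # Delegate directly to the model, bypassing this wrapper's own attributes.
            return getattr(object.__getattribute__(self, "_model"), name)
        return object.__getattribute__(self, name)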
|
|
"""The tests for the Template vacuum platform."""
import logging
import pytest
from homeassistant import setup
from homeassistant.const import STATE_ON, STATE_OFF, STATE_UNKNOWN, STATE_UNAVAILABLE
from homeassistant.components.vacuum import (
ATTR_BATTERY_LEVEL,
STATE_CLEANING,
STATE_DOCKED,
STATE_IDLE,
STATE_PAUSED,
STATE_RETURNING,
)
from tests.common import async_mock_service, assert_setup_component
from tests.components.vacuum import common
_LOGGER = logging.getLogger(__name__)
_TEST_VACUUM = "vacuum.test_vacuum"
_STATE_INPUT_SELECT = "input_select.state"
_SPOT_CLEANING_INPUT_BOOLEAN = "input_boolean.spot_cleaning"
_LOCATING_INPUT_BOOLEAN = "input_boolean.locating"
_FAN_SPEED_INPUT_SELECT = "input_select.fan_speed"
_BATTERY_LEVEL_INPUT_NUMBER = "input_number.battery_level"
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
# Configuration tests #
async def test_missing_optional_config(hass, calls):
"""Test: missing optional template is ok."""
with assert_setup_component(1, "vacuum"):
assert await setup.async_setup_component(
hass,
"vacuum",
{
"vacuum": {
"platform": "template",
"vacuums": {
"test_vacuum": {"start": {"service": "script.vacuum_start"}}
},
}
},
)
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, STATE_UNKNOWN, None)
async def test_missing_start_config(hass, calls):
"""Test: missing 'start' will fail."""
with assert_setup_component(0, "vacuum"):
assert await setup.async_setup_component(
hass,
"vacuum",
{
"vacuum": {
"platform": "template",
"vacuums": {"test_vacuum": {"value_template": "{{ 'on' }}"}},
}
},
)
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.async_all() == []
async def test_invalid_config(hass, calls):
"""Test: invalid config structure will fail."""
with assert_setup_component(0, "vacuum"):
assert await setup.async_setup_component(
hass,
"vacuum",
{
"platform": "template",
"vacuums": {
"test_vacuum": {"start": {"service": "script.vacuum_start"}}
},
},
)
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.async_all() == []
# End of configuration tests #
# Template tests #
async def test_templates_with_entities(hass, calls):
"""Test templates with values from other entities."""
with assert_setup_component(1, "vacuum"):
assert await setup.async_setup_component(
hass,
"vacuum",
{
"vacuum": {
"platform": "template",
"vacuums": {
"test_vacuum": {
"value_template": "{{ states('input_select.state') }}",
"battery_level_template": "{{ states('input_number.battery_level') }}",
"start": {"service": "script.vacuum_start"},
}
},
}
},
)
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, STATE_UNKNOWN, None)
hass.states.async_set(_STATE_INPUT_SELECT, STATE_CLEANING)
hass.states.async_set(_BATTERY_LEVEL_INPUT_NUMBER, 100)
await hass.async_block_till_done()
_verify(hass, STATE_CLEANING, 100)
async def test_templates_with_valid_values(hass, calls):
"""Test templates with valid values."""
with assert_setup_component(1, "vacuum"):
assert await setup.async_setup_component(
hass,
"vacuum",
{
"vacuum": {
"platform": "template",
"vacuums": {
"test_vacuum": {
"value_template": "{{ 'cleaning' }}",
"battery_level_template": "{{ 100 }}",
"start": {"service": "script.vacuum_start"},
}
},
}
},
)
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, STATE_CLEANING, 100)
async def test_templates_invalid_values(hass, calls):
"""Test templates with invalid values."""
with assert_setup_component(1, "vacuum"):
assert await setup.async_setup_component(
hass,
"vacuum",
{
"vacuum": {
"platform": "template",
"vacuums": {
"test_vacuum": {
"value_template": "{{ 'abc' }}",
"battery_level_template": "{{ 101 }}",
"start": {"service": "script.vacuum_start"},
}
},
}
},
)
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, STATE_UNKNOWN, None)
async def test_invalid_templates(hass, calls):
"""Test invalid templates."""
with assert_setup_component(1, "vacuum"):
assert await setup.async_setup_component(
hass,
"vacuum",
{
"vacuum": {
"platform": "template",
"vacuums": {
"test_vacuum": {
"value_template": "{{ this_function_does_not_exist() }}",
"battery_level_template": "{{ this_function_does_not_exist() }}",
"fan_speed_template": "{{ this_function_does_not_exist() }}",
"start": {"service": "script.vacuum_start"},
}
},
}
},
)
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, STATE_UNKNOWN, None)
async def test_available_template_with_entities(hass, calls):
"""Test availability templates with values from other entities."""
assert await setup.async_setup_component(
hass,
"vacuum",
{
"vacuum": {
"platform": "template",
"vacuums": {
"test_template_vacuum": {
"availability_template": "{{ is_state('availability_state.state', 'on') }}",
"start": {"service": "script.vacuum_start"},
}
},
}
},
)
await hass.async_start()
await hass.async_block_till_done()
    # When the template returns true...
hass.states.async_set("availability_state.state", STATE_ON)
await hass.async_block_till_done()
# Device State should not be unavailable
assert hass.states.get("vacuum.test_template_vacuum").state != STATE_UNAVAILABLE
# When Availability template returns false
hass.states.async_set("availability_state.state", STATE_OFF)
await hass.async_block_till_done()
# device state should be unavailable
assert hass.states.get("vacuum.test_template_vacuum").state == STATE_UNAVAILABLE
async def test_invalid_availability_template_keeps_component_available(hass, caplog):
"""Test that an invalid availability keeps the device available."""
assert await setup.async_setup_component(
hass,
"vacuum",
{
"vacuum": {
"platform": "template",
"vacuums": {
"test_template_vacuum": {
"availability_template": "{{ x - 12 }}",
"start": {"service": "script.vacuum_start"},
}
},
}
},
)
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.get("vacuum.test_template_vacuum") != STATE_UNAVAILABLE
assert ("UndefinedError: 'x' is undefined") in caplog.text
# End of template tests #
# Function tests #
async def test_state_services(hass, calls):
"""Test state services."""
await _register_components(hass)
# Start vacuum
await common.async_start(hass, _TEST_VACUUM)
await hass.async_block_till_done()
# verify
assert hass.states.get(_STATE_INPUT_SELECT).state == STATE_CLEANING
_verify(hass, STATE_CLEANING, None)
# Pause vacuum
await common.async_pause(hass, _TEST_VACUUM)
await hass.async_block_till_done()
# verify
assert hass.states.get(_STATE_INPUT_SELECT).state == STATE_PAUSED
_verify(hass, STATE_PAUSED, None)
# Stop vacuum
await common.async_stop(hass, _TEST_VACUUM)
await hass.async_block_till_done()
# verify
assert hass.states.get(_STATE_INPUT_SELECT).state == STATE_IDLE
_verify(hass, STATE_IDLE, None)
# Return vacuum to base
await common.async_return_to_base(hass, _TEST_VACUUM)
await hass.async_block_till_done()
# verify
assert hass.states.get(_STATE_INPUT_SELECT).state == STATE_RETURNING
_verify(hass, STATE_RETURNING, None)
async def test_unused_services(hass, calls):
"""Test calling unused services should not crash."""
await _register_basic_vacuum(hass)
# Pause vacuum
await common.async_pause(hass, _TEST_VACUUM)
await hass.async_block_till_done()
# Stop vacuum
await common.async_stop(hass, _TEST_VACUUM)
await hass.async_block_till_done()
# Return vacuum to base
await common.async_return_to_base(hass, _TEST_VACUUM)
await hass.async_block_till_done()
# Spot cleaning
await common.async_clean_spot(hass, _TEST_VACUUM)
await hass.async_block_till_done()
# Locate vacuum
await common.async_locate(hass, _TEST_VACUUM)
await hass.async_block_till_done()
# Set fan's speed
await common.async_set_fan_speed(hass, "medium", _TEST_VACUUM)
await hass.async_block_till_done()
_verify(hass, STATE_UNKNOWN, None)
async def test_clean_spot_service(hass, calls):
"""Test clean spot service."""
await _register_components(hass)
# Clean spot
await common.async_clean_spot(hass, _TEST_VACUUM)
await hass.async_block_till_done()
# verify
assert hass.states.get(_SPOT_CLEANING_INPUT_BOOLEAN).state == STATE_ON
async def test_locate_service(hass, calls):
"""Test locate service."""
await _register_components(hass)
# Locate vacuum
await common.async_locate(hass, _TEST_VACUUM)
await hass.async_block_till_done()
# verify
assert hass.states.get(_LOCATING_INPUT_BOOLEAN).state == STATE_ON
async def test_set_fan_speed(hass, calls):
"""Test set valid fan speed."""
await _register_components(hass)
# Set vacuum's fan speed to high
await common.async_set_fan_speed(hass, "high", _TEST_VACUUM)
await hass.async_block_till_done()
# verify
assert hass.states.get(_FAN_SPEED_INPUT_SELECT).state == "high"
# Set fan's speed to medium
await common.async_set_fan_speed(hass, "medium", _TEST_VACUUM)
await hass.async_block_till_done()
# verify
assert hass.states.get(_FAN_SPEED_INPUT_SELECT).state == "medium"
async def test_set_invalid_fan_speed(hass, calls):
"""Test set invalid fan speed when fan has valid speed."""
await _register_components(hass)
# Set vacuum's fan speed to high
await common.async_set_fan_speed(hass, "high", _TEST_VACUUM)
await hass.async_block_till_done()
# verify
assert hass.states.get(_FAN_SPEED_INPUT_SELECT).state == "high"
# Set vacuum's fan speed to 'invalid'
await common.async_set_fan_speed(hass, "invalid", _TEST_VACUUM)
await hass.async_block_till_done()
# verify fan speed is unchanged
assert hass.states.get(_FAN_SPEED_INPUT_SELECT).state == "high"
def _verify(hass, expected_state, expected_battery_level):
"""Verify vacuum's state and speed."""
state = hass.states.get(_TEST_VACUUM)
attributes = state.attributes
assert state.state == expected_state
assert attributes.get(ATTR_BATTERY_LEVEL) == expected_battery_level
async def _register_basic_vacuum(hass):
"""Register basic vacuum with only required options for testing."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{"input_select": {"state": {"name": "State", "options": [STATE_CLEANING]}}},
)
with assert_setup_component(1, "vacuum"):
assert await setup.async_setup_component(
hass,
"vacuum",
{
"vacuum": {
"platform": "template",
"vacuums": {
"test_vacuum": {
"start": {
"service": "input_select.select_option",
"data": {
"entity_id": _STATE_INPUT_SELECT,
"option": STATE_CLEANING,
},
}
}
},
}
},
)
await hass.async_start()
await hass.async_block_till_done()
async def _register_components(hass):
"""Register basic components for testing."""
with assert_setup_component(2, "input_boolean"):
assert await setup.async_setup_component(
hass,
"input_boolean",
{"input_boolean": {"spot_cleaning": None, "locating": None}},
)
with assert_setup_component(2, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"state": {
"name": "State",
"options": [
STATE_CLEANING,
STATE_DOCKED,
STATE_IDLE,
STATE_PAUSED,
STATE_RETURNING,
],
},
"fan_speed": {
"name": "Fan speed",
"options": ["", "low", "medium", "high"],
},
}
},
)
with assert_setup_component(1, "vacuum"):
test_vacuum_config = {
"value_template": "{{ states('input_select.state') }}",
"fan_speed_template": "{{ states('input_select.fan_speed') }}",
"start": {
"service": "input_select.select_option",
"data": {"entity_id": _STATE_INPUT_SELECT, "option": STATE_CLEANING},
},
"pause": {
"service": "input_select.select_option",
"data": {"entity_id": _STATE_INPUT_SELECT, "option": STATE_PAUSED},
},
"stop": {
"service": "input_select.select_option",
"data": {"entity_id": _STATE_INPUT_SELECT, "option": STATE_IDLE},
},
"return_to_base": {
"service": "input_select.select_option",
"data": {"entity_id": _STATE_INPUT_SELECT, "option": STATE_RETURNING},
},
"clean_spot": {
"service": "input_boolean.turn_on",
"entity_id": _SPOT_CLEANING_INPUT_BOOLEAN,
},
"locate": {
"service": "input_boolean.turn_on",
"entity_id": _LOCATING_INPUT_BOOLEAN,
},
"set_fan_speed": {
"service": "input_select.select_option",
"data_template": {
"entity_id": _FAN_SPEED_INPUT_SELECT,
"option": "{{ fan_speed }}",
},
},
"fan_speeds": ["low", "medium", "high"],
}
assert await setup.async_setup_component(
hass,
"vacuum",
{
"vacuum": {
"platform": "template",
"vacuums": {"test_vacuum": test_vacuum_config},
}
},
)
await hass.async_start()
await hass.async_block_till_done()
|
|
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas import (
Index,
NaT,
Period,
PeriodIndex,
Series,
date_range,
offsets,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import PeriodArray
class TestPeriodIndex:
def test_construction_base_constructor(self):
# GH 13664
arr = [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")]
tm.assert_index_equal(Index(arr), PeriodIndex(arr))
tm.assert_index_equal(Index(np.array(arr)), PeriodIndex(np.array(arr)))
arr = [np.nan, NaT, Period("2011-03", freq="M")]
tm.assert_index_equal(Index(arr), PeriodIndex(arr))
tm.assert_index_equal(Index(np.array(arr)), PeriodIndex(np.array(arr)))
arr = [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="D")]
tm.assert_index_equal(Index(arr), Index(arr, dtype=object))
tm.assert_index_equal(Index(np.array(arr)), Index(np.array(arr), dtype=object))
def test_base_constructor_with_period_dtype(self):
dtype = PeriodDtype("D")
values = ["2011-01-01", "2012-03-04", "2014-05-01"]
result = Index(values, dtype=dtype)
expected = PeriodIndex(values, dtype=dtype)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"values_constructor", [list, np.array, PeriodIndex, PeriodArray._from_sequence]
)
def test_index_object_dtype(self, values_constructor):
        # Index(periods, dtype=object) is an Index (not a PeriodIndex)
periods = [
Period("2011-01", freq="M"),
NaT,
Period("2011-03", freq="M"),
]
values = values_constructor(periods)
result = Index(values, dtype=object)
assert type(result) is Index
tm.assert_numpy_array_equal(result.values, np.array(values))
def test_constructor_use_start_freq(self):
# GH #1118
p = Period("4/2/2012", freq="B")
expected = period_range(start="4/2/2012", periods=10, freq="B")
index = period_range(start=p, periods=10)
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq="Q-DEC")
expected = period_range("1990Q3", "2009Q2", freq="Q-DEC")
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq="2Q-DEC")
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
msg = "Mismatched Period array lengths"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="M")
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="2M")
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq="M")
exp = period_range("2007-01", periods=3, freq="M")
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
# U was used as undefined period
with pytest.raises(ValueError, match="Invalid frequency: X"):
period_range("2007-1-1", periods=500, freq="X")
def test_constructor_nano(self):
idx = period_range(
start=Period(ordinal=1, freq="N"), end=Period(ordinal=4, freq="N"), freq="N"
)
exp = PeriodIndex(
[
Period(ordinal=1, freq="N"),
Period(ordinal=2, freq="N"),
Period(ordinal=3, freq="N"),
Period(ordinal=4, freq="N"),
],
freq="N",
)
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(pindex.year, Index(years))
tm.assert_index_equal(pindex.quarter, Index(quarters))
def test_constructor_invalid_quarters(self):
msg = "Quarter must be 1 <= q <= 4"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=range(2000, 2004), quarter=list(range(4)), freq="Q-DEC")
def test_constructor_corner(self):
result = period_range("2007-01", periods=10.5, freq="M")
exp = period_range("2007-01", periods=10, freq="M")
tm.assert_index_equal(result, exp)
def test_constructor_fromarraylike(self):
idx = period_range("2007-01", periods=20, freq="M")
        # values is an array of Period objects, so the freq can be inferred
tm.assert_index_equal(PeriodIndex(idx.values), idx)
tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)
msg = "freq not specified and cannot be inferred"
with pytest.raises(ValueError, match=msg):
PeriodIndex(idx.asi8)
with pytest.raises(ValueError, match=msg):
PeriodIndex(list(idx.asi8))
msg = "'Period' object is not iterable"
with pytest.raises(TypeError, match=msg):
PeriodIndex(data=Period("2007", freq="A"))
result = PeriodIndex(iter(idx))
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx)
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq="M")
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq=offsets.MonthEnd())
tm.assert_index_equal(result, idx)
assert result.freq == "M"
result = PeriodIndex(idx, freq="2M")
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq="D")
exp = idx.asfreq("D", "e")
tm.assert_index_equal(result, exp)
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype("M8[us]"))
msg = r"Wrong dtype: datetime64\[us\]"
with pytest.raises(ValueError, match=msg):
PeriodIndex(vals, freq="D")
@pytest.mark.parametrize("box", [None, "series", "index"])
def test_constructor_datetime64arr_ok(self, box):
# https://github.com/pandas-dev/pandas/issues/23438
data = date_range("2017", periods=4, freq="M")
if box is None:
data = data._values
elif box == "series":
data = Series(data)
result = PeriodIndex(data, freq="D")
expected = PeriodIndex(
["2017-01-31", "2017-02-28", "2017-03-31", "2017-04-30"], freq="D"
)
tm.assert_index_equal(result, expected)
def test_constructor_dtype(self):
        # passing a period dtype should construct the index with that freq
idx = PeriodIndex(["2013-01", "2013-03"], dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-03"], freq="M")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[M]"
idx = PeriodIndex(["2013-01-05", "2013-03-05"], dtype="period[3D]")
exp = PeriodIndex(["2013-01-05", "2013-03-05"], freq="3D")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[3D]"
        # if we already have a freq and it's not the same, then convert via asfreq
        # (the original index is not changed)
idx = PeriodIndex(["2013-01-01", "2013-01-02"], freq="D")
res = PeriodIndex(idx, dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-01"], freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
res = PeriodIndex(idx, freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
msg = "specified freq and dtype are different"
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex(["2011-01"], freq="M", dtype="period[D]")
def test_constructor_empty(self):
idx = PeriodIndex([], freq="M")
assert isinstance(idx, PeriodIndex)
assert len(idx) == 0
assert idx.freq == "M"
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex([])
def test_constructor_pi_nat(self):
idx = PeriodIndex(
[Period("2011-01", freq="M"), NaT, Period("2011-01", freq="M")]
)
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array([Period("2011-01", freq="M"), NaT, Period("2011-01", freq="M")])
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
[NaT, NaT, Period("2011-01", freq="M"), Period("2011-01", freq="M")]
)
exp = PeriodIndex(["NaT", "NaT", "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array(
[NaT, NaT, Period("2011-01", freq="M"), Period("2011-01", freq="M")]
)
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([NaT, NaT, "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex([NaT, NaT])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array([NaT, NaT]))
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(["NaT", "NaT"])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array(["NaT", "NaT"]))
def test_constructor_incompat_freq(self):
msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex([Period("2011-01", freq="M"), NaT, Period("2011-01", freq="D")])
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
[Period("2011-01", freq="M"), NaT, Period("2011-01", freq="D")]
)
)
# first element is NaT
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex([NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")])
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
[NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
)
)
def test_constructor_mixed(self):
idx = PeriodIndex(["2011-01", NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(["NaT", NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["NaT", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([Period("2011-01-01", freq="D"), NaT, "2012-01-01"])
exp = PeriodIndex(["2011-01-01", "NaT", "2012-01-01"], freq="D")
tm.assert_index_equal(idx, exp)
def test_constructor_simple_new(self):
idx = period_range("2007-01", name="p", periods=2, freq="M")
with pytest.raises(AssertionError, match="<class .*PeriodIndex'>"):
idx._simple_new(idx, name="p")
result = idx._simple_new(idx._data, name="p")
tm.assert_index_equal(result, idx)
msg = "Should be numpy array of type i8"
with pytest.raises(AssertionError, match=msg):
# Need ndarray, not Int64Index
type(idx._data)._simple_new(Index(idx.asi8), freq=idx.freq)
arr = type(idx._data)._simple_new(idx.asi8, freq=idx.freq)
result = idx._simple_new(arr, name="p")
tm.assert_index_equal(result, idx)
def test_constructor_simple_new_empty(self):
# GH13079
idx = PeriodIndex([], freq="M", name="p")
with pytest.raises(AssertionError, match="<class .*PeriodIndex'>"):
idx._simple_new(idx, name="p")
result = idx._simple_new(idx._data, name="p")
tm.assert_index_equal(result, idx)
@pytest.mark.parametrize("floats", [[1.1, 2.1], np.array([1.1, 2.1])])
def test_constructor_floats(self, floats):
with pytest.raises(AssertionError, match="<class "):
PeriodIndex._simple_new(floats)
msg = "PeriodIndex does not allow floating point in construction"
with pytest.raises(TypeError, match=msg):
PeriodIndex(floats)
def test_constructor_nat(self):
msg = "start and end must not be NaT"
with pytest.raises(ValueError, match=msg):
period_range(start="NaT", end="2011-01-01", freq="M")
with pytest.raises(ValueError, match=msg):
period_range(start="2011-01-01", end="NaT", freq="M")
def test_constructor_year_and_quarter(self):
year = Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = [f"{t[0]:d}Q{t[1]:d}" for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
def test_constructor_freq_mult(self):
# GH #7811
pidx = period_range(start="2014-01", freq="2M", periods=4)
expected = PeriodIndex(["2014-01", "2014-03", "2014-05", "2014-07"], freq="2M")
tm.assert_index_equal(pidx, expected)
pidx = period_range(start="2014-01-02", end="2014-01-15", freq="3D")
expected = PeriodIndex(
["2014-01-02", "2014-01-05", "2014-01-08", "2014-01-11", "2014-01-14"],
freq="3D",
)
tm.assert_index_equal(pidx, expected)
pidx = period_range(end="2014-01-01 17:00", freq="4H", periods=3)
expected = PeriodIndex(
["2014-01-01 09:00", "2014-01-01 13:00", "2014-01-01 17:00"], freq="4H"
)
tm.assert_index_equal(pidx, expected)
msg = "Frequency must be positive, because it represents span: -1M"
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2011-01"], freq="-1M")
msg = "Frequency must be positive, because it represents span: 0M"
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2011-01"], freq="0M")
msg = "Frequency must be positive, because it represents span: 0M"
with pytest.raises(ValueError, match=msg):
period_range("2011-01", periods=3, freq="0M")
@pytest.mark.parametrize("freq", ["A", "M", "D", "T", "S"])
@pytest.mark.parametrize("mult", [1, 2, 3, 4, 5])
def test_constructor_freq_mult_dti_compat(self, mult, freq):
freqstr = str(mult) + freq
pidx = period_range(start="2014-04-01", freq=freqstr, periods=10)
expected = date_range(start="2014-04-01", freq=freqstr, periods=10).to_period(
freqstr
)
tm.assert_index_equal(pidx, expected)
def test_constructor_freq_combined(self):
for freq in ["1D1H", "1H1D"]:
pidx = PeriodIndex(["2016-01-01", "2016-01-02"], freq=freq)
expected = PeriodIndex(["2016-01-01 00:00", "2016-01-02 00:00"], freq="25H")
for freq in ["1D1H", "1H1D"]:
pidx = period_range(start="2016-01-01", periods=2, freq=freq)
expected = PeriodIndex(["2016-01-01 00:00", "2016-01-02 01:00"], freq="25H")
tm.assert_index_equal(pidx, expected)
def test_constructor(self):
pi = period_range(freq="A", start="1/1/2001", end="12/1/2009")
assert len(pi) == 9
pi = period_range(freq="Q", start="1/1/2001", end="12/1/2009")
assert len(pi) == 4 * 9
pi = period_range(freq="M", start="1/1/2001", end="12/1/2009")
assert len(pi) == 12 * 9
pi = period_range(freq="D", start="1/1/2001", end="12/31/2009")
assert len(pi) == 365 * 9 + 2
pi = period_range(freq="B", start="1/1/2001", end="12/31/2009")
assert len(pi) == 261 * 9
pi = period_range(freq="H", start="1/1/2001", end="12/31/2001 23:00")
assert len(pi) == 365 * 24
pi = period_range(freq="Min", start="1/1/2001", end="1/1/2001 23:59")
assert len(pi) == 24 * 60
pi = period_range(freq="S", start="1/1/2001", end="1/1/2001 23:59:59")
assert len(pi) == 24 * 60 * 60
start = Period("02-Apr-2005", "B")
i1 = period_range(start=start, periods=20)
assert len(i1) == 20
assert i1.freq == start.freq
assert i1[0] == start
end_intv = Period("2006-12-31", "W")
i1 = period_range(end=end_intv, periods=10)
assert len(i1) == 10
assert i1.freq == end_intv.freq
assert i1[-1] == end_intv
end_intv = Period("2006-12-31", "1w")
i2 = period_range(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period("2005-05-01", "B")
i1 = period_range(start=start, end=end_intv)
# infer freq from first element
i2 = PeriodIndex([end_intv, Period("2005-05-05", "B")])
assert len(i2) == 2
assert i2[0] == end_intv
i2 = PeriodIndex(np.array([end_intv, Period("2005-05-05", "B")]))
assert len(i2) == 2
assert i2[0] == end_intv
# Mixed freq should fail
vals = [end_intv, Period("2006-12-31", "w")]
msg = r"Input has different freq=W-SUN from PeriodIndex\(freq=B\)"
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex(vals)
vals = np.array(vals)
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex(vals)
# tuple freq disallowed GH#34703
with pytest.raises(TypeError, match="pass as a string instead"):
Period("2006-12-31", ("w", 1))
@pytest.mark.parametrize(
"freq", ["M", "Q", "A", "D", "B", "T", "S", "L", "U", "N", "H"]
)
def test_recreate_from_data(self, freq):
org = period_range(start="2001/04/01", freq=freq, periods=1)
idx = PeriodIndex(org.values, freq=freq)
tm.assert_index_equal(idx, org)
def test_map_with_string_constructor(self):
raw = [2005, 2007, 2009]
index = PeriodIndex(raw, freq="A")
expected = Index([str(num) for num in raw])
res = index.map(str)
# should return an Index
assert isinstance(res, Index)
# preserve element types
assert all(isinstance(resi, str) for resi in res)
# lastly, values should compare equal
tm.assert_index_equal(res, expected)
class TestShallowCopy:
def test_shallow_copy_empty(self):
# GH#13067
idx = PeriodIndex([], freq="M")
result = idx._view()
expected = idx
tm.assert_index_equal(result, expected)
def test_shallow_copy_disallow_i8(self):
# GH#24391
pi = period_range("2018-01-01", periods=3, freq="2D")
with pytest.raises(AssertionError, match="ndarray"):
pi._shallow_copy(pi.asi8)
def test_shallow_copy_requires_disallow_period_index(self):
pi = period_range("2018-01-01", periods=3, freq="2D")
with pytest.raises(AssertionError, match="PeriodIndex"):
pi._shallow_copy(pi)
class TestSeriesPeriod:
def test_constructor_cant_cast_period(self):
msg = "Cannot cast PeriodIndex to dtype float64"
with pytest.raises(TypeError, match=msg):
Series(period_range("2000-01-01", periods=10, freq="D"), dtype=float)
def test_constructor_cast_object(self):
s = Series(period_range("1/1/2000", periods=10), dtype=PeriodDtype("D"))
exp = Series(period_range("1/1/2000", periods=10))
tm.assert_series_equal(s, exp)
|
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Util functions and classes for cloudstorage_api."""
__all__ = ['set_default_retry_params',
'RetryParams',
]
import copy
import httplib
import logging
import math
import os
import threading
import time
import urllib
try:
from google.appengine.api import urlfetch
from google.appengine.datastore import datastore_rpc
from google.appengine.ext.ndb import eventloop
from google.appengine.ext.ndb import utils
from google.appengine import runtime
from google.appengine.runtime import apiproxy_errors
except ImportError:
from google.appengine.api import urlfetch
from google.appengine.datastore import datastore_rpc
from google.appengine import runtime
from google.appengine.runtime import apiproxy_errors
from google.appengine.ext.ndb import eventloop
from google.appengine.ext.ndb import utils
_RETRIABLE_EXCEPTIONS = (urlfetch.DownloadError,
apiproxy_errors.Error)
_thread_local_settings = threading.local()
_thread_local_settings.default_retry_params = None
def set_default_retry_params(retry_params):
"""Set a default RetryParams for current thread current request."""
_thread_local_settings.default_retry_params = copy.copy(retry_params)
def _get_default_retry_params():
"""Get default RetryParams for current request and current thread.
Returns:
A new instance of the default RetryParams.
"""
default = getattr(_thread_local_settings, 'default_retry_params', None)
if default is None or not default.belong_to_current_request():
return RetryParams()
else:
return copy.copy(default)
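# --- Illustrative sketch (not part of this module): typical usage of the two
# helpers above.  A handler installs a default RetryParams once per request;
# later library calls on the same thread pick up a copy of it.  The function
# below is hypothetical and only documents the intended call pattern.
def _example_install_default_retry_params():
  """Hypothetical example: install a per-request default RetryParams."""
  params = RetryParams(initial_delay=0.2, max_retries=4, save_access_token=True)
  set_default_retry_params(params)
  # From here on, _get_default_retry_params() called on this thread, within
  # the same request, returns a copy of `params`.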
def _quote_filename(filename):
"""Quotes filename to use as a valid URI path.
Args:
    filename: user-provided filename, e.g. /bucket/filename.
Returns:
    The filename properly quoted for use as a URI path component.
"""
return urllib.quote(filename)
def _unquote_filename(filename):
"""Unquotes a valid URI path back to its filename.
This is the opposite of _quote_filename.
Args:
    filename: a quoted filename, e.g. /bucket/some%20filename.
Returns:
The filename unquoted.
"""
return urllib.unquote(filename)
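# Example (illustrative): _quote_filename('/bucket/some file') returns
# '/bucket/some%20file' and _unquote_filename() reverses it; '/' is preserved
# because urllib.quote treats it as a safe character by default.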
def _should_retry(resp):
"""Given a urlfetch response, decide whether to retry that request."""
return (resp.status_code == httplib.REQUEST_TIMEOUT or
(resp.status_code >= 500 and
resp.status_code < 600))
class RetryParams(object):
"""Retry configuration parameters."""
@datastore_rpc._positional(1)
def __init__(self,
backoff_factor=2.0,
initial_delay=0.1,
max_delay=10.0,
min_retries=3,
max_retries=6,
max_retry_period=30.0,
urlfetch_timeout=None,
save_access_token=False):
"""Init.
This object is unique per request per thread.
    The library will retry according to these settings when the App Engine
    server can't invoke urlfetch, urlfetch times out, or urlfetch returns a
    408 or a 5xx (500-599) response.
Args:
backoff_factor: exponential backoff multiplier.
initial_delay: seconds to delay for the first retry.
max_delay: max seconds to delay for every retry.
min_retries: min number of times to retry. This value is automatically
capped by max_retries.
max_retries: max number of times to retry. Set this to 0 for no retry.
      max_retry_period: max total seconds spent on retries. Retrying stops
        when this period has passed AND at least min_retries attempts have
        been made.
      urlfetch_timeout: timeout for urlfetch in seconds. Can be None, in
        which case the value is chosen by the urlfetch module.
      save_access_token: persist the access token to datastore to avoid
        excessive use of the GetAccessToken API. The token is usually cached
        in process and in memcache, but in some cases memcache isn't reliable.
"""
self.backoff_factor = self._check('backoff_factor', backoff_factor)
self.initial_delay = self._check('initial_delay', initial_delay)
self.max_delay = self._check('max_delay', max_delay)
self.max_retry_period = self._check('max_retry_period', max_retry_period)
self.max_retries = self._check('max_retries', max_retries, True, int)
self.min_retries = self._check('min_retries', min_retries, True, int)
if self.min_retries > self.max_retries:
self.min_retries = self.max_retries
self.urlfetch_timeout = None
if urlfetch_timeout is not None:
self.urlfetch_timeout = self._check('urlfetch_timeout', urlfetch_timeout)
self.save_access_token = self._check('save_access_token', save_access_token,
True, bool)
self._request_id = os.getenv('REQUEST_LOG_ID')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def _check(cls, name, val, can_be_zero=False, val_type=float):
"""Check init arguments.
Args:
name: name of the argument. For logging purpose.
      val: value. Has to be a non-negative number.
can_be_zero: whether value can be zero.
val_type: Python type of the value.
Returns:
The value.
Raises:
      ValueError: when an invalid value is passed in.
      TypeError: when an invalid value type is passed in.
"""
valid_types = [val_type]
if val_type is float:
valid_types.append(int)
if type(val) not in valid_types:
raise TypeError(
'Expect type %s for parameter %s' % (val_type.__name__, name))
if val < 0:
raise ValueError(
          'Value for parameter %s can not be negative' % name)
if not can_be_zero and val == 0:
raise ValueError(
'Value for parameter %s can not be 0' % name)
return val
def belong_to_current_request(self):
return os.getenv('REQUEST_LOG_ID') == self._request_id
def delay(self, n, start_time):
"""Calculate delay before the next retry.
Args:
      n: the number of the current attempt. The first attempt is 1.
      start_time: unix time when the retry sequence started.
    Returns:
      Number of seconds to wait before the next retry, or -1 if retrying
        should give up.
"""
if (n > self.max_retries or
(n > self.min_retries and
time.time() - start_time > self.max_retry_period)):
return -1
return min(
math.pow(self.backoff_factor, n-1) * self.initial_delay,
self.max_delay)
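# --- Worked example (illustrative): with the defaults above
# (backoff_factor=2.0, initial_delay=0.1, max_delay=10.0, max_retries=6),
# delay(n, start_time) yields 0.1, 0.2, 0.4, 0.8, 1.6, 3.2 seconds for
# n = 1..6 and -1 for n = 7, i.e. retrying stops after max_retries attempts
# (or earlier, once max_retry_period has elapsed and min_retries were made).
# The helper below is hypothetical and exists only to show the schedule.
def _example_delay_schedule():
  """Hypothetical helper: return the default backoff schedule."""
  params = RetryParams()
  start = time.time()
  return [params.delay(n, start) for n in range(1, params.max_retries + 2)]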
def _retry_fetch(url, retry_params, **kwds):
"""A blocking fetch function similar to urlfetch.fetch.
  Use this function when a urlfetch has timed out or the response indicates
  an HTTP request timeout. It puts the current thread to sleep between
  retries, backing off according to retry_params.
Args:
url: url to fetch.
retry_params: an instance of RetryParams.
    **kwds: keyword arguments for urlfetch. If deadline is specified in kwds,
      it takes precedence over the one in RetryParams. If neither is
      specified, urlfetch uses its own default.
Returns:
A urlfetch response from the last retry. None if no retry was attempted.
Raises:
    Whatever exception was encountered during the last retry.
"""
n = 1
start_time = time.time()
delay = retry_params.delay(n, start_time)
if delay <= 0:
return
logging.info('Will retry request to %s.', url)
while delay > 0:
resp = None
try:
logging.info('Retry in %s seconds.', delay)
time.sleep(delay)
resp = urlfetch.fetch(url, **kwds)
except runtime.DeadlineExceededError:
logging.info(
'Urlfetch retry %s will exceed request deadline '
'after %s seconds total', n, time.time() - start_time)
raise
    except _RETRIABLE_EXCEPTIONS as e:
pass
n += 1
delay = retry_params.delay(n, start_time)
if resp and not _should_retry(resp):
break
elif resp:
logging.info(
'Got status %s from GCS.', resp.status_code)
else:
logging.info(
'Got exception "%r" while contacting GCS.', e)
if resp:
return resp
logging.info('Urlfetch failed after %s retries and %s seconds in total.',
n - 1, time.time() - start_time)
raise
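# --- Illustrative sketch (assumption, not existing library code): how a
# caller might fall back to the blocking helper above after a retriable
# failure.  `url` is a hypothetical argument.
def _example_fetch_with_retry(url):
  retry_params = _get_default_retry_params()
  try:
    return urlfetch.fetch(url, deadline=retry_params.urlfetch_timeout)
  except _RETRIABLE_EXCEPTIONS:
    # _retry_fetch sleeps between attempts and may return None if no retry
    # was attempted (e.g. max_retries is 0).
    return _retry_fetch(url, retry_params,
                        deadline=retry_params.urlfetch_timeout)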
def _run_until_rpc():
"""Eagerly evaluate tasklets until it is blocking on some RPC.
Usually ndb eventloop el isn't run until some code calls future.get_result().
When an async tasklet is called, the tasklet wrapper evaluates the tasklet
code into a generator, enqueues a callback _help_tasklet_along onto
the el.current queue, and returns a future.
_help_tasklet_along, when called by the el, will
get one yielded value from the generator. If the value if another future,
set up a callback _on_future_complete to invoke _help_tasklet_along
when the dependent future fulfills. If the value if a RPC, set up a
callback _on_rpc_complete to invoke _help_tasklet_along when the RPC fulfills.
Thus _help_tasklet_along drills down
the chain of futures until some future is blocked by RPC. El runs
all callbacks and constantly check pending RPC status.
"""
el = eventloop.get_event_loop()
while el.current:
el.run0()
def _eager_tasklet(tasklet):
"""Decorator to turn tasklet to run eagerly."""
@utils.wrapping(tasklet)
def eager_wrapper(*args, **kwds):
fut = tasklet(*args, **kwds)
_run_until_rpc()
return fut
return eager_wrapper
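# --- Illustrative usage (assumption; `ndb.tasklet`, `ndb.Return` and
# `key.get_async` are standard ndb API, not defined in this module):
#
#   @_eager_tasklet
#   @ndb.tasklet
#   def get_entity(key):
#     entity = yield key.get_async()
#     raise ndb.Return(entity)
#
#   fut = get_entity(some_key)  # already running up to its first pending RPC
#   entity = fut.get_result()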
|
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mox
from neutronclient.common import exceptions as n_exc
from neutronclient.v2_0 import client
from nova.api.openstack.compute.contrib import security_groups
from nova import context
from nova import exception
from nova.network import neutronv2
from nova.network.security_group import neutron_driver
from nova import test
class TestNeutronDriver(test.NoDBTestCase):
def setUp(self):
super(TestNeutronDriver, self).setUp()
self.mox.StubOutWithMock(neutronv2, 'get_client')
self.moxed_client = self.mox.CreateMock(client.Client)
neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
self.context = context.RequestContext('userid', 'my_tenantid')
setattr(self.context,
'auth_token',
'bff4a5a6b9eb4ea2a6efec6eefb77936')
def test_list_with_project(self):
project_id = '0af70a4d22cf4652824ddc1f2435dd85'
security_groups_list = {'security_groups': []}
self.moxed_client.list_security_groups(tenant_id=project_id).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
sg_api.list(self.context, project=project_id)
def test_get_with_name_duplicated(self):
sg_name = 'web_server'
expected_sg_id = '85cc3048-abc3-43cc-89b3-377341426ac5'
list_security_groups = {'security_groups':
[{'name': sg_name,
'id': expected_sg_id,
'tenant_id': self.context.tenant,
'description': 'server',
'rules': []}
]}
self.moxed_client.list_security_groups(name=sg_name, fields='id',
tenant_id=self.context.tenant).AndReturn(list_security_groups)
expected_sg = {'security_group': {'name': sg_name,
'id': expected_sg_id,
'tenant_id': self.context.tenant,
'description': 'server', 'rules': []}}
self.moxed_client.show_security_group(expected_sg_id).AndReturn(
expected_sg)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
observed_sg = sg_api.get(self.context, name=sg_name)
expected_sg['security_group']['project_id'] = self.context.tenant
del expected_sg['security_group']['tenant_id']
self.assertEqual(expected_sg['security_group'], observed_sg)
def test_create_security_group_exceed_quota(self):
name = 'test-security-group'
description = 'test-security-group'
body = {'security_group': {'name': name,
'description': description}}
message = "Quota exceeded for resources: ['security_group']"
self.moxed_client.create_security_group(
body).AndRaise(n_exc.NeutronClientException(status_code=409,
message=message))
self.mox.ReplayAll()
sg_api = security_groups.NativeNeutronSecurityGroupAPI()
self.assertRaises(exception.SecurityGroupLimitExceeded,
sg_api.create_security_group, self.context, name,
description)
def test_create_security_group_rules_exceed_quota(self):
vals = {'protocol': 'tcp', 'cidr': '0.0.0.0/0',
'parent_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
'group_id': None, 'from_port': 1025, 'to_port': 1025}
body = {'security_group_rules': [{'remote_group_id': None,
'direction': 'ingress', 'protocol': 'tcp', 'ethertype': 'IPv4',
'port_range_max': 1025, 'port_range_min': 1025,
'security_group_id': '7ae75663-277e-4a0e-8f87-56ea4e70cb47',
'remote_ip_prefix': '0.0.0.0/0'}]}
name = 'test-security-group'
message = "Quota exceeded for resources: ['security_group_rule']"
self.moxed_client.create_security_group_rule(
body).AndRaise(n_exc.NeutronClientException(status_code=409,
message=message))
self.mox.ReplayAll()
sg_api = security_groups.NativeNeutronSecurityGroupAPI()
self.assertRaises(exception.SecurityGroupLimitExceeded,
sg_api.add_rules, self.context, None, name, [vals])
def test_list_security_group_with_no_port_range_and_not_tcp_udp_icmp(self):
sg1 = {'description': 'default',
'id': '07f1362f-34f6-4136-819a-2dcde112269e',
'name': 'default',
'tenant_id': 'c166d9316f814891bcb66b96c4c891d6',
'security_group_rules':
[{'direction': 'ingress',
'ethertype': 'IPv4',
'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb',
'port_range_max': None,
'port_range_min': None,
'protocol': '51',
'remote_group_id': None,
'remote_ip_prefix': None,
'security_group_id':
'07f1362f-34f6-4136-819a-2dcde112269e',
'tenant_id': 'c166d9316f814891bcb66b96c4c891d6'}]}
self.moxed_client.list_security_groups().AndReturn(
{'security_groups': [sg1]})
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.list(self.context)
expected = [{'rules':
[{'from_port': -1, 'protocol': '51', 'to_port': -1,
'parent_group_id': '07f1362f-34f6-4136-819a-2dcde112269e',
'cidr': '0.0.0.0/0', 'group_id': None,
'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb'}],
'project_id': 'c166d9316f814891bcb66b96c4c891d6',
'id': '07f1362f-34f6-4136-819a-2dcde112269e',
'name': 'default', 'description': 'default'}]
self.assertEqual(expected, result)
def test_instances_security_group_bindings(self):
server_id = 'c5a20e8d-c4b0-47cf-9dca-ebe4f758acb1'
port1_id = '4c505aec-09aa-47bc-bcc0-940477e84dc0'
port2_id = 'b3b31a53-6e29-479f-ae5c-00b7b71a6d44'
sg1_id = '2f7ce969-1a73-4ef9-bbd6-c9a91780ecd4'
sg2_id = '20c89ce5-9388-4046-896e-64ffbd3eb584'
servers = [{'id': server_id}]
ports = [{'id': port1_id, 'device_id': server_id,
'security_groups': [sg1_id]},
{'id': port2_id, 'device_id': server_id,
'security_groups': [sg2_id]}]
port_list = {'ports': ports}
sg1 = {'id': sg1_id, 'name': 'wol'}
sg2 = {'id': sg2_id, 'name': 'eor'}
security_groups_list = {'security_groups': [sg1, sg2]}
sg_bindings = {server_id: [{'name': 'wol'}, {'name': 'eor'}]}
self.moxed_client.list_ports(device_id=[server_id]).AndReturn(
port_list)
self.moxed_client.list_security_groups(id=[sg2_id, sg1_id]).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instances_security_groups_bindings(
self.context, servers)
self.assertEqual(result, sg_bindings)
    def _test_instances_security_group_bindings_scale(self, num_servers):
        # The driver is expected to chunk list_ports calls so that no single
        # query passes more than max_query device ids.
        max_query = 150
sg1_id = '2f7ce969-1a73-4ef9-bbd6-c9a91780ecd4'
sg2_id = '20c89ce5-9388-4046-896e-64ffbd3eb584'
sg1 = {'id': sg1_id, 'name': 'wol'}
sg2 = {'id': sg2_id, 'name': 'eor'}
security_groups_list = {'security_groups': [sg1, sg2]}
servers = []
device_ids = []
ports = []
sg_bindings = {}
for i in xrange(0, num_servers):
server_id = "server-%d" % i
port_id = "port-%d" % i
servers.append({'id': server_id})
device_ids.append(server_id)
ports.append({'id': port_id,
'device_id': server_id,
'security_groups': [sg1_id, sg2_id]})
sg_bindings[server_id] = [{'name': 'wol'}, {'name': 'eor'}]
for x in xrange(0, num_servers, max_query):
self.moxed_client.list_ports(
device_id=device_ids[x:x + max_query]).\
AndReturn({'ports': ports[x:x + max_query]})
self.moxed_client.list_security_groups(id=[sg2_id, sg1_id]).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instances_security_groups_bindings(
self.context, servers)
self.assertEqual(result, sg_bindings)
def test_instances_security_group_bindings_less_than_max(self):
self._test_instances_security_group_bindings_scale(100)
def test_instances_security_group_bindings_max(self):
self._test_instances_security_group_bindings_scale(150)
    def test_instances_security_group_bindings_more_than_max(self):
self._test_instances_security_group_bindings_scale(300)
def test_instances_security_group_bindings_with_hidden_sg(self):
servers = [{'id': 'server_1'}]
ports = [{'id': '1', 'device_id': 'dev_1', 'security_groups': ['1']},
{'id': '2', 'device_id': 'dev_1', 'security_groups': ['2']}]
port_list = {'ports': ports}
sg1 = {'id': '1', 'name': 'wol'}
# User doesn't have access to sg2
security_groups_list = {'security_groups': [sg1]}
sg_bindings = {'dev_1': [{'name': 'wol'}]}
self.moxed_client.list_ports(device_id=['server_1']).AndReturn(
port_list)
self.moxed_client.list_security_groups(id=['1', '2']).AndReturn(
security_groups_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instances_security_groups_bindings(
self.context, servers)
self.assertEqual(result, sg_bindings)
def test_instance_empty_security_groups(self):
port_list = {'ports': [{'id': 1, 'device_id': '1',
'security_groups': []}]}
self.moxed_client.list_ports(device_id=['1']).AndReturn(port_list)
self.mox.ReplayAll()
sg_api = neutron_driver.SecurityGroupAPI()
result = sg_api.get_instance_security_groups(self.context, '1')
self.assertEqual([], result)
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Service.url'
db.alter_column(u'operations_service', 'url', self.gf('django.db.models.fields.URLField')(max_length=600))
# Changing field 'Agency.url'
db.alter_column(u'operations_agency', 'url', self.gf('django.db.models.fields.URLField')(max_length=600, null=True))
# Changing field 'Event.product_feed_url'
db.alter_column(u'operations_event', 'product_feed_url', self.gf('django.db.models.fields.TextField')(null=True))
def backwards(self, orm):
# Changing field 'Service.url'
db.alter_column(u'operations_service', 'url', self.gf('django.db.models.fields.URLField')(max_length=200))
# Changing field 'Agency.url'
db.alter_column(u'operations_agency', 'url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True))
# Changing field 'Event.product_feed_url'
db.alter_column(u'operations_event', 'product_feed_url', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maps.map': {
'Meta': {'object_name': 'Map'},
'center_x': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'center_y': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '800', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'projection': ('django.db.models.fields.CharField', [], {'default': "'EPSG:4326'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'}),
'zoom': ('django.db.models.fields.IntegerField', [], {})
},
u'operations.agency': {
'Meta': {'ordering': "['name']", 'object_name': 'Agency'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '600', 'null': 'True', 'blank': 'True'})
},
u'operations.deployment': {
'Meta': {'object_name': 'Deployment'},
'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deployers': ('django.db.models.fields.related.ManyToManyField', [], {'max_length': '250', 'to': u"orm['auth.User']", 'null': 'True', 'symmetrical': 'False', 'blank': 'True'}),
'deployment_location': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'description': ('tinymce.models.HTMLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['operations.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {}),
'longitude': ('django.db.models.fields.FloatField', [], {}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '1'})
},
u'operations.event': {
'Meta': {'ordering': "['-last_updated']", 'object_name': 'Event'},
'agencies': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['operations.Agency']", 'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'collaboration_link': ('django.db.models.fields.URLField', [], {'default': "'https://connect.dco.dod.mil/r3ops?launcher=false'", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('tinymce.models.HTMLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'event_location': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'filedropoff_path': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'geowidgets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['operations.GeoWidget']", 'null': 'True', 'blank': 'True'}),
'gq_job_ids': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'gq_project_ids': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {}),
'map': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.Map']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'poc': ('tinymce.models.HTMLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'posture': ('django.db.models.fields.CharField', [], {'default': "'Monitoring'", 'max_length': '25'}),
'product_feed_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rfi_generator_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'services': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['operations.Service']", 'null': 'True', 'blank': 'True'}),
'show_deployments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_event_on_map': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_geomedia_triage': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_notes': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_products': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_related_files': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_rfis': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_services': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_supporting_agencies': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_supporting_apps': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_timeline': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '250'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '1'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'})
},
u'operations.geowidget': {
'Meta': {'ordering': "['name']", 'object_name': 'GeoWidget'},
'above_zoom': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'below_zoom': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'data_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'listName': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'selectorLink': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'selectorName': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'selectorPoint': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'selectorShowIf': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'selectorSummary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'style': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tabToOpenIn': ('django.db.models.fields.CharField', [], {'default': "'_new'", 'max_length': '20', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url_if_local': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'operations.lessonlearned': {
'Meta': {'ordering': "['-created']", 'unique_together': "(('submitted_by', 'description', 'event'),)", 'object_name': 'LessonLearned'},
'action': ('tinymce.models.HTMLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'assigned_to': ('django.db.models.fields.related.ForeignKey', [], {'max_length': '250', 'related_name': "'lesson_learned_assignment'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'max_length': '50', 'to': u"orm['operations.LessonLearnedCategory']", 'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('tinymce.models.HTMLField', [], {'max_length': '1000', 'null': 'True'}),
'due': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['operations.Event']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.CharField', [], {'default': "'Low'", 'max_length': '25', 'null': 'True', 'blank': 'True'}),
'resolution': ('tinymce.models.HTMLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '1', 'null': 'True', 'blank': 'True'}),
'submitted_by': ('django.db.models.fields.related.ForeignKey', [], {'max_length': '250', 'related_name': "'lesson_learned_submission'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
'work_around': ('tinymce.models.HTMLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'})
},
u'operations.lessonlearnedcategory': {
'Meta': {'ordering': "['name']", 'object_name': 'LessonLearnedCategory'},
'description': ('tinymce.models.HTMLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'operations.service': {
'Meta': {'ordering': "['name']", 'object_name': 'Service'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('tinymce.models.HTMLField', [], {'max_length': '800', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'service_type': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['operations.ServiceType']", 'symmetrical': 'False'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '1'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '600'})
},
u'operations.servicetype': {
'Meta': {'ordering': "['name']", 'object_name': 'ServiceType'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('tinymce.models.HTMLField', [], {'max_length': '800', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '1'})
},
u'operations.sitrep': {
'Meta': {'ordering': "['-created']", 'object_name': 'SitRep'},
'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'content': ('tinymce.models.HTMLField', [], {'max_length': '6000'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['operations.Event']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'max_length': '250', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'max_length': '1', 'null': 'True', 'blank': 'True'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['operations']
|
|
from sfepy.terms.terms import *
from sfepy.terms.terms_base import ScalarScalar
class DiffusionTerm( ScalarScalar, Term ):
r"""
:Description:
General diffusion term with permeability :math:`K_{ij}`. Can be
evaluated. Can use derivatives.
:Definition:
.. math::
\int_{\Omega} K_{ij} \nabla_i q \nabla_j p \mbox{ , } \int_{\Omega}
K_{ij} \nabla_i \bar{p} \nabla_j r
:Arguments 1:
material : :math:`K_{ij}`,
virtual : :math:`q`,
state : :math:`p`
:Arguments 2:
material : :math:`K_{ij}`,
parameter_1 : :math:`\bar{p}`,
parameter_2 : :math:`r`
"""
name = 'dw_diffusion'
arg_types = (('material', 'virtual', 'state'),
('material', 'parameter_1', 'parameter_2'))
modes = ('weak', 'eval')
symbolic = {'expression': 'div( K * grad( u ) )',
'map' : {'u' : 'state', 'K' : 'material'}}
def get_fargs_weak( self, diff_var = None, chunk_size = None, **kwargs ):
mat, virtual, state = self.get_args( **kwargs )
ap, vg = self.get_approximation(virtual)
self.set_data_shape( ap )
shape, mode = self.get_shape( diff_var, chunk_size )
vec = self.get_vector( state )
n_el, n_qp, dim, n_ep = self.data_shape
if state.is_real():
return (vec, 0, mat, vg, ap.econn), shape, mode
else:
ac = nm.ascontiguousarray
mode += 1j
return [(ac(vec.real), 0, mat, vg, ap.econn),
(ac(vec.imag), 0, mat, vg, ap.econn)], shape, mode
def get_fargs_eval( self, diff_var = None, chunk_size = None, **kwargs ):
mat, par1, par2 = self.get_args( **kwargs )
ap, vg = self.get_approximation(par1)
self.set_data_shape( ap )
n_el, n_qp, dim, n_ep = self.data_shape
cache = self.get_cache( 'grad_scalar', 0 )
gp1 = cache('grad', self, 0, state=par1, get_vector=self.get_vector)
cache = self.get_cache( 'grad_scalar', 1 )
gp2 = cache('grad', self, 0, state=par2, get_vector=self.get_vector)
return (gp1, gp2, mat, vg), (chunk_size, 1, 1, 1), 0
def set_arg_types( self ):
if self.mode == 'weak':
self.function = terms.dw_diffusion
use_method_with_name( self, self.get_fargs_weak, 'get_fargs' )
else:
self.function = terms.d_diffusion
use_method_with_name( self, self.get_fargs_eval, 'get_fargs' )
self.use_caches = {'grad_scalar' : [['parameter_1'],
['parameter_2']]}
class LaplaceTerm(DiffusionTerm):
r"""
:Description:
Laplace term with :math:`c` coefficient. Can be
evaluated. Can use derivatives.
:Definition:
.. math::
\int_{\Omega} c \nabla q \cdot \nabla p \mbox{ , } \int_{\Omega}
c \nabla \bar{p} \cdot \nabla r
:Arguments 1:
material : :math:`c`,
virtual : :math:`q`,
state : :math:`p`
:Arguments 2:
material : :math:`c`,
parameter_1 : :math:`\bar{p}`,
parameter_2 : :math:`r`
"""
name = 'dw_laplace'
arg_types = (('material', 'virtual', 'state'),
('material', 'parameter_1', 'parameter_2'))
modes = ('weak', 'eval')
symbolic = {'expression': 'c * div( grad( u ) )',
'map' : {'u' : 'state', 'c' : 'material'}}
def set_arg_types(self):
if self.mode == 'weak':
self.function = terms.dw_laplace
use_method_with_name(self, self.get_fargs_weak, 'get_fargs')
else:
self.function = terms.d_laplace
use_method_with_name(self, self.get_fargs_eval, 'get_fargs')
self.use_caches = {'grad_scalar' : [['parameter_1'],
['parameter_2']]}
class PermeabilityRTerm( Term ):
r"""
:Description:
Special-purpose diffusion-like term with permeability :math:`K_{ij}` (to
use on the right-hand side).
:Definition:
.. math::
\int_{\Omega} K_{ij} \nabla_j q
:Arguments:
material : :math:`K_{ij}`,
virtual : :math:`q`,
index : :math:`i`
"""
name = 'dw_permeability_r'
arg_types = ('material', 'virtual', 'index')
function = staticmethod(terms.dw_permeability_r)
def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
mat, virtual, index = self.get_args( **kwargs )
ap, vg = self.get_approximation(virtual)
n_el, n_qp, dim, n_ep = ap.get_v_data_shape(self.integral)
if diff_var is None:
shape = (chunk_size, 1, n_ep, 1)
else:
raise StopIteration
if isinstance(index, list):
index = index[0]
mat = nm.ascontiguousarray(mat[...,index:index+1])
for out, chunk in self.char_fun( chunk_size, shape ):
status = self.function( out, mat, vg, ap.econn, chunk )
yield out, chunk, status
class DiffusionRTerm( PermeabilityRTerm ):
r"""
:Description:
Diffusion-like term with material parameter :math:`K_{j}` (to
use on the right-hand side).
:Definition:
.. math::
\int_{\Omega} K_{j} \nabla_j q
:Arguments:
material : :math:`K_j`,
virtual : :math:`q`
"""
name = 'dw_diffusion_r'
arg_types = ('material', 'virtual')
def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
mat, virtual = self.get_args( **kwargs )
ap, vg = self.get_approximation(virtual)
n_el, n_qp, dim, n_ep = ap.get_v_data_shape(self.integral)
if diff_var is None:
shape = (chunk_size, 1, n_ep, 1)
else:
raise StopIteration
for out, chunk in self.char_fun( chunk_size, shape ):
status = self.function( out, mat, vg, ap.econn, chunk )
yield out, chunk, status
class DiffusionVelocityTerm( Term ):
r"""
:Description:
Diffusion velocity averaged in elements.
:Definition:
.. math::
\mbox{vector for } K \from \Ical_h: \int_{T_K} -K_{ij} \nabla_j r
/ \int_{T_K} 1
:Arguments:
material : :math:`K_{ij}`,
parameter : :math:`r`
"""
name = 'de_diffusion_velocity'
arg_types = ('material','parameter')
function = staticmethod(terms.de_diffusion_velocity)
def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
mat, parameter = self.get_args( **kwargs )
ap, vg = self.get_approximation(parameter)
n_el, n_qp, dim, n_ep = ap.get_v_data_shape(self.integral)
if diff_var is None:
shape = (chunk_size, 1, dim, 1)
else:
raise StopIteration
vec = parameter()
for out, chunk in self.char_fun( chunk_size, shape ):
status = self.function( out, vec, 0,
mat, vg, ap.econn, chunk )
out1 = out / vg.variable( 2 )[chunk]
yield out1, chunk, status
|
|
# -*- coding: utf-8 -*-
import pytest
import six
import pvl
class DictLike(object):
def keys(self):
return ['a', 'b', 'a']
def __getitem__(self, key):
return 42
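# DictLike mimics a minimal mapping-style object (keys() plus __getitem__) with
# a duplicate key, so the tests below can check that PVLModule preserves
# duplicate keys when constructed from such an object.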
def test_empty():
module = pvl.PVLModule()
assert len(module) == 0
assert module.get('c', 42) == 42
with pytest.raises(KeyError):
module['c']
def test_list_creation():
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
assert len(module) == 3
assert module['a'] == 1
assert module['b'] == 2
assert module.getlist('a') == [1, 3]
with pytest.raises(KeyError):
module['c']
assert module.get('c', 42) == 42
with pytest.raises(TypeError):
pvl.PVLModule([], [])
module = pvl.PVLModule(DictLike())
assert len(module) == 3
assert module['a'] == 42
assert module['b'] == 42
assert module.getlist('a') == [42, 42]
with pytest.raises(KeyError):
module['c']
def test_dict_creation():
module = pvl.PVLModule({'a': 1, 'b': 2})
assert module['a'] == 1
assert module['b'] == 2
assert len(module) == 2
with pytest.raises(KeyError):
module['c']
assert module.get('c', 42) == 42
def test_keyword_creation():
module = pvl.PVLModule(a=1, b=2)
assert module['a'] == 1
assert module['b'] == 2
assert len(module) == 2
with pytest.raises(KeyError):
module['c']
assert module.get('c', 42) == 42
def test_key_access():
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
assert module['a'] == 1
assert module['b'] == 2
with pytest.raises(KeyError):
module['c']
def test_index_access():
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
assert module[0] == ('a', 1)
assert module[1] == ('b', 2)
assert module[2] == ('a', 3)
with pytest.raises(IndexError):
module[3]
def test_slice_access():
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
assert module[0:3] == [('a', 1), ('b', 2), ('a', 3)]
assert module[1:] == [('b', 2), ('a', 3)]
assert module[:-1] == [('a', 1), ('b', 2)]
def test_set():
module = pvl.PVLModule()
module['a'] = 1
module['b'] = 2
module['a'] = 3
assert module['a'] == 3
assert module['b'] == 2
assert module.getlist('a') == [3]
assert len(module) == 2
with pytest.raises(KeyError):
module['c']
assert module.get('c', 42) == 42
assert list(module) == [('a', 3), ('b', 2)]
def test_delete():
module = pvl.PVLModule(a=1, b=2)
assert len(module) == 2
assert module['a'] == 1
del module['a']
assert len(module) == 1
with pytest.raises(KeyError):
module['a']
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
assert len(module) == 3
assert module['a'] == 1
del module['a']
assert len(module) == 1
with pytest.raises(KeyError):
module['a']
with pytest.raises(KeyError):
del module['c']
def test_clear():
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
assert len(module) == 3
assert module['a'] == 1
module.clear()
assert len(module) == 0
assert module.getlist('a') == []
with pytest.raises(KeyError):
module['a']
with pytest.raises(KeyError):
module['b']
module['a'] = 42
assert len(module) == 1
assert module['a'] == 42
def test_discard():
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
assert len(module) == 3
assert module['a'] == 1
module.discard('a')
assert len(module) == 1
assert module.getlist('a') == []
with pytest.raises(KeyError):
module['a']
assert module['b'] == 2
module.discard('b')
assert len(module) == 0
with pytest.raises(KeyError):
module['b']
module.discard('c')
assert len(module) == 0
def test_pop():
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
assert len(module) == 3
assert module.pop('a') == 1
assert len(module) == 1
with pytest.raises(KeyError):
module['a']
with pytest.raises(KeyError):
module.pop('a')
assert module.pop('a', 42) == 42
assert module.pop('b') == 2
assert len(module) == 0
with pytest.raises(KeyError):
module.pop('b')
with pytest.raises(KeyError):
module['b']
assert module.pop('b', 42) == 42
with pytest.raises(KeyError):
module.pop('c')
assert module.pop('c', 42) == 42
def test_popitem():
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
assert len(module) == 3
assert module.popitem() == ('a', 3)
assert len(module) == 2
assert module.popitem() == ('b', 2)
assert len(module) == 1
assert module.popitem() == ('a', 1)
assert len(module) == 0
with pytest.raises(KeyError):
module.popitem()
def test_update():
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
module.update({'a': 42, 'c': 7})
assert len(module) == 3
assert module['a'] == 42
assert module['b'] == 2
assert module['c'] == 7
module.update()
assert len(module) == 3
assert module['a'] == 42
assert module['b'] == 2
assert module['c'] == 7
def test_append():
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
module.append('a', 42)
assert len(module) == 4
assert module['a'] == 1
assert module.getlist('a') == [1, 3, 42]
module.append('c', 43)
assert len(module) == 5
assert module['c'] == 43
assert module.getlist('c') == [43]
def test_len():
module = pvl.PVLModule()
assert len(module) == 0
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
assert len(module) == 3
def test_repr():
module = pvl.PVLModule()
assert isinstance(repr(module), str)
assert repr(module) == 'PVLModule([])'
module = pvl.PVLModule(a=1)
assert isinstance(repr(module), str)
@pytest.mark.skipif(six.PY3, reason='requires python2')
def test_py2_items():
module = pvl.PVLModule()
assert isinstance(module.items(), list)
assert module.items() == []
assert isinstance(module.keys(), list)
assert module.keys() == []
assert isinstance(module.values(), list)
assert module.values() == []
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
items = module.items()
assert isinstance(items, list)
assert items == [('a', 1), ('b', 2), ('a', 3)]
assert items.index(('a', 1)) == 0
assert items.index(('b', 2)) == 1
assert items.index(('a', 3)) == 2
keys = module.keys()
assert isinstance(keys, list)
assert keys == ['a', 'b', 'a']
assert keys.index('a') == 0
assert keys.index('b') == 1
values = module.values()
assert isinstance(values, list)
assert values == [1, 2, 3]
assert values.index(1) == 0
assert values.index(2) == 1
assert values.index(3) == 2
@pytest.mark.skipif(six.PY2, reason='requires python3')
def test_py3_items():
module = pvl.PVLModule()
assert isinstance(module.items(), pvl._collections.ItemsView)
with pytest.raises(IndexError):
module.items()[0]
assert isinstance(module.keys(), pvl._collections.KeysView)
with pytest.raises(IndexError):
module.keys()[0]
assert isinstance(module.values(), pvl._collections.ValuesView)
with pytest.raises(IndexError):
module.values()[0]
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
assert isinstance(module.items(), pvl._collections.ItemsView)
items = module.items()
assert items[0] == ('a', 1)
assert items[1] == ('b', 2)
assert items[2] == ('a', 3)
assert items.index(('a', 1)) == 0
assert items.index(('b', 2)) == 1
assert items.index(('a', 3)) == 2
assert isinstance(module.keys(), pvl._collections.KeysView)
keys = module.keys()
assert keys[0] == 'a'
assert keys[1] == 'b'
assert keys[2] == 'a'
assert keys.index('a') == 0
assert keys.index('b') == 1
assert isinstance(module.values(), pvl._collections.ValuesView)
values = module.values()
assert values[0] == 1
assert values[1] == 2
assert values[2] == 3
assert values.index(1) == 0
assert values.index(2) == 1
assert values.index(3) == 2
if six.PY3:
def iteritems(module):
return module.items()
def iterkeys(module):
return module.keys()
def itervalues(module):
return module.values()
else:
def iteritems(module):
return module.iteritems()
def iterkeys(module):
return module.iterkeys()
def itervalues(module):
return module.itervalues()
def test_iterators():
module = pvl.PVLModule()
assert isinstance(iteritems(module), pvl._collections.MappingView)
assert list(iteritems(module)) == []
assert len(iteritems(module)) == 0
assert isinstance(repr(iteritems(module)), str)
assert ('a', 1) not in iteritems(module)
assert isinstance(iterkeys(module), pvl._collections.MappingView)
assert list(iterkeys(module)) == []
assert len(iterkeys(module)) == 0
assert isinstance(repr(iterkeys(module)), str)
assert 'a' not in iterkeys(module)
assert isinstance(itervalues(module), pvl._collections.MappingView)
assert list(itervalues(module)) == []
assert len(itervalues(module)) == 0
assert isinstance(repr(itervalues(module)), str)
assert 1 not in itervalues(module)
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
assert isinstance(iteritems(module), pvl._collections.MappingView)
assert list(iteritems(module)) == [('a', 1), ('b', 2), ('a', 3)]
assert len(iteritems(module)) == 3
assert isinstance(repr(iteritems(module)), str)
assert ('a', 1) in iteritems(module)
assert ('b', 2) in iteritems(module)
assert ('a', 3) in iteritems(module)
assert ('c', 4) not in iteritems(module)
assert isinstance(iterkeys(module), pvl._collections.MappingView)
assert list(iterkeys(module)) == ['a', 'b', 'a']
assert len(iterkeys(module)) == 3
assert isinstance(repr(iterkeys(module)), str)
assert 'a' in iterkeys(module)
assert 'b' in iterkeys(module)
assert 'c' not in iterkeys(module)
assert isinstance(itervalues(module), pvl._collections.MappingView)
assert list(itervalues(module)) == [1, 2, 3]
assert len(itervalues(module)) == 3
assert isinstance(repr(itervalues(module)), str)
assert 1 in itervalues(module)
assert 2 in itervalues(module)
assert 3 in itervalues(module)
assert 4 not in itervalues(module)
def test_equality():
assert not pvl.PVLModule()
assert not pvl.PVLGroup()
assert not pvl.PVLObject()
assert not not pvl.PVLModule(a=1)
assert not not pvl.PVLGroup(a=1)
assert not not pvl.PVLObject(a=1)
assert pvl.PVLModule() == pvl.PVLModule()
assert pvl.PVLModule() != pvl.PVLGroup()
assert pvl.PVLModule() != pvl.PVLObject()
assert pvl.PVLGroup() != pvl.PVLModule()
assert pvl.PVLGroup() == pvl.PVLGroup()
assert pvl.PVLGroup() != pvl.PVLObject()
assert pvl.PVLObject() != pvl.PVLModule()
assert pvl.PVLObject() != pvl.PVLGroup()
assert pvl.PVLObject() == pvl.PVLObject()
assert pvl.PVLModule() != pvl.PVLModule(a=1)
assert pvl.PVLModule(a=1) == pvl.PVLModule(a=1)
assert pvl.PVLModule(a=1) == pvl.PVLModule([('a', 1)])
assert pvl.PVLModule(a=1) == pvl.PVLModule({'a': 1})
assert pvl.PVLModule(a=1) != pvl.PVLModule(b=1)
assert pvl.PVLModule(a=1) != pvl.PVLModule(a=2)
def test_copy():
module = pvl.PVLModule()
copy = module.copy()
assert module == copy
assert module is not copy
module['c'] = 42
assert module != copy
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
copy = module.copy()
assert module == copy
assert module is not copy
module['c'] = 42
assert module != copy
def test_conversion():
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
expected_dict = {
'a': [1, 3],
'b': [2],
}
expected_list = [
('a', 1),
('b', 2),
('a', 3),
]
assert dict(module) == expected_dict
assert list(module) == expected_list
@pytest.mark.parametrize(
'expected_label, key, instance, expected_list, expected_value', [
([
('a', 4),
('a', 1),
('b', 2),
('a', 3),
('c', 5),
], 'a', 0, [4, 1, 3], 4),
([
('a', 1),
('a', 4),
('b', 2),
('a', 3),
('c', 5),
], 'b', 0, [1, 4, 3], 1),
([
('a', 1),
('b', 2),
('a', 4),
('a', 3),
('c', 5),
], 'a', 1, [1, 4, 3], 1),
([
('a', 1),
('b', 2),
('a', 3),
('a', 4),
('c', 5),
], 'c', 0, [1, 3, 4], 1)
])
def test_insert_before(expected_label, key, instance, expected_list,
expected_value):
module1 = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
('c', 5),
])
module2 = module1.copy()
expected_module = pvl.PVLModule(expected_label)
module1.insert_before(key, [('a', 4)], instance)
assert expected_module == module1
assert module1['a'] == expected_value
assert module1.getlist('a') == expected_list
module2.insert_before(key, pvl.PVLModule([('a', 4)]), instance)
assert module2 == expected_module
assert module2['a'] == expected_value
assert module2.getlist('a') == expected_list
@pytest.mark.parametrize(
'expected_label, key, instance, expected_list, expected_value', [
([
('a', 1),
('a', 4),
('b', 2),
('a', 3),
('c', 5),
], 'a', 0, [1, 4, 3], 1),
([
('a', 1),
('b', 2),
('a', 4),
('a', 3),
('c', 5),
], 'b', 0, [1, 4, 3], 1),
([
('a', 1),
('b', 2),
('a', 3),
('a', 4),
('c', 5),
], 'a', 1, [1, 3, 4], 1),
([
('a', 1),
('b', 2),
('a', 3),
('c', 5),
('a', 4),
], 'c', 0, [1, 3, 4], 1)
])
def test_insert_after(expected_label, key, instance, expected_list,
expected_value):
module1 = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
('c', 5),
])
module2 = module1.copy()
expected_module = pvl.PVLModule(expected_label)
module1.insert_after(key, [('a', 4)], instance)
assert expected_module == module1
assert module1['a'] == expected_value
assert module1.getlist('a') == expected_list
module2.insert_after(key, pvl.PVLModule([('a', 4)]), instance)
assert module2 == expected_module
assert module2['a'] == expected_value
assert module2.getlist('a') == expected_list
@pytest.mark.parametrize(
'key, instance, expected_index', [
('a', 0, 0),
('b', 0, 1),
('a', 1, 2)
])
def test_get_index_for_insert(key, instance, expected_index):
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
assert module._get_index_for_insert(key, instance) == expected_index
def test_insert_raises():
module = pvl.PVLModule([
('a', 1),
('b', 2),
('a', 3),
])
with pytest.raises(KeyError):
module.insert_before('error_key', [('foo', 'bar')])
with pytest.raises(KeyError):
module.insert_after('error_key', [('foo', 'bar')])
with pytest.raises(TypeError):
module.insert_before('a', ('foo', 'bar'))
with pytest.raises(TypeError):
module.insert_after('a', ('foo', 'bar'))
with pytest.raises(ValueError):
module.insert_before('a', [('foo', 'bar')], 2)
with pytest.raises(ValueError):
module.insert_after('a', [('foo', 'bar')], 2)
|
|
# Copyright 2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import traceback
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.compute import vm_states
from nova import exception
from nova import flags
from nova.openstack.common import log as logging
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
# States usable in resetState action
state_map = dict(active=vm_states.ACTIVE, error=vm_states.ERROR)
def authorize(context, action_name):
action = 'admin_actions:%s' % action_name
extensions.extension_authorizer('compute', action)(context)
class AdminActionsController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(AdminActionsController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
# TODO(bcwaldon): These action names should be prefixed with 'os-'
@wsgi.action('pause')
def _pause(self, req, id, body):
"""Permit Admins to pause the server"""
ctxt = req.environ['nova.context']
authorize(ctxt, 'pause')
try:
server = self.compute_api.get(ctxt, id)
self.compute_api.pause(ctxt, server)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'pause')
except Exception:
readable = traceback.format_exc()
LOG.exception(_("Compute.api::pause %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('unpause')
def _unpause(self, req, id, body):
"""Permit Admins to unpause the server"""
ctxt = req.environ['nova.context']
authorize(ctxt, 'unpause')
try:
server = self.compute_api.get(ctxt, id)
self.compute_api.unpause(ctxt, server)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'unpause')
except Exception:
readable = traceback.format_exc()
LOG.exception(_("Compute.api::unpause %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('suspend')
def _suspend(self, req, id, body):
"""Permit admins to suspend the server"""
context = req.environ['nova.context']
authorize(context, 'suspend')
try:
server = self.compute_api.get(context, id)
self.compute_api.suspend(context, server)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'suspend')
except Exception:
readable = traceback.format_exc()
LOG.exception(_("compute.api::suspend %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('resume')
def _resume(self, req, id, body):
"""Permit admins to resume the server from suspend"""
context = req.environ['nova.context']
authorize(context, 'resume')
try:
server = self.compute_api.get(context, id)
self.compute_api.resume(context, server)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resume')
except Exception:
readable = traceback.format_exc()
LOG.exception(_("compute.api::resume %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('migrate')
def _migrate(self, req, id, body):
"""Permit admins to migrate a server to a new host"""
context = req.environ['nova.context']
authorize(context, 'migrate')
try:
instance = self.compute_api.get(context, id)
self.compute_api.resize(req.environ['nova.context'], instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'migrate')
except Exception as e:
LOG.exception(_("Error in migrate %s"), e)
raise exc.HTTPBadRequest()
return webob.Response(status_int=202)
@wsgi.action('resetNetwork')
def _reset_network(self, req, id, body):
"""Permit admins to reset networking on an server"""
context = req.environ['nova.context']
authorize(context, 'resetNetwork')
try:
instance = self.compute_api.get(context, id)
self.compute_api.reset_network(context, instance)
except Exception:
readable = traceback.format_exc()
LOG.exception(_("Compute.api::reset_network %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('injectNetworkInfo')
def _inject_network_info(self, req, id, body):
"""Permit admins to inject network info into a server"""
context = req.environ['nova.context']
authorize(context, 'injectNetworkInfo')
try:
instance = self.compute_api.get(context, id)
self.compute_api.inject_network_info(context, instance)
except exception.InstanceNotFound:
raise exc.HTTPNotFound(_("Server not found"))
except Exception:
readable = traceback.format_exc()
LOG.exception(_("Compute.api::inject_network_info %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('lock')
def _lock(self, req, id, body):
"""Permit admins to lock a server"""
context = req.environ['nova.context']
authorize(context, 'lock')
try:
instance = self.compute_api.get(context, id)
self.compute_api.lock(context, instance)
except exception.InstanceNotFound:
raise exc.HTTPNotFound(_("Server not found"))
except Exception:
readable = traceback.format_exc()
LOG.exception(_("Compute.api::lock %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('unlock')
def _unlock(self, req, id, body):
"""Permit admins to lock a server"""
context = req.environ['nova.context']
authorize(context, 'unlock')
try:
instance = self.compute_api.get(context, id)
self.compute_api.unlock(context, instance)
except exception.InstanceNotFound:
raise exc.HTTPNotFound(_("Server not found"))
except Exception:
readable = traceback.format_exc()
LOG.exception(_("Compute.api::unlock %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@wsgi.action('createBackup')
def _create_backup(self, req, id, body):
"""Backup a server instance.
Images now have an `image_type` associated with them, which can be
'snapshot' or the backup type, like 'daily' or 'weekly'.
If the image_type is backup-like, then the rotation factor can be
included and that will cause the oldest backups that exceed the
rotation factor to be deleted.
"""
context = req.environ["nova.context"]
authorize(context, 'createBackup')
try:
entity = body["createBackup"]
except (KeyError, TypeError):
raise exc.HTTPBadRequest(_("Malformed request body"))
try:
image_name = entity["name"]
backup_type = entity["backup_type"]
rotation = entity["rotation"]
except KeyError as missing_key:
msg = _("createBackup entity requires %s attribute") % missing_key
raise exc.HTTPBadRequest(explanation=msg)
except TypeError:
msg = _("Malformed createBackup entity")
raise exc.HTTPBadRequest(explanation=msg)
try:
rotation = int(rotation)
except ValueError:
msg = _("createBackup attribute 'rotation' must be an integer")
raise exc.HTTPBadRequest(explanation=msg)
if rotation < 0:
msg = _("createBackup attribute 'rotation' must be greater "
"than or equal to zero")
raise exc.HTTPBadRequest(explanation=msg)
props = {}
metadata = entity.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
try:
props.update(metadata)
except ValueError:
msg = _("Invalid metadata")
raise exc.HTTPBadRequest(explanation=msg)
try:
instance = self.compute_api.get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound(_("Instance not found"))
try:
image = self.compute_api.backup(context, instance, image_name,
backup_type, rotation, extra_properties=props)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'createBackup')
resp = webob.Response(status_int=202)
# build location of newly-created image entity if rotation is not zero
if rotation > 0:
image_id = str(image['id'])
image_ref = os.path.join(req.application_url, 'images', image_id)
resp.headers['Location'] = image_ref
return resp
@wsgi.action('os-migrateLive')
def _migrate_live(self, req, id, body):
"""Permit admins to (live) migrate a server to a new host"""
context = req.environ["nova.context"]
authorize(context, 'migrateLive')
try:
block_migration = body["os-migrateLive"]["block_migration"]
disk_over_commit = body["os-migrateLive"]["disk_over_commit"]
host = body["os-migrateLive"]["host"]
except (TypeError, KeyError):
msg = _("host and block_migration must be specified.")
raise exc.HTTPBadRequest(explanation=msg)
try:
instance = self.compute_api.get(context, id)
self.compute_api.live_migrate(context, instance, block_migration,
disk_over_commit, host)
except exception.ComputeServiceUnavailable as ex:
raise exc.HTTPBadRequest(explanation=ex.format_message())
except Exception:
msg = _("Live migration of instance %(id)s to host %(host)s"
" failed") % locals()
LOG.exception(msg)
# Return messages from scheduler
raise exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
@wsgi.action('os-resetState')
def _reset_state(self, req, id, body):
"""Permit admins to reset the state of a server."""
context = req.environ["nova.context"]
authorize(context, 'resetState')
# Identify the desired state from the body
try:
state = state_map[body["os-resetState"]["state"]]
except (TypeError, KeyError):
msg = _("Desired state must be specified. Valid states "
"are: %s") % ', '.join(sorted(state_map.keys()))
raise exc.HTTPBadRequest(explanation=msg)
try:
instance = self.compute_api.get(context, id)
self.compute_api.update(context, instance,
vm_state=state,
task_state=None)
except exception.InstanceNotFound:
raise exc.HTTPNotFound(_("Server not found"))
except Exception:
readable = traceback.format_exc()
LOG.exception(_("Compute.api::resetState %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
class Admin_actions(extensions.ExtensionDescriptor):
"""Enable admin-only server actions
Actions include: pause, unpause, suspend, resume, migrate,
resetNetwork, injectNetworkInfo, lock, unlock, createBackup
"""
name = "AdminActions"
alias = "os-admin-actions"
namespace = "http://docs.openstack.org/compute/ext/admin-actions/api/v1.1"
updated = "2011-09-20T00:00:00+00:00"
def get_controller_extensions(self):
controller = AdminActionsController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Manages subcommands in a script.
Each subcommand should look like this:
@usage('[pet name]')
def CMDpet(parser, args):
'''Prints a pet.
Many people like pets. This command prints a pet for your pleasure.
'''
parser.add_option('--color', help='color of your pet')
options, args = parser.parse_args(args)
if len(args) != 1:
parser.error('A pet name is required')
pet = args[0]
if options.color:
print('Nice %s %s' % (options.color, pet))
else:
print('Nice %s' % pet)
return 0
Explanation:
- usage decorator alters the 'usage: %prog' line in the command's help.
- docstring is used for both the short help line and the long help line.
- parser can be augmented with arguments.
- return the exit code.
- Every function in the specified module with a name starting with 'CMD' will
be a subcommand.
- The module's docstring will be used in the default 'help' page.
- If a command has no docstring, it will not be listed in the 'help' page.
Useful for keeping compatibility commands or aliases around.
- If a command is an alias to another one, it won't be documented. E.g.:
CMDoldname = CMDnewcmd
will result in oldname not being documented but supported and redirecting to
newcmd. Make it a real function that calls the old function if you want it
to be documented.
- CMDfoo_bar will be command 'foo-bar'.
"""
import difflib
import sys
import textwrap
def usage(more):
"""Adds a 'usage_more' property to a CMD function."""
def hook(fn):
fn.usage_more = more
return fn
return hook
def epilog(text):
"""Adds an 'epilog' property to a CMD function.
It will be shown in the epilog. Usually useful for examples.
"""
def hook(fn):
fn.epilog = text
return fn
return hook
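# Hedged usage sketch (the command and epilog text are hypothetical):
#
#   @epilog('Example:\n  tool pet --color green rex')
#   def CMDpet(parser, args):
#     ...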
def CMDhelp(parser, args):
"""Prints list of commands or help for a specific command."""
# This is the default help implementation. It can be disabled or overridden if
# wanted.
if not any(i in ('-h', '--help') for i in args):
args = args + ['--help']
_, args = parser.parse_args(args)
# Never gets there.
assert False
def _get_color_module():
"""Returns the colorama module if available.
If so, assumes colors are supported and return the module handle.
"""
return sys.modules.get('colorama') or sys.modules.get('third_party.colorama')
def _function_to_name(name):
"""Returns the name of a CMD function."""
return name[3:].replace('_', '-')
class CommandDispatcher(object):
def __init__(self, module):
"""module is the name of the main python module where to look for commands.
The python builtin variable __name__ MUST be used for |module|. If the
script is executed in the form 'python script.py', __name__ == '__main__'
and sys.modules['script'] doesn't exist. On the other hand, if it is unit
tested, __main__ will be the unit test's module, so the script has to reference
itself as 'script'. __name__ always matches the right value.
"""
self.module = sys.modules[module]
def enumerate_commands(self):
"""Returns a dict of command and their handling function.
The commands must be in the '__main__' module. To import a command from a
submodule, use:
from mysubcommand import CMDfoo
Automatically adds 'help' if not already defined.
Normalizes '_' in the commands to '-'.
A command can be effectively disabled by setting a module-level variable to None,
e.g.:
CMDhelp = None
"""
cmds = dict(
(_function_to_name(name), getattr(self.module, name))
for name in dir(self.module) if name.startswith('CMD'))
cmds.setdefault('help', CMDhelp)
return cmds
def find_nearest_command(self, name_asked):
"""Retrieves the function to handle a command as supplied by the user.
It automatically tries to guess the _intended command_ by handling typos
and/or incomplete names.
"""
commands = self.enumerate_commands()
name_to_dash = name_asked.replace('_', '-')
if name_to_dash in commands:
return commands[name_to_dash]
# An exact match was not found. Try to be smart and look if there's
# something similar.
commands_with_prefix = [c for c in commands if c.startswith(name_asked)]
if len(commands_with_prefix) == 1:
return commands[commands_with_prefix[0]]
# A #closeenough approximation of Levenshtein distance.
def close_enough(a, b):
return difflib.SequenceMatcher(a=a, b=b).ratio()
hamming_commands = sorted(
((close_enough(c, name_asked), c) for c in commands),
reverse=True)
if (hamming_commands[0][0] - hamming_commands[1][0]) < 0.3:
# Too ambiguous.
return
if hamming_commands[0][0] < 0.8:
# Not similar enough. Don't be a fool and run a random command.
return
return commands[hamming_commands[0][1]]
def _gen_commands_list(self):
"""Generates the short list of supported commands."""
commands = self.enumerate_commands()
docs = sorted(
(cmd_name, self._create_command_summary(cmd_name, handler))
for cmd_name, handler in commands.iteritems())
# Skip commands without a docstring.
docs = [i for i in docs if i[1]]
# Then calculate maximum length for alignment:
length = max(len(c) for c in commands)
# Look if color is supported.
colors = _get_color_module()
green = reset = ''
if colors:
green = colors.Fore.GREEN
reset = colors.Fore.RESET
return (
'Commands are:\n' +
''.join(
' %s%-*s%s %s\n' % (green, length, cmd_name, reset, doc)
for cmd_name, doc in docs))
def _add_command_usage(self, parser, command):
"""Modifies an OptionParser object with the function's documentation."""
cmd_name = _function_to_name(command.__name__)
if cmd_name == 'help':
cmd_name = '<command>'
# Use the module's docstring as the description for the 'help' command if
# available.
parser.description = (self.module.__doc__ or '').rstrip()
if parser.description:
parser.description += '\n\n'
parser.description += self._gen_commands_list()
# Do not touch epilog.
else:
# Use the command's docstring if available. For commands, unlike module
# docstring, realign.
lines = (command.__doc__ or '').rstrip().splitlines()
if lines[:1]:
rest = textwrap.dedent('\n'.join(lines[1:]))
parser.description = '\n'.join((lines[0], rest))
else:
parser.description = lines[0] if lines else ''
if parser.description:
parser.description += '\n'
parser.epilog = getattr(command, 'epilog', None)
if parser.epilog:
parser.epilog = '\n' + parser.epilog.strip() + '\n'
more = getattr(command, 'usage_more', '')
extra = '' if not more else ' ' + more
parser.set_usage('usage: %%prog %s [options]%s' % (cmd_name, extra))
@staticmethod
def _create_command_summary(cmd_name, command):
"""Creates a oneliner summary from the command's docstring."""
if cmd_name != _function_to_name(command.__name__):
# Skip aliases. For example using at module level:
# CMDfoo = CMDbar
return ''
doc = command.__doc__ or ''
line = doc.split('\n', 1)[0].rstrip('.')
if not line:
return line
return (line[0].lower() + line[1:]).strip()
def execute(self, parser, args):
"""Dispatches execution to the right command.
Falls back to 'help' if not disabled.
"""
# Unconditionally disable format_description() and format_epilog().
# Technically, a formatter should be used but it's not worth (yet) the
# trouble.
parser.format_description = lambda _: parser.description or ''
parser.format_epilog = lambda _: parser.epilog or ''
if args:
if args[0] in ('-h', '--help') and len(args) > 1:
# Reverse the argument order so 'tool --help cmd' is rewritten to
# 'tool cmd --help'.
args = [args[1], args[0]] + args[2:]
command = self.find_nearest_command(args[0])
if command:
if command.__name__ == 'CMDhelp' and len(args) > 1:
# Reverse the argument order so 'tool help cmd' is rewritten to
# 'tool cmd --help'. Do it here since we want 'tool hel cmd' to work
# too.
args = [args[1], '--help'] + args[2:]
command = self.find_nearest_command(args[0]) or command
# "fix" the usage and the description now that we know the subcommand.
self._add_command_usage(parser, command)
return command(parser, args[1:])
cmdhelp = self.enumerate_commands().get('help')
if cmdhelp:
# Not a known command. Default to help.
self._add_command_usage(parser, cmdhelp)
return cmdhelp(parser, args)
# Nothing can be done.
return 2
|
|
# Copyright 2011 Alex K (wtwf.com)
__author__ = 'wtwf.com (Alex K)'
import os
import logging
import datetime
import urllib
import PyRSS2Gen as rss
from google.appengine.api import mail
from google.appengine.api import users
from google.appengine.ext import ndb
from google.appengine.ext import webapp
from google.appengine.ext.webapp.mail_handlers import InboundMailHandler
from google.appengine.api.mail import InboundEmailMessage
from wtwf import wtwfhandler
from wtwf.WtwfModel import WtwfNdbModel
from crud import crud_model
from crud import crud_handler
# wget -O - 'http://localhost:8080/mailfeed/test' | xmllint -format -
class MailFeed(crud_model.CrudNdbModel):
"""Info and options about a feed. None yet."""
name = ndb.StringProperty()
# automatic fields
created = ndb.DateTimeProperty(auto_now_add=True)
class MailFeedItem(crud_model.CrudNdbModel):
"""Stores Info about a post to a feed."""
parent_model_name = 'MailFeed'
subject = ndb.StringProperty()
body = ndb.TextProperty()
guid = ndb.StringProperty()
# automatic fields
created = ndb.DateTimeProperty(auto_now_add=True)
class EmailToFeed(InboundMailHandler):
def post(self, name):
"""Transforms body to email request."""
try:
name = urllib.unquote(name).split('@')[0]
except:
pass
# get the feed object
feed = MailFeed.query(MailFeed.name == name).get()
if feed is not None:
self.receive(mail.InboundEmailMessage(self.request.body), feed)
else:
# 404 ?
pass
def receive(self, mail_message, feed):
sender = None
if 'list-id' in mail_message.original:
sender = mail_message.original['list-id'].strip("""<>"'`""").split(".")[0]
else:
sender = mail_message.sender
if sender:
# strip it just to the domain name
try:
short_sender = sender.split("@")[1].split(".")[-2]
if short_sender in ['gmail', 'google', 'yahoo', 'aol', 'ca']:
# o.k. try the bit before the @ sign for gmail users
sender = sender.split("@")[0]
else:
sender = short_sender
except IndexError:
pass
subject = mail_message.subject
if sender:
subject = ": ".join([sender, subject])
body = mail_message.bodies().next()[1].decode()
logging.info("Subject is : %r", subject)
logging.debug("Body is : %r", body)
item = MailFeedItem(parent=feed.key, subject=subject, body=body)
item.put()
logging.info("Added a message for: " + feed.name)
class FeedFromEmail(webapp.RequestHandler):
"""output a feed for a given name."""
def get(self, name):
feed = MailFeed.query(MailFeed.name == name).get()
if not feed:
# no feed; we need to make one
feed = MailFeed(name=name)
feed.put()
items = MailFeedItem.query(ancestor=feed.key).order(
-MailFeedItem.created).fetch(5)
f = rss.RSS2(
title="Generated Feed",
link="http://example.com/",
description="Generated Feed ",
lastBuildDate=datetime.datetime.now(),
)
for x in items:
guid = x.guid
if not guid:
x.guid = 'feedapp-%s-%s' % (name, x.created)
try:
x.save()
except:
pass
f.items.append(rss.RSSItem(
title=x.subject,
link=None,
description=x.body,
guid=rss.Guid(x.guid, False),
pubDate=x.created,
))
self.response.headers['Content-Type'] = 'text/xml'
f.write_xml(self.response.out)
class SetupDemo(webapp.RequestHandler):
"""Setup a Demo feed."""
def get(self):
if not users.is_current_user_admin():
self.error(401)
return
# remove the old test
name = "test_feed"
feed = MailFeed.query(MailFeed.name == name).get()
if feed:
for item in MailFeedItem.query(ancestor=feed.key):
item.key.delete()
feed.key.delete()
# now make some test stuff
feed = MailFeed(name=name)
feed.put()
logging.info('added new feed: %s', name)
testdata = os.path.join(os.path.dirname(__file__), 'testdata')
etf = EmailToFeed()
for x in range(1, 4):
filename = os.path.join(testdata, "email-%02d.txt" % x)
logging.info('adding: %s', filename)
self.response.out.write(filename + '</br>')
f = file(filename)
body = '\r\n'.join(line.rstrip() for line in f)
f.close()
# now inject this into the code where we process emails.
msg = InboundEmailMessage(body)
etf.receive(msg, feed)
self.response.out.write('<p><button onClick="history.back()">' +
'DONE</button></p>')
class BulkDeleteMailItems(webapp.RequestHandler):
"""Blow away old feed items."""
def get(self):
logging.info('BulkDeleteMailItems')
if not users.is_current_user_admin():
self.error(401)
return
logging.info('Passed Auth')
# olderthan = datetime.datetime(2018,1,1)
olderthan = datetime.datetime.now() - datetime.timedelta(days=180)
q = MailFeedItem.query().filter(MailFeedItem.created < olderthan)
items = q.fetch(5000, keys_only=True)
self.response.out.write('<p>%d items</p>' % len(items))
ndb.delete_multi(items)
self.response.out.write('<p><button onClick="history.back()">' +
'DONE</button></p>')
class MailItemDataHandler(crud_handler.GetCrudHandler(MailFeedItem)):
def postEntity(self, item, js):
if self.request.get('action') == 'tombstone':
item.body = item.subject = "This post is no longer available."
item.put()
# no need to update the id from the key because the item will never be new
js = item.AsJsonObject(js=js)
return js
|
|
#!/usr/bin/env python
"""This file defines the base classes for Flows.
A Flow is a state machine which executes actions on the
client. Messages are transmitted between the flow object and the
client with their responses introduced into a state handler within the
flow.
The flow can send messages to a client, or launch other child flows. While these
messages are processed, the flow can be suspended indefinitely into the data
store. When replies arrive from the client, or a child flow, the flow is woken
up and the responses are sent to one of the flow state methods.
In order for the flow to be suspended and restored, its state is
stored in a protobuf. Rather than storing the entire flow, the
preserved state is well defined and can be found in the flow's "state"
attribute. Note that this means that any parameters assigned to the
flow object itself are not preserved across state executions - only
parameters specifically stored in the state are preserved.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import traceback
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import registry
from grr_response_core.lib import type_info
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import random
from grr_response_core.stats import metrics
from grr_response_server import access_control
from grr_response_server import data_store
from grr_response_server.databases import db
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
GRR_FLOW_INVALID_FLOW_COUNT = metrics.Counter("grr_flow_invalid_flow_count")
class Error(Exception):
"""Base class for this package's exceptions."""
class CanNotStartFlowWithExistingIdError(Error):
"""Raises by StartFlow when trying to start a flow with an exising id."""
def __init__(self, client_id, flow_id):
message = ("Flow %s already exists on the client %s." %
(flow_id, client_id))
super(CanNotStartFlowWithExistingIdError, self).__init__(message)
self.client_id = client_id
self.flow_id = flow_id
class FlowResourcesExceededError(Error):
"""An error indicating that the flow used too many resources."""
# This is an implementation of an AttributedDict taken from
# http://stackoverflow.com/questions/4984647/accessing-dict-keys-like-an-attribute-in-python
# It works very well but there is a small drawback - there is no way
# to assign an attribute to this dict that does not get serialized. Do
# not inherit from this class; there might be interesting side
# effects.
class AttributedDict(dict):
def __init__(self, *args, **kwargs):
super(AttributedDict, self).__init__(*args, **kwargs)
self.__dict__ = self
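# Because the instance dict is the dict itself, attribute and item access are
# interchangeable, e.g. (sketch):
#
#   d = AttributedDict(foo=1)
#   assert d.foo == d['foo'] == 1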
def FilterArgsFromSemanticProtobuf(protobuf, kwargs):
"""Assign kwargs to the protobuf, and remove them from the kwargs dict."""
for descriptor in protobuf.type_infos:
value = kwargs.pop(descriptor.name, None)
if value is not None:
setattr(protobuf, descriptor.name, value)
def GetOutputPluginStates(output_plugins, source=None, token=None):
"""Initializes state for a list of output plugins."""
output_plugins_states = []
for plugin_descriptor in output_plugins:
plugin_class = plugin_descriptor.GetPluginClass()
try:
_, plugin_state = plugin_class.CreatePluginAndDefaultState(
source_urn=source, args=plugin_descriptor.plugin_args, token=token)
except Exception as e: # pylint: disable=broad-except
raise ValueError("Plugin %s failed to initialize (%s)" %
(plugin_class, e))
# TODO(amoser): Those do not need to be inside the state, they
# could be part of the plugin descriptor.
plugin_state["logs"] = []
plugin_state["errors"] = []
output_plugins_states.append(
rdf_flow_runner.OutputPluginState(
plugin_state=plugin_state, plugin_descriptor=plugin_descriptor))
return output_plugins_states
def RandomFlowId():
"""Returns a random flow id encoded as a hex string."""
return "%08X" % random.PositiveUInt32()
def StartFlow(client_id=None,
cpu_limit=None,
creator=None,
flow_args=None,
flow_cls=None,
network_bytes_limit=None,
original_flow=None,
output_plugins=None,
start_at=None,
parent_flow_obj=None,
parent_hunt_id=None,
runtime_limit=None,
**kwargs):
"""The main factory function for creating and executing a new flow.
Args:
client_id: ID of the client this flow should run on.
cpu_limit: CPU limit in seconds for this flow.
creator: Username that requested this flow.
flow_args: An arg protocol buffer which is an instance of the required
flow's args_type class attribute.
flow_cls: Class of the flow that should be started.
network_bytes_limit: Limit on the network traffic this flow can generate.
original_flow: A FlowReference object in case this flow was copied from
another flow.
output_plugins: An OutputPluginDescriptor object indicating what output
plugins should be used for this flow.
start_at: If specified, the flow will not be started immediately, but at the
given time.
parent_flow_obj: A parent flow object. None if this is a top level flow.
parent_hunt_id: String identifying parent hunt. Can't be passed together
with parent_flow_obj.
runtime_limit: Runtime limit as Duration for all ClientActions.
**kwargs: If args or runner_args are not specified, we construct these
protobufs from these keywords.
Returns:
the flow id of the new flow.
Raises:
ValueError: Unknown or invalid parameters were provided.
"""
if parent_flow_obj is not None and parent_hunt_id is not None:
raise ValueError(
"parent_flow_obj and parent_hunt_id are mutually exclusive.")
# Is the required flow a known flow?
try:
registry.FlowRegistry.FlowClassByName(flow_cls.__name__)
except ValueError:
GRR_FLOW_INVALID_FLOW_COUNT.Increment()
raise ValueError("Unable to locate flow %s" % flow_cls.__name__)
if not client_id:
raise ValueError("Client_id is needed to start a flow.")
# Now parse the flow args into the new object from the keywords.
if flow_args is None:
flow_args = flow_cls.args_type()
FilterArgsFromSemanticProtobuf(flow_args, kwargs)
# At this point we should have exhausted all the keyword args. If any are left
# over, we do not know what to do with them, so raise.
if kwargs:
raise type_info.UnknownArg("Unknown parameters to StartFlow: %s" % kwargs)
# Check that the flow args are valid.
flow_args.Validate()
rdf_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_class_name=flow_cls.__name__,
args=flow_args,
create_time=rdfvalue.RDFDatetime.Now(),
creator=creator,
output_plugins=output_plugins,
original_flow=original_flow,
flow_state="RUNNING")
if parent_hunt_id is not None and parent_flow_obj is None:
rdf_flow.flow_id = parent_hunt_id
else:
rdf_flow.flow_id = RandomFlowId()
# For better performance, only do conflicting IDs check for top-level flows.
if not parent_flow_obj:
try:
data_store.REL_DB.ReadFlowObject(client_id, rdf_flow.flow_id)
raise CanNotStartFlowWithExistingIdError(client_id, rdf_flow.flow_id)
except db.UnknownFlowError:
pass
if parent_flow_obj: # A flow is a nested flow.
parent_rdf_flow = parent_flow_obj.rdf_flow
rdf_flow.long_flow_id = "%s/%s" % (parent_rdf_flow.long_flow_id,
rdf_flow.flow_id)
rdf_flow.parent_flow_id = parent_rdf_flow.flow_id
rdf_flow.parent_hunt_id = parent_rdf_flow.parent_hunt_id
rdf_flow.parent_request_id = parent_flow_obj.GetCurrentOutboundId()
if parent_rdf_flow.creator:
rdf_flow.creator = parent_rdf_flow.creator
elif parent_hunt_id: # A flow is a root-level hunt-induced flow.
rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)
rdf_flow.parent_hunt_id = parent_hunt_id
else: # A flow is a root-level non-hunt flow.
rdf_flow.long_flow_id = "%s/%s" % (client_id, rdf_flow.flow_id)
if output_plugins:
rdf_flow.output_plugins_states = GetOutputPluginStates(
output_plugins,
rdf_flow.long_flow_id,
token=access_control.ACLToken(username=rdf_flow.creator))
if network_bytes_limit is not None:
rdf_flow.network_bytes_limit = network_bytes_limit
if cpu_limit is not None:
rdf_flow.cpu_limit = cpu_limit
if runtime_limit is not None:
rdf_flow.runtime_limit_us = runtime_limit
logging.info(u"Scheduling %s(%s) on %s (%s)", rdf_flow.long_flow_id,
rdf_flow.flow_class_name, client_id, start_at or "now")
rdf_flow.current_state = "Start"
flow_obj = flow_cls(rdf_flow)
# Prevent a race condition, where a flow is scheduled twice, because one
# worker inserts the row and another worker silently updates the existing row.
allow_update = False
if start_at is None:
# Store an initial version of the flow straight away. This is needed so the
# database doesn't raise consistency errors due to missing parent keys when
# writing logs / errors / results which might happen in Start().
try:
data_store.REL_DB.WriteFlowObject(flow_obj.rdf_flow, allow_update=False)
except db.FlowExistsError:
raise CanNotStartFlowWithExistingIdError(client_id, rdf_flow.flow_id)
allow_update = True
try:
# Just run the first state inline. NOTE: Running synchronously means
# that this runs on the thread that starts the flow. The advantage is
# that the Start method can raise any errors immediately.
flow_obj.Start()
# The flow does not need to actually remain running.
if not flow_obj.outstanding_requests:
flow_obj.RunStateMethod("End")
# Additional check for the correct state in case the End method raised
# and terminated the flow.
if flow_obj.IsRunning():
flow_obj.MarkDone()
except Exception as e: # pylint: disable=broad-except
# We catch all exceptions that happen in Start() and mark the flow as
# failed.
msg = compatibility.NativeStr(e)
if compatibility.PY2:
msg = msg.decode("utf-8", "replace")
flow_obj.Error(error_message=msg, backtrace=traceback.format_exc())
else:
flow_obj.CallState("Start", start_time=start_at)
flow_obj.PersistState()
try:
data_store.REL_DB.WriteFlowObject(
flow_obj.rdf_flow, allow_update=allow_update)
except db.FlowExistsError:
raise CanNotStartFlowWithExistingIdError(client_id, rdf_flow.flow_id)
if parent_flow_obj is not None:
# We can optimize here and not write requests/responses to the database
# since we have to do this for the parent flow at some point anyways.
parent_flow_obj.MergeQueuedMessages(flow_obj)
else:
flow_obj.FlushQueuedMessages()
return rdf_flow.flow_id
|
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''pyglet is a cross-platform games and multimedia package.
Detailed documentation is available at http://www.pyglet.org
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import os
import sys
_is_epydoc = hasattr(sys, 'is_epydoc') and sys.is_epydoc
#: The release version of this pyglet installation.
#:
#: Valid only if pyglet was installed from a source or binary distribution
#: (i.e. not in a checked-out copy from SVN).
#:
#: Use setuptools if you need to check for a specific release version, e.g.::
#:
#: >>> import pyglet
#: >>> from pkg_resources import parse_version
#: >>> parse_version(pyglet.version) >= parse_version('1.1')
#: True
#:
version = '1.2dev'
def _require_ctypes_version(version):
# Check ctypes version
import ctypes
req = [int(i) for i in version.split('.')]
have = [int(i) for i in ctypes.__version__.split('.')]
if not tuple(have) >= tuple(req):
raise ImportError('pyglet requires ctypes %s or later.' % version)
_require_ctypes_version('1.0.0')
_enable_optimisations = not __debug__
if getattr(sys, 'frozen', None):
_enable_optimisations = True
#: Global dict of pyglet options. To change an option from its default, you
#: must import ``pyglet`` before any sub-packages. For example::
#:
#: import pyglet
#: pyglet.options['debug_gl'] = False
#:
#: The default options can be overridden from the OS environment. The
#: corresponding environment variable for each option key is prefaced by
#: ``PYGLET_``. For example, in Bash you can set the ``debug_gl`` option with::
#:
#: PYGLET_DEBUG_GL=True; export PYGLET_DEBUG_GL
#:
#: For options requiring a tuple of values, separate each value with a comma.
#:
#: The non-development options are:
#:
#: audio
#: A sequence of the names of audio modules to attempt to load, in
#: order of preference. Valid driver names are:
#:
#: * directsound, the Windows DirectSound audio module (Windows only)
#: * pulse, the PulseAudio module (Linux only)
#: * openal, the OpenAL audio module
#: * silent, no audio
#: debug_lib
#: If True, prints the path of each dynamic library loaded.
#: debug_gl
#: If True, all calls to OpenGL functions are checked afterwards for
#: errors using ``glGetError``. This will severely impact performance,
#: but provides useful exceptions at the point of failure. By default,
#: this option is enabled if ``__debug__`` is True (i.e., if Python was not run
#: with the -O option). It is disabled by default when pyglet is "frozen"
#: within a py2exe or py2app library archive.
#: shadow_window
#: By default, pyglet creates a hidden window with a GL context when
#: pyglet.gl is imported. This allows resources to be loaded before
#: the application window is created, and permits GL objects to be
#: shared between windows even after they've been closed. You can
#: disable the creation of the shadow window by setting this option to
#: False. Recommended for advanced developers only.
#:
#: **Since:** pyglet 1.1
#: vsync
#: If set, the `pyglet.window.Window.vsync` property is ignored, and
#: this option overrides it (to either force vsync on or off). If unset,
#: or set to None, the `pyglet.window.Window.vsync` property behaves
#: as documented.
#: xsync
#: If set (the default), pyglet will attempt to synchronise the drawing of
#: double-buffered windows to the border updates of the X11 window
#: manager. This improves the appearance of the window during resize
#: operations. This option only affects double-buffered windows on
#: X11 servers supporting the Xsync extension with a window manager
#: that implements the _NET_WM_SYNC_REQUEST protocol.
#:
#: **Since:** pyglet 1.1
#:
options = {
'audio': ('directsound', 'pulse', 'openal', 'silent'),
'font': ('gdiplus', 'win32'), # ignored outside win32; win32 is deprecated
'debug_font': False,
'debug_gl': not _enable_optimisations,
'debug_gl_trace': False,
'debug_gl_trace_args': False,
'debug_graphics_batch': False,
'debug_lib': False,
'debug_media': False,
'profile_media': False,
'debug_texture': False,
'debug_trace': False,
'debug_trace_args': False,
'debug_trace_depth': 1,
'debug_trace_flush': True,
'debug_win32': False,
'debug_x11': False,
'graphics_vbo': True,
'shadow_window': True,
'vsync': None,
'xsync': True,
'xlib_fullscreen_override_redirect': False,
}
_option_types = {
'audio': tuple,
'font': tuple,
'debug_font': bool,
'debug_gl': bool,
'debug_gl_trace': bool,
'debug_gl_trace_args': bool,
'debug_graphics_batch': bool,
'debug_lib': bool,
'debug_media': bool,
'profile_media': bool,
'debug_texture': bool,
'debug_trace': bool,
'debug_trace_args': bool,
'debug_trace_depth': int,
'debug_trace_flush': bool,
'debug_win32': bool,
'debug_x11': bool,
'graphics_vbo': bool,
'shadow_window': bool,
'vsync': bool,
'xsync': bool,
'xlib_fullscreen_override_redirect': bool,
}
def _read_environment():
'''Read defaults for options from environment'''
for key in options:
env = 'PYGLET_%s' % key.upper()
try:
value = os.environ[env]
if _option_types[key] is tuple:
options[key] = value.split(',')
elif _option_types[key] is bool:
options[key] = value in ('true', 'TRUE', 'True', '1')
elif _option_types[key] is int:
options[key] = int(value)
except KeyError:
pass
_read_environment()
if sys.platform == 'cygwin':
# This hack pretends that the posix-like ctypes provides windows
# functionality. COM does not work with this hack, so there is no
# DirectSound support.
import ctypes
ctypes.windll = ctypes.cdll
ctypes.oledll = ctypes.cdll
ctypes.WINFUNCTYPE = ctypes.CFUNCTYPE
ctypes.HRESULT = ctypes.c_long
# Call tracing
# ------------
_trace_filename_abbreviations = {}
def _trace_repr(value, size=40):
value = repr(value)
if len(value) > size:
value = value[:size//2-2] + '...' + value[-size//2-1:]
return value
def _trace_frame(thread, frame, indent):
from pyglet import lib
if frame.f_code is lib._TraceFunction.__call__.func_code:
is_ctypes = True
func = frame.f_locals['self']._func
name = func.__name__
location = '[ctypes]'
else:
is_ctypes = False
code = frame.f_code
name = code.co_name
path = code.co_filename
line = code.co_firstlineno
try:
filename = _trace_filename_abbreviations[path]
except KeyError:
# Trim path down
dir = ''
path, filename = os.path.split(path)
while len(dir + filename) < 30:
filename = os.path.join(dir, filename)
path, dir = os.path.split(path)
if not dir:
filename = os.path.join('', filename)
break
else:
filename = os.path.join('...', filename)
_trace_filename_abbreviations[path] = filename
location = '(%s:%d)' % (filename, line)
if indent:
name = 'Called from %s' % name
print '[%d] %s%s %s' % (thread, indent, name, location)
if _trace_args:
if is_ctypes:
args = [_trace_repr(arg) for arg in frame.f_locals['args']]
print ' %sargs=(%s)' % (indent, ', '.join(args))
else:
for argname in code.co_varnames[:code.co_argcount]:
try:
argvalue = _trace_repr(frame.f_locals[argname])
print ' %s%s=%s' % (indent, argname, argvalue)
except:
pass
if _trace_flush:
sys.stdout.flush()
def _thread_trace_func(thread):
def _trace_func(frame, event, arg):
if event == 'call':
indent = ''
for i in range(_trace_depth):
_trace_frame(thread, frame, indent)
indent += ' '
frame = frame.f_back
if not frame:
break
elif event == 'exception':
(exception, value, traceback) = arg
print 'First chance exception raised:', repr(exception)
return _trace_func
def _install_trace():
global _trace_thread_count
sys.setprofile(_thread_trace_func(_trace_thread_count))
_trace_thread_count += 1
_trace_thread_count = 0
_trace_args = options['debug_trace_args']
_trace_depth = options['debug_trace_depth']
_trace_flush = options['debug_trace_flush']
if options['debug_trace']:
_install_trace()
# Lazy loading
# ------------
class _ModuleProxy(object):
_module = None
def __init__(self, name):
self.__dict__['_module_name'] = name
def __getattr__(self, name):
try:
return getattr(self._module, name)
except AttributeError:
if self._module is not None:
raise
import_name = 'pyglet.%s' % self._module_name
__import__(import_name)
module = sys.modules[import_name]
object.__setattr__(self, '_module', module)
globals()[self._module_name] = module
return getattr(module, name)
def __setattr__(self, name, value):
try:
setattr(self._module, name, value)
except AttributeError:
if self._module is not None:
raise
import_name = 'pyglet.%s' % self._module_name
__import__(import_name)
module = sys.modules[import_name]
object.__setattr__(self, '_module', module)
globals()[self._module_name] = module
setattr(module, name, value)
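# Sketch of the lazy-loading behaviour (hypothetical interactive session):
# attribute access on a proxy imports the real submodule on first use and
# rebinds the package-level name to it.
#   >>> import pyglet
#   >>> pyglet.clock.tick()   # first access triggers `import pyglet.clock`
#   >>> pyglet.clock          # now the real module, no longer a _ModuleProxy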
if not _is_epydoc:
app = _ModuleProxy('app')
canvas = _ModuleProxy('canvas')
clock = _ModuleProxy('clock')
com = _ModuleProxy('com')
event = _ModuleProxy('event')
font = _ModuleProxy('font')
gl = _ModuleProxy('gl')
graphics = _ModuleProxy('graphics')
image = _ModuleProxy('image')
input = _ModuleProxy('input')
lib = _ModuleProxy('lib')
media = _ModuleProxy('media')
resource = _ModuleProxy('resource')
sprite = _ModuleProxy('sprite')
text = _ModuleProxy('text')
window = _ModuleProxy('window')
# Fool py2exe, py2app into including all top-level modules (doesn't understand
# lazy loading)
if False:
import app
import canvas
import clock
import com
import event
import font
import gl
import graphics
import input
import image
import lib
import media
import resource
import sprite
import text
import window
# Hack around some epydoc bug that causes it to think pyglet.window is None.
if _is_epydoc:
import window
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
import numpy as np
import mxnet as mx
import random
import itertools
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from common import assert_raises_cudnn_not_satisfied, xfail_when_nonstandard_decimal_separator
import unittest
def test_box_nms_op():
def test_box_nms_forward(data, expected, thresh=0.5, valid=0, topk=-1, coord=2, score=1, cid=0, bid=-1,
force=False, in_format='corner', out_format='corner'):
for dtype in ['float16', 'float32', 'float64']:
data = mx.nd.array(data, dtype=dtype)
out = mx.contrib.nd.box_nms(data, overlap_thresh=thresh, valid_thresh=valid, topk=topk,
coord_start=coord, score_index=score, id_index=cid, background_id=bid,
force_suppress=force, in_format=in_format, out_format=out_format)
assert_almost_equal(out.asnumpy(), expected.astype(dtype), rtol=1e-3, atol=1e-3)
def test_box_nms_backward(data, grad, expected, thresh=0.5, valid=0, topk=-1, coord=2, score=1,
cid=0, bid=-1, force=False, in_format='corner', out_format='corner'):
in_var = mx.sym.Variable('data')
arr_data = mx.nd.array(data)
arr_grad = mx.nd.empty(arr_data.shape)
op = mx.contrib.sym.box_nms(in_var, overlap_thresh=thresh, valid_thresh=valid, topk=topk,
coord_start=coord, score_index=score, id_index=cid, background_id=bid,
force_suppress=force, in_format=in_format, out_format=out_format)
exe = op._bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
exe.forward(is_train=True)
exe.backward(mx.nd.array(grad))
assert_almost_equal(arr_grad.asnumpy(), expected)
def corner_to_center(data):
out = np.reshape(data, (-1, 6)).copy()
out[:, 2] = (data[:, 2] + data[:, 4]) / 2.0
out[:, 3] = (data[:, 3] + data[:, 5]) / 2.0
out[:, 4] = data[:, 4] - data[:, 2]
out[:, 5] = data[:, 5] - data[:, 3]
invalid = np.where(data[:, 0] < 0)[0]
out[invalid, :] = -1
return out
def center_to_corner(data):
        data = np.reshape(data, (-1, 6)).copy()
        out = data.copy()
out[:, 2] = data[:, 2] - data[:, 4] / 2.0
out[:, 3] = data[:, 3] - data[:, 5] / 2.0
out[:, 4] = data[:, 2] + data[:, 4] / 2.0
out[:, 5] = data[:, 3] + data[:, 5] / 2.0
invalid = np.where(data[:, 0] < 0)[0]
out[invalid, :] = -1
return out
def swap_position(data, expected, coord=2, score=1, cid=0, new_col=0):
data = np.reshape(data, (-1, 6))
expected = np.reshape(expected, (-1, 6))
new_coord = random.randint(0, 6 + new_col - 4)
others = list(range(new_coord)) + list(range(new_coord + 4, 6 + new_col))
random.shuffle(others)
new_score = others[0]
new_cid = others[1]
new_data = np.full((data.shape[0], data.shape[1] + new_col), -1.0)
new_expected = np.full((expected.shape[0], expected.shape[1] + new_col), -1.0)
new_data[:, new_coord:new_coord+4] = data[:, coord:coord+4]
new_data[:, new_score] = data[:, score]
new_data[:, new_cid] = data[:, cid]
new_expected[:, new_coord:new_coord+4] = expected[:, coord:coord+4]
new_expected[:, new_score] = expected[:, score]
new_expected[:, new_cid] = expected[:, cid]
return new_data, new_expected, new_coord, new_score, new_cid
# manually set up test cases
boxes = [[0, 0.5, 0.1, 0.1, 0.2, 0.2], [1, 0.4, 0.1, 0.1, 0.2, 0.2],
[0, 0.3, 0.1, 0.1, 0.14, 0.14], [2, 0.6, 0.5, 0.5, 0.7, 0.8]]
# case1
force = True
thresh = 0.5
expected = [[2, 0.6, 0.5, 0.5, 0.7, 0.8], [0, 0.5, 0.1, 0.1, 0.2, 0.2],
[0, 0.3, 0.1, 0.1, 0.14, 0.14], [-1, -1, -1, -1, -1, -1]]
grad = np.random.rand(4, 6)
expected_in_grad = grad[(1, 3, 2, 0), :]
expected_in_grad[1, :] = 0
test_box_nms_forward(np.array(boxes), np.array(expected), force=force, thresh=thresh)
test_box_nms_backward(np.array(boxes), grad, expected_in_grad, force=force, thresh=thresh)
# case2: multi batch
boxes2 = [boxes] * 3
expected2 = [expected] * 3
grad2 = np.array([grad.tolist()] * 3)
expected_in_grad2 = np.array([expected_in_grad.tolist()] * 3)
test_box_nms_forward(np.array(boxes2), np.array(expected2), force=force, thresh=thresh)
test_box_nms_backward(np.array(boxes2), grad2, expected_in_grad2, force=force, thresh=thresh)
# another new dim
boxes2 = [boxes2] * 2
expected2 = [expected2] * 2
grad2 = np.array([grad2.tolist()] * 2)
expected_in_grad2 = np.array([expected_in_grad2.tolist()] * 2)
test_box_nms_forward(np.array(boxes2), np.array(expected2), force=force, thresh=thresh)
test_box_nms_backward(np.array(boxes2), grad2, expected_in_grad2, force=force, thresh=thresh)
# case3: thresh
thresh = 0.1
boxes3 = boxes
expected3 = [[2, 0.6, 0.5, 0.5, 0.7, 0.8], [0, 0.5, 0.1, 0.1, 0.2, 0.2],
[-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]]
grad3 = np.random.rand(4, 6)
expected_in_grad3 = grad3[(1, 3, 2, 0), :]
expected_in_grad3[(1, 2), :] = 0
test_box_nms_forward(np.array(boxes3), np.array(expected3), force=force, thresh=thresh)
test_box_nms_backward(np.array(boxes3), grad3, expected_in_grad3, force=force, thresh=thresh)
# case4: non-force
boxes4 = boxes
force = False
expected4 = [[2, 0.6, 0.5, 0.5, 0.7, 0.8], [0, 0.5, 0.1, 0.1, 0.2, 0.2],
[1, 0.4, 0.1, 0.1, 0.2, 0.2], [-1, -1, -1, -1, -1, -1]]
grad4 = np.random.rand(4, 6)
expected_in_grad4 = grad4[(1, 2, 3, 0), :]
expected_in_grad4[2, :] = 0
test_box_nms_forward(np.array(boxes4), np.array(expected4), force=force, thresh=thresh)
test_box_nms_backward(np.array(boxes4), grad4, expected_in_grad4, force=force, thresh=thresh)
# case5: different coding
boxes5 = corner_to_center(np.array(boxes4))
test_box_nms_forward(np.array(boxes5), np.array(expected4), force=force, thresh=thresh,
in_format='center')
expected5 = corner_to_center(np.array(expected4))
test_box_nms_forward(np.array(boxes4), np.array(expected5), force=force, thresh=thresh,
out_format='center')
test_box_nms_forward(np.array(boxes5), np.array(expected5), force=force, thresh=thresh,
in_format='center', out_format='center')
# case6: different position
boxes6, expected6, new_coord, new_score, new_id = swap_position(np.array(boxes4),
np.array(expected4), new_col=2)
test_box_nms_forward(np.array(boxes6), np.array(expected6), force=force, thresh=thresh,
coord=new_coord, score=new_score, cid=new_id)
# case7: no id, should be same with force=True
force = False
thresh = 0.5
test_box_nms_forward(np.array(boxes), np.array(expected), force=force, thresh=thresh, cid=-1)
# case8: multi-batch thresh + topk
boxes8 = [[[1, 1, 0, 0, 10, 10], [1, 0.4, 0, 0, 10, 10], [1, 0.3, 0, 0, 10, 10]],
[[2, 1, 0, 0, 10, 10], [2, 0.4, 0, 0, 10, 10], [2, 0.3, 0, 0, 10, 10]],
[[3, 1, 0, 0, 10, 10], [3, 0.4, 0, 0, 10, 10], [3, 0.3, 0, 0, 10, 10]]]
expected8 = [[[1, 1, 0, 0, 10, 10], [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]],
[[2, 1, 0, 0, 10, 10], [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]],
[[3, 1, 0, 0, 10, 10], [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]]]
grad8 = np.random.rand(3, 3, 6)
expected_in_grad8 = np.zeros((3, 3, 6))
expected_in_grad8[(0, 1, 2), (0, 0, 0), :] = grad8[(0, 1, 2), (0, 0, 0), :]
force = False
thresh = 0.5
valid = 0.5
topk = 2
test_box_nms_forward(np.array(boxes8), np.array(expected8), force=force, thresh=thresh, valid=valid, topk=topk)
test_box_nms_backward(np.array(boxes8), grad8, expected_in_grad8, force=force, thresh=thresh, valid=valid, topk=topk)
# case9: background id filter out
# default background id -1
boxes9 = [[0, 0.5, 0.1, 0.1, 0.2, 0.2], [0, 0.4, 0.1, 0.1, 0.2, 0.2],
[1, 0.3, 0.1, 0.1, 0.14, 0.14], [-1, 0.6, 0.5, 0.5, 0.7, 0.8]]
expected9 = [[0, 0.5, 0.1, 0.1, 0.2, 0.2], [1, 0.3, 0.1, 0.1, 0.14, 0.14],
[-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]]
force = True
thresh = 0.5
grad9 = np.random.rand(4, 6)
expected_in_grad9 = grad9[(0, 2, 1, 3), :]
expected_in_grad9[(1, 3), :] = 0
test_box_nms_forward(np.array(boxes9), np.array(expected9), force=force, thresh=thresh)
test_box_nms_backward(np.array(boxes9), grad9, expected_in_grad9, force=force, thresh=thresh)
# set background id
background_id = 0
expected9 = [[-1, 0.6, 0.5, 0.5, 0.7, 0.8], [1, 0.3, 0.1, 0.1, 0.14, 0.14],
[-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]]
grad9 = np.random.rand(4, 6)
expected_in_grad9 = grad9[(2, 3, 1, 0), :]
expected_in_grad9[(0, 1), :] = 0
test_box_nms_forward(np.array(boxes9), np.array(expected9), force=force, thresh=thresh, bid=background_id)
test_box_nms_backward(np.array(boxes9), grad9, expected_in_grad9, force=force, thresh=thresh, bid=background_id)
def test_box_iou_op():
def numpy_box_iou(a, b, fmt='corner'):
def area(left, top, right, bottom):
return np.maximum(0, right - left) * np.maximum(0, bottom - top)
assert a.shape[-1] == 4
assert b.shape[-1] == 4
oshape = a.shape[:-1] + b.shape[:-1]
a = a.reshape((-1, 4))
ashape = a.shape
b = b.reshape((-1, 4))
a = np.tile(a, reps=[1, b.shape[0]]).reshape((-1, 4))
b = np.tile(b, reps=[ashape[0], 1]).reshape((-1, 4))
if fmt == 'corner':
al, at, ar, ab = np.split(a, 4, axis=-1)
bl, bt, br, bb = np.split(b, 4, axis=-1)
elif fmt == 'center':
ax, ay, aw, ah = np.split(a, 4, axis=-1)
bx, by, bw, bh = np.split(b, 4, axis=-1)
al, at, ar, ab = ax - aw / 2, ay - ah / 2, ax + aw / 2, ay + ah / 2
bl, bt, br, bb = bx - bw / 2, by - bh / 2, bx + bw / 2, by + bh / 2
else:
raise NotImplementedError("Fmt {} not supported".format(fmt))
width = np.maximum(0, np.minimum(ar, br) - np.maximum(al, bl))
height = np.maximum(0, np.minimum(ab, bb) - np.maximum(at, bt))
intersect = width * height
union = area(al, at, ar, ab) + area(bl, bt, br, bb) - intersect
union[np.where(intersect <= 0)] = 1e-12
iou = intersect / union
return iou.reshape(oshape)
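    # Worked example (illustrative only): corner boxes [0, 0, 2, 2] and
    # [1, 0, 3, 2] intersect over a 1 x 2 region, so intersect = 2,
    # union = 4 + 4 - 2 = 6, and IoU = 2 / 6 = 1 / 3.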
def generate_boxes(dims):
s1, off1, s2, off2 = np.random.rand(4) * 100
xy = np.random.rand(*(dims + [2])) * s1 + off1
wh = np.random.rand(*(dims + [2])) * s2 + off2
xywh = np.concatenate([xy, wh], axis=-1)
ltrb = np.concatenate([xy - wh / 2, xy + wh / 2], axis=-1)
return xywh, ltrb
for ndima in range(1, 6):
for ndimb in range(1, 6):
dims_a = np.random.randint(low=1, high=3, size=ndima).tolist()
dims_b = np.random.randint(low=1, high=3, size=ndimb).tolist()
# generate left, top, right, bottom
xywh_a, ltrb_a = generate_boxes(dims_a)
xywh_b, ltrb_b = generate_boxes(dims_b)
iou_np = numpy_box_iou(ltrb_a, ltrb_b, fmt='corner')
iou_np2 = numpy_box_iou(xywh_a, xywh_b, fmt='center')
iou_mx = mx.nd.contrib.box_iou(mx.nd.array(ltrb_a), mx.nd.array(ltrb_b), format='corner')
iou_mx2 = mx.nd.contrib.box_iou(mx.nd.array(xywh_a), mx.nd.array(xywh_b), format='center')
assert_allclose(iou_np, iou_np2, rtol=1e-5, atol=1e-5)
assert_allclose(iou_np, iou_mx.asnumpy(), rtol=1e-5, atol=1e-5)
assert_allclose(iou_np, iou_mx2.asnumpy(), rtol=1e-5, atol=1e-5)
def test_bipartite_matching_op():
def assert_match(inputs, x, y, threshold, is_ascend=False):
for dtype in ['float16', 'float32', 'float64']:
inputs = mx.nd.array(inputs, dtype=dtype)
x = np.array(x, dtype=dtype)
y = np.array(y, dtype=dtype)
a, b = mx.nd.contrib.bipartite_matching(inputs, threshold=threshold, is_ascend=is_ascend)
assert_array_equal(a.asnumpy().astype('int64'), x.astype('int64'))
assert_array_equal(b.asnumpy().astype('int64'), y.astype('int64'))
assert_match([[0.5, 0.6], [0.1, 0.2], [0.3, 0.4]], [1, -1, 0], [2, 0], 1e-12, False)
assert_match([[0.5, 0.6], [0.1, 0.2], [0.3, 0.4]], [-1, 0, 1], [1, 2], 100, True)
def test_multibox_target_op():
anchors = mx.nd.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]], ctx=default_context()).reshape((1, -1, 4))
cls_pred = mx.nd.array(list(range(10)), ctx=default_context()).reshape((1, -1, 2))
label = mx.nd.array([1, 0.1, 0.1, 0.5, 0.6], ctx=default_context()).reshape((1, -1, 5))
loc_target, loc_mask, cls_target = \
mx.nd.contrib.MultiBoxTarget(anchors, label, cls_pred,
overlap_threshold=0.5,
negative_mining_ratio=3,
negative_mining_thresh=0.4)
expected_loc_target = np.array([[5.0, 2.5000005, 3.4657357, 4.581454, 0., 0., 0., 0.]])
expected_loc_mask = np.array([[1, 1, 1, 1, 0, 0, 0, 0]])
expected_cls_target = np.array([[2, 0]])
assert_allclose(loc_target.asnumpy(), expected_loc_target, rtol=1e-5, atol=1e-5)
assert_array_equal(loc_mask.asnumpy(), expected_loc_mask)
assert_array_equal(cls_target.asnumpy(), expected_cls_target)
@xfail_when_nonstandard_decimal_separator
def test_gradient_multiplier_op():
# We use the quadratic function in combination with gradient multiplier
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
m = np.random.random_sample() - 0.5
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
gr_q_sym = mx.sym.contrib.gradientmultiplier(quad_sym, scalar=m)
for dtype in [np.float16, np.float32, np.float64]:
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
backward_expected = (2 * a * data_np + b) * m
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
output = mx.nd.contrib.gradientmultiplier(output, scalar=m)
assert_almost_equal(output.asnumpy(), expected,
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check forward
check_symbolic_forward(gr_q_sym, [data_np], [expected],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
# check backward
check_symbolic_backward(gr_q_sym, [data_np], [np.ones(expected.shape)],
[backward_expected],
rtol=1e-2 if dtype is np.float16 else 1e-5,
atol=1e-2 if dtype is np.float16 else 1e-5)
def test_multibox_prior_op():
h = 561
w = 728
X = mx.nd.random.uniform(shape=(1, 3, h, w))
Y = mx.contrib.nd.MultiBoxPrior(X, sizes=[0.75, 0.5, 0.25], ratios=[1, 2, 0.5])
assert_array_equal(Y.shape, np.array((1, 2042040, 4)))
boxes = Y.reshape((h, w, 5, 4))
assert_allclose(boxes.asnumpy()[250, 250, 0, :], np.array([0.055117, 0.071524, 0.63307 , 0.821524]), atol=1e-5, rtol=1e-5)
# relax first ratio if user insists
Y = mx.contrib.nd.MultiBoxPrior(X, sizes=[0.75, 0.5, 0.25], ratios=[20, 2, 0.5])
boxes = Y.reshape((h, w, 5, 4))
assert_allclose(boxes.asnumpy()[250, 250, 0, :], np.array([-0.948249, 0.362671, 1.636436, 0.530377]), atol=1e-5, rtol=1e-5)
def test_box_encode_op():
anchors = mx.nd.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]]).reshape((1, -1, 4))
refs = mx.nd.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]]).reshape((1, -1, 4))
samples = mx.nd.array([[0, 1]])
matches = mx.nd.array([[0, 1]])
means = mx.nd.array([0.0, 0.0, 0.0, 0.0])
stds = mx.nd.array([0.1, 0.1, 0.2, 0.2])
Y, mask = mx.nd.contrib.box_encode(samples, matches, anchors, refs, means, stds)
assert_allclose(Y.asnumpy(), np.zeros((1, 2, 4)), atol=1e-5, rtol=1e-5)
assert_allclose(mask.asnumpy(), np.array([[[0., 0., 0., 0.], [1., 1., 1., 1.]]]), atol=1e-5, rtol=1e-5)
def test_box_decode_op():
data = mx.nd.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]]).reshape((1, -1, 4))
anchors = mx.nd.array([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]]).reshape((1, -1, 4))
Y = mx.nd.contrib.box_decode(data, anchors, .1, .1, .2, .2)
assert_allclose(Y.asnumpy(), np.array([[[-0.0562755, -0.00865743, 0.26227552, 0.42465743], \
[0.13240421, 0.17859563, 0.93759584, 1.1174043 ]]]), atol=1e-5, rtol=1e-5)
def test_op_mrcnn_mask_target():
if default_context().device_type != 'gpu':
return
num_rois = 2
num_classes = 4
mask_size = (3, 3)
ctx = mx.gpu(0)
# (B, N, 4)
rois = mx.nd.array([[[2.3, 4.3, 2.2, 3.3],
[3.5, 5.5, 0.9, 2.4]]], ctx=ctx)
gt_masks = mx.nd.arange(0, 4*32*32, ctx=ctx).reshape(1, 4, 32, 32)
# (B, N)
matches = mx.nd.array([[2, 0]], ctx=ctx)
# (B, N)
cls_targets = mx.nd.array([[2, 1]], ctx=ctx)
mask_targets, mask_cls = mx.nd.contrib.mrcnn_mask_target(rois, gt_masks, matches, cls_targets,
num_rois=num_rois,
num_classes=num_classes,
mask_size=mask_size)
# Ground truth outputs were generated with GluonCV's target generator
# gluoncv.model_zoo.mask_rcnn.MaskTargetGenerator(1, num_rois, num_classes, mask_size)
gt_mask_targets = mx.nd.array([[[[[2193.4 , 2193.7332 , 2194.0667 ],
[2204.0667 , 2204.4 , 2204.7334 ],
[2214.7334 , 2215.0667 , 2215.4 ]],
[[2193.4 , 2193.7332 , 2194.0667 ],
[2204.0667 , 2204.4 , 2204.7334 ],
[2214.7334 , 2215.0667 , 2215.4 ]],
[[2193.4 , 2193.7332 , 2194.0667 ],
[2204.0667 , 2204.4 , 2204.7334 ],
[2214.7334 , 2215.0667 , 2215.4 ]],
[[2193.4 , 2193.7332 , 2194.0667 ],
[2204.0667 , 2204.4 , 2204.7334 ],
[2214.7334 , 2215.0667 , 2215.4 ]]],
[[[ 185. , 185.33334, 185.66667],
[ 195.66667, 196.00002, 196.33334],
[ 206.33333, 206.66666, 207. ]],
[[ 185. , 185.33334, 185.66667],
[ 195.66667, 196.00002, 196.33334],
[ 206.33333, 206.66666, 207. ]],
[[ 185. , 185.33334, 185.66667],
[ 195.66667, 196.00002, 196.33334],
[ 206.33333, 206.66666, 207. ]],
[[ 185. , 185.33334, 185.66667],
[ 195.66667, 196.00002, 196.33334],
[ 206.33333, 206.66666, 207. ]]]]])
gt_mask_cls = mx.nd.array([[0,0,1,0], [0,1,0,0]])
gt_mask_cls = gt_mask_cls.reshape(1,2,4,1,1).broadcast_axes(axis=(3,4), size=(3,3))
assert_almost_equal(mask_targets.asnumpy(), gt_mask_targets.asnumpy())
assert_almost_equal(mask_cls.asnumpy(), gt_mask_cls.asnumpy())
def test_dynamic_reshape():
def dynamic_reshape_testcases(src_shape, shape_arg, dst_shape):
data = mx.sym.Variable('data')
shape = mx.sym.Variable('shape')
net = mx.sym.contrib.dynamic_reshape(data, shape)
js = net.tojson()
net = mx.sym.load_json(js)
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
args = {
'data': mx.nd.array(dat_npy),
'shape': mx.nd.array(shape_arg)
}
args_grad = {
'data': mx.nd.empty(src_shape)
}
exe = net._bind(default_context(), args, args_grad)
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7
# test ndarray
X = mx.nd.random.uniform(shape=src_shape)
Y = mx.contrib.nd.dynamic_reshape(X, mx.nd.array(shape_arg))
assert_array_equal(Y.shape, dst_shape)
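    # Reminder of the shape keywords exercised below (per MXNet reshape
    # semantics): 0 copies the corresponding input dimension, -1 infers it
    # from the remaining size, -2 copies all remaining dimensions, -3 merges
    # two consecutive dimensions, and -4 splits one dimension into the two
    # values that follow it.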
test_cases = [
[(2, 3, 5, 5), (0, -1), (2, 75)],
[(2, 3, 5, 5), (0, 0, -1), (2, 3, 25)],
[(5, 3, 4, 5), (0, -1, 0), (5, 15, 4)],
[(2, 3, 5, 4), (-1, 0, 0), (8, 3, 5)],
[(2, 3, 5, 5), (0, 0, 0, 0), (2, 3, 5, 5)],
[(2, 4, 5, 3), (-1, 2, 2, 1), (30, 2, 2, 1)],
[(2, 3, 5, 6), (-2,), (2, 3, 5, 6)],
[(2, 3, 5, 6), (6, 1, -2), (6, 1, 5, 6)],
[(2, 3, 5, 6), (-3, -3), (6, 30)],
[(2, 3, 5, 6), (-3, -1), (6, 30)],
[(64,), (-4, 16, 4), (16, 4)],
[(64,), (-4, 16, -1), (16, 4)],
[(64, 1, 2, 3), (-4, 16, -1, -2), (16, 4, 1, 2, 3)]]
for test_case in test_cases:
dynamic_reshape_testcases(*test_case)
if __name__ == '__main__':
import nose
nose.runmodule()
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ShardedVariable."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session as session_lib
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import sharded_variable
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import nest
def _load_and_run(
model_dir,
inputs,
signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY):
"""Load a SavedModel into a TF 1.x-style graph and run `signature_key`."""
graph = ops.Graph()
with graph.as_default(), session_lib.Session() as session:
meta_graph_def = loader.load(session, [tag_constants.SERVING], model_dir)
signature = meta_graph_def.signature_def[signature_key]
feed_dict = {}
for arg_name in inputs.keys():
input_tensor = session.graph.get_tensor_by_name(
signature.inputs[arg_name].name)
feed_dict[input_tensor] = inputs[arg_name]
output_dict = {}
for output_name, output_tensor_info in signature.outputs.items():
output_dict[output_name] = session.graph.get_tensor_by_name(
output_tensor_info.name)
return session.run(output_dict, feed_dict=feed_dict)
class PartitionerTest(test.TestCase):
def test_fixed_shards_partitioner(self):
partitioner = sharded_variable.FixedShardsPartitioner(num_shards=2)
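    # Partitioners return the number of shards per axis: [2, 1] below means
    # the first axis is split into two shards and the second axis is not.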
got = partitioner(tensor_shape.TensorShape([10, 3]), dtypes.float32)
self.assertAllEqual(got, [2, 1])
def test_min_size_partitioner(self):
partitioner = sharded_variable.MinSizePartitioner(
min_shard_bytes=4, max_shards=2)
got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)
self.assertAllEqual(got, [2, 1])
partitioner = sharded_variable.MinSizePartitioner(
min_shard_bytes=4, max_shards=10)
got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)
self.assertAllEqual(got, [6, 1])
def test_max_size_partitioner(self):
partitioner = sharded_variable.MaxSizePartitioner(max_shard_bytes=4)
got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)
self.assertAllEqual(got, [6, 1])
partitioner = sharded_variable.MaxSizePartitioner(
max_shard_bytes=4, max_shards=2)
got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)
self.assertAllEqual(got, [2, 1])
partitioner = sharded_variable.MaxSizePartitioner(max_shard_bytes=1024)
got = partitioner(tensor_shape.TensorShape([6, 1]), dtypes.float32)
self.assertAllEqual(got, [1, 1])
class ShardedVariableTest(test.TestCase):
def test_sharded_variable_simple(self):
v0 = variables_lib.Variable([0])
v1 = variables_lib.Variable([1])
s = sharded_variable.ShardedVariable([v0, v1], name='s')
self.assertEqual(s.variables[0], v0)
self.assertEqual(s.variables[1], v1)
self.assertEqual(s.shape.as_list(), [2])
self.assertEqual(s.dtype, v0.dtype)
self.assertEqual(s.name, 's')
def test_assign(self):
v0 = variables_lib.Variable([[0, 0]])
v1 = variables_lib.Variable([[1, 1], [2, 2]])
v2 = variables_lib.Variable([[3, 3]])
s = sharded_variable.ShardedVariable([v0, v1, v2])
ret = s.assign([[4, 4], [5, 5], [6, 6], [7, 7]])
self.assertAllEqual(self.evaluate(s.variables[0]), [[4, 4]])
self.assertAllEqual(self.evaluate(s.variables[1]), [[5, 5], [6, 6]])
self.assertAllEqual(self.evaluate(s.variables[2]), [[7, 7]])
self.assertIs(ret, s)
def test_assign_add(self):
v0 = variables_lib.Variable([[0, 0]])
v1 = variables_lib.Variable([[1, 1], [2, 2]])
v2 = variables_lib.Variable([[3, 3]])
s = sharded_variable.ShardedVariable([v0, v1, v2])
ret = s.assign_add([[1, 1], [1, 1], [2, 2], [2, 2]])
self.assertAllEqual(self.evaluate(s.variables[0]), [[1, 1]])
self.assertAllEqual(self.evaluate(s.variables[1]), [[2, 2], [4, 4]])
self.assertAllEqual(self.evaluate(s.variables[2]), [[5, 5]])
self.assertIs(ret, s)
def test_assign_sub(self):
v0 = variables_lib.Variable([[0, 0]])
v1 = variables_lib.Variable([[1, 1], [2, 2]])
v2 = variables_lib.Variable([[3, 3]])
s = sharded_variable.ShardedVariable([v0, v1, v2])
ret = s.assign_sub([[0, 0], [1, 1], [1, 1], [3, 3]])
self.assertAllEqual(self.evaluate(s.variables[0]), [[0, 0]])
self.assertAllEqual(self.evaluate(s.variables[1]), [[0, 0], [1, 1]])
self.assertAllEqual(self.evaluate(s.variables[2]), [[0, 0]])
self.assertIs(ret, s)
def test_control_dep_on_assign(self):
v0 = variables_lib.Variable([[0, 0]])
v1 = variables_lib.Variable([[1, 1], [2, 2]])
v2 = variables_lib.Variable([[3, 3]])
s = sharded_variable.ShardedVariable([v0, v1, v2])
@def_function.function
def func():
ret = s.assign([[4, 4], [5, 5], [6, 6], [7, 7]])
with ops.control_dependencies([ret]):
a = array_ops.ones((1, 1))
with ops.control_dependencies([control_flow_ops.group(ret)]):
b = array_ops.ones((1, 1))
return a, b
func()
def test_convert_to_tensor(self):
v0 = variables_lib.Variable([[0, 0]])
v1 = variables_lib.Variable([[1, 1], [2, 2]])
v2 = variables_lib.Variable([[3, 3]])
s = sharded_variable.ShardedVariable([v0, v1, v2])
t = ops.convert_to_tensor(s)
self.assertAllEqual(t, [[0, 0], [1, 1], [2, 2], [3, 3]])
def test_save_restore(self):
fname = os.path.join(self.get_temp_dir(), 'checkpoint')
variables = [
variables_lib.Variable([0]),
variables_lib.Variable([1]),
variables_lib.Variable([2]),
variables_lib.Variable([3])
]
s = sharded_variable.ShardedVariable(variables, name='s')
cp = util.Checkpoint(s=s)
self.assertEqual(self.evaluate(cp.s.variables[0]), [0])
cp.write(fname)
self.evaluate(cp.s.variables[0].assign([4]))
self.assertEqual(self.evaluate(cp.s.variables[0]), [4])
cp.restore(fname)
# Tests that the original weights are restored.
self.assertEqual(self.evaluate(cp.s.variables[0]), [0])
def test_save_restore_different_partitions(self):
fname = os.path.join(self.get_temp_dir(), 'checkpoint')
variables = [
variables_lib.Variable([0]),
variables_lib.Variable([1]),
variables_lib.Variable([2]),
variables_lib.Variable([3])
]
s = sharded_variable.ShardedVariable(variables, name='s')
cp = util.Checkpoint(s=s)
cp.write(fname)
variables2 = [variables_lib.Variable([0, 0, 0, 0])]
s2 = sharded_variable.ShardedVariable(variables2, name='s')
# Restore from 4 partitions into 1.
cp2 = util.Checkpoint(s=s2)
cp2.restore(fname)
self.assertAllEqual(self.evaluate(cp2.s.variables[0]), [0, 1, 2, 3])
self.evaluate(cp2.s.variables[0].assign([5, 10, 15, 20]))
cp2.write(fname)
# Restore 1 partition into 4.
cp.restore(fname)
self.assertEqual(self.evaluate(cp.s.variables[0]), [5])
self.assertEqual(self.evaluate(cp.s.variables[1]), [10])
self.assertEqual(self.evaluate(cp.s.variables[2]), [15])
self.assertEqual(self.evaluate(cp.s.variables[3]), [20])
def test_save_restore_4_to_2_partitions(self):
fname = os.path.join(self.get_temp_dir(), 'checkpoint')
variables = [
variables_lib.Variable([0]),
variables_lib.Variable([1]),
variables_lib.Variable([2]),
variables_lib.Variable([3])
]
s = sharded_variable.ShardedVariable(variables, name='s')
cp = util.Checkpoint(s=s)
cp.write(fname)
variables2 = [
variables_lib.Variable([0, 0]),
variables_lib.Variable([0, 0])
]
s2 = sharded_variable.ShardedVariable(variables2, name='s')
cp2 = util.Checkpoint(s=s2)
cp2.restore(fname)
# Assert that weights from the 4 partitions were loaded here.
self.assertLen(cp2.s.variables, 2)
self.assertAllEqual(self.evaluate(cp2.s.variables[0]), [0, 1])
self.assertAllEqual(self.evaluate(cp2.s.variables[1]), [2, 3])
def test_delayed_restore(self):
fname = os.path.join(self.get_temp_dir(), 'checkpoint')
model = tracking.AutoTrackable()
variables = [
variables_lib.Variable([0]),
variables_lib.Variable([1]),
variables_lib.Variable([2]),
variables_lib.Variable([3])
]
model.s = sharded_variable.ShardedVariable(variables)
cp = util.Checkpoint(model=model)
cp.write(fname)
model2 = tracking.AutoTrackable()
cp2 = util.Checkpoint(model=model2)
cp2.restore(fname)
variables2 = [
variables_lib.Variable([0]),
variables_lib.Variable([0]),
variables_lib.Variable([0]),
variables_lib.Variable([0])
]
model2.s = sharded_variable.ShardedVariable(variables2)
self.assertAllEqual(self.evaluate(model2.s.variables[0]), [0])
self.assertAllEqual(self.evaluate(model2.s.variables[1]), [1])
self.assertAllEqual(self.evaluate(model2.s.variables[2]), [2])
self.assertAllEqual(self.evaluate(model2.s.variables[3]), [3])
def test_delayed_restore_4_to_2_partitions(self):
fname = os.path.join(self.get_temp_dir(), 'checkpoint')
model = tracking.AutoTrackable()
variables = [
variables_lib.Variable([0]),
variables_lib.Variable([1]),
variables_lib.Variable([2]),
variables_lib.Variable([3])
]
model.s = sharded_variable.ShardedVariable(variables)
cp = util.Checkpoint(model=model)
cp.write(fname)
model2 = tracking.AutoTrackable()
cp2 = util.Checkpoint(model=model2)
cp2.restore(fname)
variables2 = [
variables_lib.Variable([0, 0]),
variables_lib.Variable([0, 0])
]
model2.s = sharded_variable.ShardedVariable(variables2)
self.assertAllEqual(self.evaluate(model2.s.variables[0]), [0, 1])
self.assertAllEqual(self.evaluate(model2.s.variables[1]), [2, 3])
def test_save_graph_def(self):
root = tracking.AutoTrackable()
v1 = variables_lib.Variable([3.])
v2 = variables_lib.Variable([2.])
root.v = sharded_variable.ShardedVariable([v1, v2])
root.train = def_function.function(
lambda x: embedding_ops.embedding_lookup_v2(root.v.variables, x))
# TODO(b/144057383): Remove the necessity of root.serve once saving context
# is made to tf.function cache.
root.serve = def_function.function(
lambda x: embedding_ops.embedding_lookup_v2(root.v.variables[0], x),
input_signature=[tensor_spec.TensorSpec([2], dtypes.int32, name='x')])
# Trace and use root.train
self.assertAllEqual([3., 2.], root.train([0, 1]).numpy())
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save.save(root, save_dir, root.serve)
self.assertAllEqual([3., 2.],
_load_and_run(save_dir, {'x': [0, 1]})['output_0'])
# Continue using root.train for training
self.assertAllEqual([3., 2.], root.train([0, 1]).numpy())
def test_load_raises_error(self):
root = tracking.AutoTrackable()
v1 = variables_lib.Variable([3.])
v2 = variables_lib.Variable([2.])
root.v = sharded_variable.ShardedVariable([v1, v2])
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save.save(root, save_dir)
with self.assertRaisesRegex(
ValueError, 'Loading a saved_model containing ShardedVariable'):
load.load(save_dir)
def test_validation_errors(self):
with self.assertRaisesRegex(ValueError, 'Expected a list of '):
sharded_variable.ShardedVariable(
[variables_lib.Variable([0]), 'not-a-variable'])
with self.assertRaisesRegex(ValueError, 'must have the same dtype'):
sharded_variable.ShardedVariable([
variables_lib.Variable([0], dtype='int64'),
variables_lib.Variable([1], dtype='int32')
])
with self.assertRaisesRegex(ValueError, 'the same shapes except'):
sharded_variable.ShardedVariable([
variables_lib.Variable(array_ops.ones((5, 10))),
variables_lib.Variable(array_ops.ones((5, 20)))
])
with self.assertRaisesRegex(ValueError, '`SaveSliceInfo` should not'):
v = variables_lib.Variable([0])
v._set_save_slice_info(
variables_lib.Variable.SaveSliceInfo(
full_name='s', full_shape=[2], var_offset=[0], var_shape=[1]))
sharded_variable.ShardedVariable([v])
def test_as_function_input(self):
variables1 = [
variables_lib.Variable([1]),
variables_lib.Variable([1]),
]
s = sharded_variable.ShardedVariable(variables1)
variables2 = [
variables_lib.Variable([2]),
variables_lib.Variable([2]),
]
s2 = sharded_variable.ShardedVariable(variables2)
trace_count = [0]
@def_function.function
def func(sharded_var):
trace_count[0] = trace_count[0] + 1
sharded_var.assign([0, 0])
func(s)
self.assertAllEqual(ops.convert_to_tensor(s), [0, 0])
self.assertEqual(trace_count[0], 1)
func(s2)
self.assertAllEqual(ops.convert_to_tensor(s2), [0, 0])
self.assertEqual(trace_count[0], 1)
def test_flatten(self):
variables = [
variables_lib.Variable([0]),
variables_lib.Variable([1]),
]
s = sharded_variable.ShardedVariable(variables)
got = nest.flatten(s)
self.assertIs(s, got[0])
got = nest.flatten(s, expand_composites=True)
self.assertAllEqual(variables, got)
def test_tf_module(self):
class Model(module.Module):
def __init__(self):
super().__init__()
variables = [
variables_lib.Variable([0]),
variables_lib.Variable([1]),
]
self.w = sharded_variable.ShardedVariable(variables)
model = Model()
self.assertLen(model.variables, 2)
self.assertEqual(model.variables[0], [0])
self.assertEqual(model.variables[1], [1])
self.assertAllEqual(model.variables, model.trainable_variables)
self.assertLen(model._checkpoint_dependencies, 1)
self.assertIs(model._checkpoint_dependencies[0].ref, model.w)
def test_embedding_lookup(self):
v = [
variables_lib.Variable([[1., 2.], [3., 4.]]),
variables_lib.Variable([[5., 6.], [7., 8.]]),
variables_lib.Variable([[9., 10.]])
]
sv = sharded_variable.ShardedVariable(v)
@def_function.function
def lookup():
ids = constant_op.constant([0, 3, 4])
return embedding_ops.embedding_lookup_v2(sv, ids)
@def_function.function
def sparse_lookup():
sp_ids = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0], [2, 2]],
values=[0, 3, 4, 1],
dense_shape=[3, 3])
return embedding_ops.embedding_lookup_sparse_v2(sv, sp_ids, None)
@def_function.function
def safe_sparse_lookup():
sp_ids = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0], [2, 2]],
values=[0, -1, 4, 1],
dense_shape=[3, 3])
sp_weights = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [1, 0], [2, 2]],
values=[1., 1., -1., 1.],
dense_shape=[3, 3])
return embedding_ops.safe_embedding_lookup_sparse_v2(
sv, sp_ids, sp_weights)
# TODO(chenkai): Add safe_sparse_lookup to the list. Currently
# ShardedVariable is converted to a tensor in safe_sparse_lookup.
for func in [lookup, sparse_lookup]:
num_gather_ops = 0
for op in func.get_concrete_function().graph.get_operations():
if op.type == 'ResourceGather':
num_gather_ops += 1
self.assertEqual(
num_gather_ops, len(v), 'Number of ResourceGather op does not match'
' expected, possibly due to ShardedVariable accidentally being'
' converted to tensor in embedding_lookup ops.')
self.assertAllEqual(lookup(), [[1., 2.], [7., 8.], [9., 10.]])
self.assertAllClose(sparse_lookup(), [[4., 5.], [9., 10.], [3., 4.]])
self.assertAllClose(safe_sparse_lookup(), [[1., 2.], [0., 0.], [3., 4.]])
def test_slicing(self):
v = [
variables_lib.Variable([[1, 2], [3, 4], [5, 6]]),
variables_lib.Variable([[7, 8], [9, 10], [11, 12]]),
variables_lib.Variable([[13, 14], [15, 16]])
]
sv = sharded_variable.ShardedVariable(v)
empty = v[0][0:0]
# Test cases: positive step
self.assertAllEqual(sv[:], array_ops.concat(v, axis=0))
self.assertAllEqual(sv[:2], [[1, 2], [3, 4]])
self.assertAllEqual(sv[-8:2], [[1, 2], [3, 4]])
self.assertAllEqual(sv[-10:2], [[1, 2], [3, 4]])
self.assertAllEqual(sv[5:], [[11, 12], [13, 14], [15, 16]])
self.assertAllEqual(sv[5:-1], [[11, 12], [13, 14]])
self.assertAllEqual(sv[::3], [[1, 2], [7, 8], [13, 14]])
self.assertAllEqual(sv[::5], [[1, 2], [11, 12]])
self.assertAllEqual(sv[1::6], [[3, 4], [15, 16]])
self.assertAllEqual(sv[1:5:6], [[3, 4]])
self.assertAllEqual(sv[1::7], [[3, 4]])
self.assertAllEqual(sv[2:7], [[5, 6], [7, 8], [9, 10], [11, 12], [13, 14]])
self.assertAllEqual(sv[2:7:2], [[5, 6], [9, 10], [13, 14]])
self.assertAllEqual(sv[2:7:3], [[5, 6], [11, 12]])
# Test cases: negative step
self.assertAllEqual(
sv[::-1], array_ops.reverse(array_ops.concat(v, axis=0), axis=[0]))
self.assertAllEqual(sv[2::-1], [[5, 6], [3, 4], [1, 2]])
self.assertAllEqual(sv[2:-8:-1], [[5, 6], [3, 4]])
self.assertAllEqual(sv[2:-10:-1], [[5, 6], [3, 4], [1, 2]])
self.assertAllEqual(sv[4::-1], [[9, 10], [7, 8], [5, 6], [3, 4], [1, 2]])
self.assertAllEqual(sv[-1:-3:-1], [[15, 16], [13, 14]])
self.assertAllEqual(sv[::-5], [[15, 16], [5, 6]])
self.assertAllEqual(sv[6::-6], [[13, 14], [1, 2]])
self.assertAllEqual(sv[6:5:-6], [[13, 14]])
self.assertAllEqual(sv[6::-7], [[13, 14]])
self.assertAllEqual(sv[7:1:-1],
[[15, 16], [13, 14], [11, 12], [9, 10], [7, 8], [5, 6]])
self.assertAllEqual(sv[7:1:-2], [[15, 16], [11, 12], [7, 8]])
self.assertAllEqual(sv[7:1:-4], [[15, 16], [7, 8]])
# Test cases: empty slice
self.assertAllEqual(sv[0:0], empty)
self.assertAllEqual(sv[5:3], empty)
self.assertAllEqual(sv[3:5:-1], empty)
self.assertAllEqual(sv[-1:0], empty)
self.assertAllEqual(sv[2:-1:-1], empty)
# Test cases: slicing other dimensions
self.assertAllEqual(sv[:, 0], [1, 3, 5, 7, 9, 11, 13, 15])
self.assertAllEqual(sv[:, 0:1], [[1], [3], [5], [7], [9], [11], [13], [15]])
# Test cases: normal indexing
self.assertAllEqual(sv[2], [5, 6])
self.assertAllEqual(sv[6], [13, 14])
self.assertAllEqual(sv[2, 1], 6)
self.assertAllEqual(sv[-2], [13, 14])
with self.assertRaisesRegex(IndexError, 'out of bounds'):
_ = sv[100]
with self.assertRaisesRegex(IndexError, 'out of bounds'):
_ = sv[-100]
# Test cases: Ellipsis
self.assertAllEqual(sv[...], array_ops.concat(v, axis=0))
self.assertAllEqual(sv[..., 0], [1, 3, 5, 7, 9, 11, 13, 15])
self.assertAllEqual(sv[0:1, ...], [[1, 2]])
# Test cases: newaxis
self.assertAllEqual(
sv[array_ops.newaxis, ...],
array_ops.expand_dims_v2(array_ops.concat(v, axis=0), axis=0))
# Test cases: boolean masks
self.assertAllEqual(sv[ops.convert_to_tensor(sv) > 10],
[11, 12, 13, 14, 15, 16])
# Test cases: tensor input
with self.assertRaisesRegex(TypeError, 'not allowed'):
_ = sv[constant_op.constant(1)::]
with self.assertRaisesRegex(TypeError, 'not allowed'):
_ = sv[:constant_op.constant(1):]
with self.assertRaisesRegex(TypeError, 'not allowed'):
_ = sv[constant_op.constant(1)]
# Test cases: inside tf.function
@def_function.function
def func():
a = sv[:, 0]
return a
self.assertAllEqual(func(), [1, 3, 5, 7, 9, 11, 13, 15])
def test_operator_overload(self):
v1 = [
variables_lib.Variable([1.]),
variables_lib.Variable([2.]),
]
sv1 = sharded_variable.ShardedVariable(v1)
v2 = [
variables_lib.Variable([1.]),
variables_lib.Variable([2.]),
]
sv2 = sharded_variable.ShardedVariable(v2)
equal = sv1 == sv2
self.assertAllEqual(equal, [True, True])
self.assertAllEqual(sv1 + sv2, [2.0, 4.0])
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
|
import sys
from glob import glob
import os
from datetime import datetime
from tasks import plotms
'''
Plot visibility data for each SPW to allow for easy manual flagging.
'''
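# Expected positional arguments when run non-interactively (values are
# illustrative):
#   sys.argv[1]  MS name, e.g. "mydata.ms"
#   sys.argv[2]  field name(s)/number(s), comma-separated, e.g. "0,1,3"
#   sys.argv[3]  correlation string, e.g. "RR,LL"
#   sys.argv[4]  first SPW to plot, e.g. "0"
#   sys.argv[5]  bandpass scan number, or "None" to skip the bandpass plots
#   sys.argv[6]  "T" to also show amp/channel and phase/channel plots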
try:
vis_name = sys.argv[1]
field_names = sys.argv[2]
corrstring = sys.argv[3]
starting_spw = int(sys.argv[4])
bp_scan = sys.argv[5]
show_channels = True if sys.argv[6] == "T" else False
except IndexError:
vis_name = raw_input("MS Name? : ")
field_names = raw_input("Field Name/Number(s)? : ")
corrstring = raw_input("Corrstring? : ")
starting_spw = int(raw_input("SPW to start at? : "))
bp_scan = raw_input("Bandpass scan? (None or scan number): ")
show_channels = True if \
raw_input("Show amp/channel and phase/channel? (T or F): ") == "T" \
else False
# Only show BP scan plots when given
if bp_scan == "None" or bp_scan == "none":
bp_scan = None
tb.open(vis_name + '/SPECTRAL_WINDOW')
freqs = tb.getcol('REF_FREQUENCY')
nchans = tb.getcol('NUM_CHAN')
tb.close()
spws = range(starting_spw, len(freqs))
fields = field_names.split(",")
for n, field_name in enumerate(fields):
print("On {0}. {1} out of {2} fields.".format(field_name, n + 1,
len(fields)))
for spw_num in spws:
nchan = nchans[spw_num]
print "On SPW {0} of {1}".format(str(spw_num + 1), str(len(freqs)))
if bp_scan is not None:
default('plotms')
vis = vis_name
xaxis = 'time'
yaxis = 'amp'
ydatacolumn = 'corrected'
selectdata = True
field = field_name
spw = str(spw_num)
scan = bp_scan
correlation = corrstring
averagedata = False
avgscan = False
transform = False
extendflag = False
iteraxis = ''
coloraxis = 'antenna2'
plotrange = []
xlabel = ''
ylabel = ''
showmajorgrid = False
showminorgrid = False
plotms()
raw_input("Continue?")
if show_channels:
default('plotms')
vis = vis_name
xaxis = 'channel'
yaxis = 'phase'
ydatacolumn = 'corrected'
selectdata = True
field = field_name
spw = str(spw_num)
correlation = corrstring
averagedata = True
avgtime = '1e8s'
avgscan = True
transform = False
extendflag = False
iteraxis = ''
coloraxis = 'antenna2'
plotrange = []
xlabel = ''
ylabel = ''
showmajorgrid = False
showminorgrid = False
plotms()
raw_input("Continue?")
default('plotms')
vis = vis_name
xaxis = 'channel'
yaxis = 'amp'
ydatacolumn = 'corrected'
selectdata = True
field = field_name
spw = str(spw_num)
correlation = corrstring
averagedata = True
avgtime = '1e8s'
avgscan = True
transform = False
extendflag = False
iteraxis = ''
coloraxis = 'antenna2'
plotrange = []
xlabel = ''
ylabel = ''
showmajorgrid = False
showminorgrid = False
plotms()
raw_input("Continue?")
default('plotms')
vis = vis_name
xaxis = 'time'
yaxis = 'amp'
ydatacolumn = 'corrected'
selectdata = True
field = field_name
spw = str(spw_num)
correlation = corrstring
averagedata = True
avgchannel = str(nchan)
avgscan = False
transform = False
extendflag = False
iteraxis = 'scan'
coloraxis = 'antenna2'
plotrange = []
xlabel = ''
ylabel = ''
showmajorgrid = False
showminorgrid = False
plotms()
raw_input("Continue?")
default('plotms')
vis = vis_name
xaxis = 'time'
yaxis = 'phase'
ydatacolumn = 'corrected'
selectdata = True
field = field_name
spw = str(spw_num)
correlation = corrstring
averagedata = True
avgchannel = str(nchan)
avgscan = False
transform = False
extendflag = False
iteraxis = 'scan'
coloraxis = 'antenna2'
plotrange = []
xlabel = ''
ylabel = ''
showmajorgrid = False
showminorgrid = False
plotms()
raw_input("Continue?")
default('plotms')
vis = vis_name
xaxis = 'uvwave'
yaxis = 'amp'
ydatacolumn = 'corrected'
selectdata = True
field = field_name
spw = str(spw_num)
correlation = corrstring
averagedata = True
avgchannel = str(nchan)
avgtime = '60s'
avgscan = False
transform = False
extendflag = False
iteraxis = ''
coloraxis = 'antenna2'
plotrange = []
xlabel = ''
ylabel = ''
showmajorgrid = False
showminorgrid = False
plotms()
raw_input("Continue?")
# Get the existing flag version names.
flag_folder = "{}.flagversions".format(vis_name)
tstamp = datetime.now().strftime("%Y%m%d-%H%M%S")
if not os.path.exists(flag_folder):
print("No flag versions exist. Using default flag name.")
versionname = "manual_flagging_1_{}".format(tstamp)
else:
flag_versions = glob(os.path.join(flag_folder, "flags.manual_flagging_*"))
if len(flag_versions) == 0:
versionname = "manual_flagging_1_{}".format(tstamp)
else:
num = len(flag_versions) + 1
versionname = "manual_flagging_{0}_{1}".format(num, tstamp)
# Save this new version of the flags
flagmanager(vis=vis_name, mode='save',
versionname=versionname)
|
|
from django.db.backends import BaseDatabaseOperations
class DatabaseOperations(BaseDatabaseOperations):
def __init__(self, connection):
super(DatabaseOperations, self).__init__(connection)
def date_extract_sql(self, lookup_type, field_name):
# http://www.postgresql.org/docs/8.0/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
if lookup_type == 'week_day':
# For consistency across backends, we return Sunday=1, Saturday=7.
return "EXTRACT('dow' FROM %s) + 1" % field_name
else:
return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
def date_interval_sql(self, sql, connector, timedelta):
"""
        Implements the interval functionality for expressions.
        The format for Postgres is:
(datefield + interval '3 days 200 seconds 5 microseconds')
"""
modifiers = []
if timedelta.days:
modifiers.append(u'%s days' % timedelta.days)
if timedelta.seconds:
modifiers.append(u'%s seconds' % timedelta.seconds)
if timedelta.microseconds:
modifiers.append(u'%s microseconds' % timedelta.microseconds)
mods = u' '.join(modifiers)
conn = u' %s ' % connector
return u'(%s)' % conn.join([sql, u'interval \'%s\'' % mods])
def date_trunc_sql(self, lookup_type, field_name):
# http://www.postgresql.org/docs/8.0/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def lookup_cast(self, lookup_type):
lookup = '%s'
# Cast text lookups to text to allow things like filter(x__contains=4)
if lookup_type in ('iexact', 'contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith'):
lookup = "%s::text"
# Use UPPER(x) for case-insensitive lookups; it's faster.
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
lookup = 'UPPER(%s)' % lookup
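        # For example, an 'icontains' lookup ends up as 'UPPER(%s::text)'.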
return lookup
def field_cast_sql(self, db_type):
if db_type == 'inet':
return 'HOST(%s)'
return '%s'
def last_insert_id(self, cursor, table_name, pk_name):
# Use pg_get_serial_sequence to get the underlying sequence name
# from the table name and column name (available since PostgreSQL 8)
cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
self.quote_name(table_name), pk_name))
return cursor.fetchone()[0]
def no_limit_value(self):
return None
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def set_time_zone_sql(self):
return "SET TIME ZONE %s"
def sql_flush(self, style, tables, sequences):
if tables:
# Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows
# us to truncate tables referenced by a foreign key in any other
# table.
sql = ['%s %s;' % \
(style.SQL_KEYWORD('TRUNCATE'),
style.SQL_FIELD(', '.join([self.quote_name(table) for table in tables]))
)]
# 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements
# to reset sequence indices
for sequence_info in sequences:
table_name = sequence_info['table']
column_name = sequence_info['column']
if not (column_name and len(column_name) > 0):
# This will be the case if it's an m2m using an autogenerated
# intermediate table (see BaseDatabaseIntrospection.sequence_list)
column_name = 'id'
sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % \
(style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(self.quote_name(table_name)),
style.SQL_FIELD(column_name))
)
return sql
else:
return []
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
qn = self.quote_name
for model in model_list:
# Use `coalesce` to set the sequence for each model to the max pk value if there are records,
# or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
# if there are records (as the max pk value is already in use), otherwise set it to false.
# Use pg_get_serial_sequence to get the underlying sequence name from the table name
# and column name (available since PostgreSQL 8)
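            # The generated statement has roughly this shape for a model with
            # an AutoField `id` on table `app_foo` (names are illustrative):
            #   SELECT setval(pg_get_serial_sequence('"app_foo"','id'),
            #                 coalesce(max("id"), 1), max("id") IS NOT null)
            #   FROM "app_foo";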
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" % \
(style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(qn(model._meta.db_table)),
style.SQL_FIELD(f.column),
style.SQL_FIELD(qn(f.column)),
style.SQL_FIELD(qn(f.column)),
style.SQL_KEYWORD('IS NOT'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(qn(model._meta.db_table))))
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.many_to_many:
if not f.rel.through:
output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" % \
(style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(qn(f.m2m_db_table())),
style.SQL_FIELD('id'),
style.SQL_FIELD(qn('id')),
style.SQL_FIELD(qn('id')),
style.SQL_KEYWORD('IS NOT'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(qn(f.m2m_db_table()))))
return output
def savepoint_create_sql(self, sid):
return "SAVEPOINT %s" % sid
def savepoint_commit_sql(self, sid):
return "RELEASE SAVEPOINT %s" % sid
def savepoint_rollback_sql(self, sid):
return "ROLLBACK TO SAVEPOINT %s" % sid
def prep_for_iexact_query(self, x):
return x
def check_aggregate_support(self, aggregate):
"""Check that the backend fully supports the provided aggregate.
The implementation of population statistics (STDDEV_POP and VAR_POP)
under Postgres 8.2 - 8.2.4 is known to be faulty. Raise
NotImplementedError if this is the database in use.
"""
if aggregate.sql_function in ('STDDEV_POP', 'VAR_POP'):
pg_version = self.connection.pg_version
if pg_version >= 80200 and pg_version <= 80204:
raise NotImplementedError('PostgreSQL 8.2 to 8.2.4 is known to have a faulty implementation of %s. Please upgrade your version of PostgreSQL.' % aggregate.sql_function)
def max_name_length(self):
"""
Returns the maximum length of an identifier.
Note that the maximum length of an identifier is 63 by default, but can
be changed by recompiling PostgreSQL after editing the NAMEDATALEN
macro in src/include/pg_config_manual.h .
This implementation simply returns 63, but can easily be overridden by a
custom database backend that inherits most of its behavior from this one.
"""
return 63
def distinct_sql(self, fields):
if fields:
return 'DISTINCT ON (%s)' % ', '.join(fields)
else:
return 'DISTINCT'
def last_executed_query(self, cursor, sql, params):
# http://initd.org/psycopg/docs/cursor.html#cursor.query
# The query attribute is a Psycopg extension to the DB API 2.0.
return cursor.query
def return_insert_id(self):
return "RETURNING %s", ()
def bulk_insert_sql(self, fields, num_values):
items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
return "VALUES " + ", ".join([items_sql] * num_values)
|
|
"""Deployment utilities for clld apps."""
# flake8: noqa
import time
import json
from getpass import getpass
import os
from datetime import datetime, timedelta
from importlib import import_module
import contextlib
from pytz import timezone, utc
from fabric.api import sudo, run, local, put, env, cd, task, execute, settings
from fabric.contrib.console import confirm
from fabric.contrib.files import exists
from fabtools import require
from fabtools.files import upload_template
from fabtools.python import virtualenv
from fabtools import service
from fabtools import postgres
from clldutils.path import Path
from clld.scripts.util import data_file
# we prevent the tasks defined here from showing up in fab --list, because we only
# want the wrapped version imported from clldfabric.tasks to be listed.
__all__ = []
TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'templates')
env.use_ssh_config = True
def get_input(prompt):
return raw_input(prompt)
@contextlib.contextmanager
def working_directory(path):
"""A context manager which changes the working directory to the given
path, and then changes it back to its previous value on exit.
"""
prev_cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(prev_cwd)
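# Example use (illustrative):
#   with working_directory('/tmp'):
#       local('ls')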
def upload_template_as_root(dest, template, context=None, mode=None, owner='root'):
if mode is not None:
mode = int(mode, 8)
upload_template(template, str(dest), context, use_jinja=True,
template_dir=TEMPLATE_DIR, use_sudo=True, backup=False,
mode=mode, chown=True, user=owner)
def create_file_as_root(path, content, **kw):
kw.setdefault('owner', 'root')
kw.setdefault('group', 'root')
require.files.file(str(path), contents=content, use_sudo=True, **kw)
def get_template_variables(app, monitor_mode=False, with_blog=False):
if monitor_mode and not os.environ.get('NEWRELIC_API_KEY'):
print('--> Warning: no newrelic api key found in environment') # pragma: no cover
res = dict(
app=app,
env=env,
newrelic_api_key=os.environ.get('NEWRELIC_API_KEY'),
gunicorn=app.bin('gunicorn_paster'),
newrelic=app.bin('newrelic-admin'),
monitor_mode=monitor_mode,
auth='',
bloghost='',
bloguser='',
blogpassword='')
if with_blog: # pragma: no cover
for key, default in [
('bloghost', 'blog.%s' % app.domain),
('bloguser', app.name),
('blogpassword', ''),
]:
res[key] = os.environ.get(('%s_%s' % (app.name, key)).upper(), '')
if not res[key]:
custom = get_input('Blog %s [%s]: ' % (key[4:], default))
res[key] = custom if custom else default
assert res['blogpassword']
return res
@task
def supervisor(app, command, template_variables=None):
"""
.. seealso: http://serverfault.com/a/479754
"""
template_variables = template_variables or get_template_variables(app)
template_variables['PAUSE'] = {'pause': True, 'run': False}[command]
upload_template_as_root(
app.supervisor, 'supervisor.conf', template_variables, mode='644')
if command == 'run':
sudo('supervisorctl reread')
sudo('supervisorctl update %s' % app.name)
sudo('supervisorctl restart %s' % app.name)
else:
sudo('supervisorctl stop %s' % app.name)
#sudo('supervisorctl reread %s' % app.name)
#sudo('supervisorctl update %s' % app.name)
time.sleep(1)
def require_bibutils(app): # pragma: no cover
"""
tar -xzvf bibutils_5.0_src.tgz -C /home/{app.name}
cd /home/{app.name}/bibutils_5.0
configure
make
sudo make install
"""
if not exists('/usr/local/bin/bib2xml'):
tgz = str(app.venv.joinpath('src', 'clld', 'tools', 'bibutils_5.0_src.tgz'))
sudo('tar -xzvf {tgz} -C {app.home}'.format(tgz=tgz, app=app))
with cd(str(app.home.joinpath('bibutils_5.0'))):
sudo('./configure')
sudo('make')
sudo('make install')
@task
def uninstall(app): # pragma: no cover
for file_ in [app.supervisor, app.nginx_location, app.nginx_site]:
file_ = str(file_)
if exists(file_):
sudo('rm %s' % file_)
service.reload('nginx')
sudo('supervisorctl stop %s' % app.name)
@task
def maintenance(app, hours=2, template_variables=None):
"""turn maintenance mode on|off
"""
template_variables = template_variables or get_template_variables(app)
ts = utc.localize(datetime.utcnow() + timedelta(hours=hours))
ts = ts.astimezone(timezone('Europe/Berlin')).strftime('%Y-%m-%d %H:%M %Z%z')
template_variables['timestamp'] = ts
require.files.directory(str(app.www), use_sudo=True)
upload_template_as_root(
app.www.joinpath('503.html'), '503.html', template_variables)
def http_auth(app):
pwds = {
app.name: getpass(prompt='HTTP Basic Auth password for user %s: ' % app.name),
'admin': ''}
while not pwds['admin']:
pwds['admin'] = getpass(prompt='HTTP Basic Auth password for user admin: ')
for i, pair in enumerate([(n, p) for n, p in pwds.items() if p]):
opts = 'bd'
if i == 0:
opts += 'c'
sudo('htpasswd -%s %s %s %s' % (opts, app.nginx_htpasswd, pair[0], pair[1]))
return bool(pwds[app.name]), """\
proxy_set_header Authorization $http_authorization;
proxy_pass_header Authorization;
auth_basic "%s";
auth_basic_user_file %s;""" % (app.name, app.nginx_htpasswd)
@task
def copy_files(app):
data_dir = data_file(import_module(app.name))
tarball = '/tmp/%s-files.tgz' % app.name
local('tar -C %s -czf %s files' % (data_dir, tarball))
require.files.file(tarball, source=tarball)
if os.path.exists(tarball):
os.remove(tarball) # pragma: no cover
with cd('/tmp'):
tarfile = tarball.split('/')[2]
sudo('tar -xzf %s' % tarfile)
target = app.www.joinpath('files')
if exists(target):
sudo('cp -ru files/* %s' % target)
sudo('rm -rf files')
else:
sudo('mv files %s' % app.www) # pragma: no cover
sudo('chown -R root:root %s' % target)
sudo('rm %s' % tarfile)
sudo('tree %s' % app.www)
@task
def copy_rdfdump(app):
execute(copy_downloads, app, pattern='*.n3.gz')
@task
def copy_downloads(app, pattern='*'):
dl_dir = app.src.joinpath(app.name, 'static', 'download')
require.files.directory(dl_dir, use_sudo=True, mode="777")
local_dl_dir = Path(import_module(app.name).__file__).parent.joinpath('static', 'download')
for f in local_dl_dir.glob(pattern):
target = dl_dir.joinpath(f.name)
create_file_as_root(target, open(f.as_posix()).read())
sudo('chown %s:%s %s' % (app.name, app.name, target))
require.files.directory(dl_dir, use_sudo=True, mode="755")
def init_pg_collkey(app):
require.files.file(
'/tmp/collkey_icu.sql',
source=os.path.join(
os.path.dirname(__file__), 'pg_collkey-v0.5', 'collkey_icu.sql'))
sudo('sudo -u postgres psql -f /tmp/collkey_icu.sql -d {0.name}'.format(app))
@task
def deploy(app, environment, with_alembic=False, with_blog=False, with_files=True):
with settings(warn_only=True):
lsb_release = run('lsb_release -a')
for codename in ['trusty', 'precise']:
if codename in lsb_release:
lsb_release = codename
break
else:
if lsb_release != '{"status": "ok"}':
# if this were the case, we'd be in a test!
raise ValueError('unsupported platform: %s' % lsb_release)
if environment == 'test' and app.workers > 3:
app.workers = 3
template_variables = get_template_variables(
app,
monitor_mode='true' if environment == 'production' else 'false',
with_blog=with_blog)
require.users.user(app.name, shell='/bin/bash')
require.postfix.server(env['host'])
require.postgres.server()
require.deb.packages(app.require_deb)
require.postgres.user(app.name, app.name)
require.postgres.database(app.name, app.name)
require.files.directory(str(app.venv), use_sudo=True)
if getattr(app, 'pg_unaccent', False):
require.deb.packages(['postgresql-contrib'])
sudo('sudo -u postgres psql -c "{0}" -d {1.name}'.format(
'CREATE EXTENSION IF NOT EXISTS unaccent WITH SCHEMA public;',
app))
with_pg_collkey = getattr(app, 'pg_collkey', False)
if with_pg_collkey:
pg_version = '9.1' if lsb_release == 'precise' else '9.3'
if not exists('/usr/lib/postgresql/%s/lib/collkey_icu.so' % pg_version):
require.deb.packages(['postgresql-server-dev-%s' % pg_version, 'libicu-dev'])
upload_template_as_root(
'/tmp/Makefile', 'pg_collkey_Makefile', dict(pg_version=pg_version))
require.files.file(
'/tmp/collkey_icu.c',
source=os.path.join(
os.path.dirname(__file__), 'pg_collkey-v0.5', 'collkey_icu.c'))
with cd('/tmp'):
sudo('make')
sudo('make install')
init_pg_collkey(app)
if lsb_release == 'precise':
require.deb.package('python-dev')
require.python.virtualenv(str(app.venv), use_sudo=True)
else:
require.deb.package('python3-dev')
require.deb.package('python-virtualenv')
if not exists(str(app.venv.joinpath('bin'))):
sudo('virtualenv -q --python=python3 %s' % app.venv)
require.files.directory(str(app.logs), use_sudo=True)
if app.pages and not exists(str(app.pages)):
with cd(str(app.home)):
sudo('sudo -u {0} git clone https://github.com/clld/{0}-pages.git'.format(app.name))
with virtualenv(str(app.venv)):
require.python.pip('6.0.6')
sp = env['sudo_prefix']
env['sudo_prefix'] += ' -H' # set HOME for pip log/cache
require.python.packages(app.require_pip, use_sudo=True)
for name in [app.name] + getattr(app, 'dependencies', []):
pkg = '-e git+git://github.com/clld/%s.git#egg=%s' % (name, name)
require.python.package(pkg, use_sudo=True)
env['sudo_prefix'] = sp
sudo('webassets -m %s.assets build' % app.name)
res = sudo('python -c "import clld; print(clld.__file__)"')
assert res.startswith('/usr/venvs') and '__init__.py' in res
template_variables['clld_dir'] = '/'.join(res.split('/')[:-1])
require_bibutils(app)
#
# configure nginx:
#
require.files.directory(
os.path.dirname(str(app.nginx_location)),
owner='root', group='root', use_sudo=True)
restricted, auth = http_auth(app)
if restricted:
template_variables['auth'] = auth
template_variables['admin_auth'] = auth
if environment == 'test':
upload_template_as_root('/etc/nginx/sites-available/default', 'nginx-default.conf')
template_variables['SITE'] = False
upload_template_as_root(
app.nginx_location, 'nginx-app.conf', template_variables)
elif environment == 'production':
template_variables['SITE'] = True
upload_template_as_root(app.nginx_site, 'nginx-app.conf', template_variables)
upload_template_as_root(
'/etc/logrotate.d/{0}'.format(app.name), 'logrotate.conf', template_variables)
maintenance(app, hours=app.deploy_duration, template_variables=template_variables)
service.reload('nginx')
#
# TODO: replace with initialization of db from data repos!
#
if with_files:
if confirm('Copy files?', default=False):
execute(copy_files, app)
if not with_alembic and confirm('Recreate database?', default=False):
db_name = get_input('from db [{0.name}]: '.format(app))
local('pg_dump -x -O -f /tmp/{0.name}.sql {1}'.format(app, db_name or app.name))
local('gzip -f /tmp/{0.name}.sql'.format(app))
require.files.file(
'/tmp/{0.name}.sql.gz'.format(app),
source="/tmp/{0.name}.sql.gz".format(app))
sudo('gunzip -f /tmp/{0.name}.sql.gz'.format(app))
supervisor(app, 'pause', template_variables)
if postgres.database_exists(app.name):
with cd('/var/lib/postgresql'):
sudo('sudo -u postgres dropdb %s' % app.name)
require.postgres.database(app.name, app.name)
if with_pg_collkey:
init_pg_collkey(app)
sudo('sudo -u {0.name} psql -f /tmp/{0.name}.sql -d {0.name}'.format(app))
else:
if exists(app.src.joinpath('alembic.ini')):
if confirm('Upgrade database?', default=False):
# Note: stopping the app is not strictly necessary, because the alembic
# revisions run in separate transactions!
supervisor(app, 'pause', template_variables)
with virtualenv(str(app.venv)):
with cd(str(app.src)):
sudo('sudo -u {0.name} {1} -n production upgrade head'.format(
app, app.bin('alembic')))
if confirm('Vacuum database?', default=False):
if confirm('VACUUM FULL?', default=False):
sudo('sudo -u postgres vacuumdb -f -z -d %s' % app.name)
else:
sudo('sudo -u postgres vacuumdb -z -d %s' % app.name)
template_variables['TEST'] = {'test': True, 'production': False}[environment]
# We only add a setting clld.files if the corresponding directory exists;
# otherwise the app would throw an error on startup.
template_variables['files'] = False
if exists(app.www.joinpath('files')):
template_variables['files'] = app.www.joinpath('files')
upload_template_as_root(app.config, 'config.ini', template_variables)
upload_template_as_root(app.newrelic_config, 'newrelic.ini', template_variables)
supervisor(app, 'run', template_variables)
time.sleep(5)
res = run('curl http://localhost:%s/_ping' % app.port)
assert json.loads(res)['status'] == 'ok'
@task
def pipfreeze(app, environment):
with virtualenv(app.venv):
with open('requirements.txt', 'w') as fp:
for line in sudo('pip freeze').splitlines():
if '%s.git' % app.name in line:
continue
if line.split('==')[0].lower() in ['fabric', 'pyx', 'fabtools', 'paramiko', 'pycrypto', 'babel']:
continue
if 'clld.git' in line:
line = 'clld'
if 'clldmpg.git' in line:
line = 'clldmpg'
fp.write(line + '\n')
@task
def run_script(app, script_name, *args): # pragma: no cover
with cd(str(app.home)):
sudo(
'%s %s %s#%s %s' % (
app.bin('python'),
app.src.joinpath(app.name, 'scripts', '%s.py' % script_name),
os.path.basename(str(app.config)),
app.name,
' '.join('%s' % arg for arg in args),
),
user=app.name)
@task
def create_downloads(app): # pragma: no cover
dl_dir = app.src.joinpath(app.name, 'static', 'download')
require.files.directory(dl_dir, use_sudo=True, mode="777")
# run the script to create the exports from the database as glottolog3 user
run_script(app, 'create_downloads')
require.files.directory(dl_dir, use_sudo=True, mode="755")
def bootstrap(nr='y'): # pragma: no cover
for pkg in 'vim tree nginx open-vm-tools'.split():
require.deb.package(pkg)
sudo('/etc/init.d/nginx start')
if nr == 'y':
for cmd in [
'wget -O /etc/apt/sources.list.d/newrelic.list http://download.newrelic.com/debian/newrelic.list',
'apt-key adv --keyserver hkp://subkeys.pgp.net --recv-keys 548C16BF',
'apt-get update',
'apt-get install newrelic-sysmond',
'nrsysmond-config --set license_key=%s' % os.environ['NEWRELIC_API_KEY'],
'/etc/init.d/newrelic-sysmond start',
]:
sudo(cmd)
|
|
# -*- coding: utf-8 -*-
"""
Parse pykit IR in the form of C.
"""
from __future__ import print_function, division, absolute_import
from io import StringIO
from os.path import dirname, abspath, join
import tempfile
import json
import tokenize
from functools import partial
from collections import defaultdict, namedtuple
from pykit import types
from pykit.ir import defs, Module, Function, Builder, Const, GlobalValue, ops
from pykit.deps.pycparser import preprocess_file, c_ast, CParser
root = dirname(abspath(__file__))
ir_root = join(dirname(root), 'ir')
#===------------------------------------------------------------------===
# Metadata and comment preprocessing
#===------------------------------------------------------------------===
Token = namedtuple('Token', ['toknum', 'tokval', 'beginpos', 'endpos'])
def parse_metadata(metadata):
"""Parse metadata (a JSON dict) as a string"""
return json.loads(metadata)
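# For illustration: the metadata string must be valid JSON (note the quoted
# keys), e.g. parse_metadata('{ "sideeffects": false }') == {'sideeffects': False}.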
def preprocess(source):
"""
Preprocess source and return metadata. I wish CParser would accept a
modified CLexer...
Finds metadata between '/*:' and ':*/' lines
c = (int) add(a, b); /*: { sideeffects: false } :*/
"""
metadata = {} # { lineno : dict of metadata }
tokens = []
# TODO: Python 3 doesn't accept a 'tokeneater' argument
def eat(toknum, tokval, beginpos, endpos, rest):
tokens.append(Token(toknum, tokval, beginpos, endpos))
def error(tok, msg):
raise SyntaxError("%d:%s: %s" % (tok.beginpos + (msg,)))
tokenize.tokenize(StringIO(unicode(source)).readline, tokeneater=eat)
tokval = lambda t: t.tokval
lines = [""] + source.splitlines()
i = 0
while i < len(tokens):
# Locate start of comment
if "".join(map(tokval, tokens[i:i+3])) == "/*:":
for j in xrange(i+3, len(tokens)):
# Locate end of comment
if "".join(map(tokval, tokens[j:j+3])) == ":*/":
lineno = tokens[j].beginpos[0]
if lineno != tokens[i].beginpos[0]:
raise error(tokens[i], "Metadata must be on a single line")
# Cut out string and parse
start, end = tokens[i+3].beginpos[1], tokens[j].beginpos[1]
metadata[lineno] = parse_metadata(lines[lineno][start:end])
i = j + 3
break
else:
raise error(tokens[i], "Metadata not terminated")
i = i + 1
return metadata
#===------------------------------------------------------------------===
# Parsing
#===------------------------------------------------------------------===
type_env = {
"Type": types.Type,
"_list": list,
"void": types.Void,
"int": types.Int,
"long": types.Long,
"long long": types.LongLong,
"float": types.Float32,
"double": types.Float64,
"string": types.Bytes,
}
binary_defs = dict(defs.binary_defs, **defs.compare_defs)
def error(node, msg):
raise SyntaxError("%s:%d: %s" % (node.coord.file, node.coord.line, msg))
class PykitIRVisitor(c_ast.NodeVisitor):
"""
Map pykit IR in the form of polymorphic C to in-memory pykit IR.
int function(float x) {
int i = 0; /* I am a comment */
while (i < 10) { /*: { "unroll": true } :*/
x = call_external("sqrt", x * x);
}
return (int) x;
}
Attributes:
"""
in_function = False
def __init__(self, type_env=None):
self.mod = Module()
self.type_env = type_env or {}
self.func = None
self.builder = None
self.local_vars = None
self.allocas = None
self.global_vars = {}
self.functions = {}
# ______________________________________________________________________
@property
def vars(self):
if self.in_function:
return self.local_vars
else:
return self.global_vars
def enter_func(self):
self.in_function = True
self.local_vars = {}
self.allocas = {}
def leave_func(self):
self.in_function = False
self.mod.add_function(self.func)
self.local_vars = None
self.allocas = None
self.func = None
def visit(self, node, type=None):
"""
Visit a node.
:type: The type available for this opcode, if any: either an LHS type
or a cast target. E.g.:
(Int) call(...) // cast
result = call(...) // assignment, assuming 'result' is declared
result = call(..., call(...)) // second 'call' isn't typed
"""
self.type = type
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
# if visitor is None:
# raise SyntaxError(
# "Node %s not supported in %s:%s" % (node, node.coord.file,
# node.coord.line))
return visitor(node)
def visitif(self, node):
if node:
return self.visit(node)
def visits(self, node):
return list(map(self.visit, node))
# ______________________________________________________________________
def alloca(self, varname):
if varname not in self.allocas:
# Allocate variable with alloca
with self.builder.at_front(self.func.startblock):
type = types.Pointer(self.local_vars[varname])
result = self.func.temp(varname)
self.allocas[varname] = self.builder.alloca(type, [], result)
return self.allocas[varname]
def assignvar(self, varname, rhs):
self.builder.store(rhs, self.alloca(varname))
def assign(self, varname, rhs):
if self.in_function:
# Local variable
type = self.local_vars[varname]
self.assignvar(varname, self.visit(rhs, type=type))
else:
# Global variable
type = self.global_vars[varname]
self.mod.add_global(GlobalValue(varname, type=self.type,
value=self.visit(rhs, type=type)))
# ______________________________________________________________________
def visit_Decl(self, decl):
if decl.name in self.vars:
error(decl, "Var '%s' already declared!" % (decl.name,))
type = self.visit(decl.type)
self.vars[decl.name] = type
if decl.init:
self.assign(decl.name, decl.init)
elif not self.in_function:
extern = decl.storage == 'external'
self.mod.add_global(GlobalValue(decl.name, type, external=extern))
return type
def visit_TypeDecl(self, decl):
return self.visit(decl.type)
visit_Typename = visit_TypeDecl
def visit_PtrDecl(self, decl):
return types.Pointer(self.visit(decl.type.type))
def visit_FuncDecl(self, decl):
if decl.args:
params = self.visits(decl.args.params)
else:
params = []
return types.Function(self.visit(decl.type), params)
def visit_IdentifierType(self, node):
name, = node.names
return self.type_env[name]
def visit_Typedef(self, node):
if node.name in ("Type", "_list"):
type = self.type_env[node.name]
else:
type = self.visit(node.type)
if type == types.Type:
type = getattr(types, node.name)
self.type_env[node.name] = type
return type
def visit_Template(self, node):
left = self.visit(node.left)
subtypes = self.visits(node.right)
if left is list:
return list(subtypes)
else:
assert issubclass(left, types.Type)
subtypes = self.visits(node.right)
return left(*subtypes)
# ______________________________________________________________________
def visit_FuncDef(self, node):
assert not node.param_decls
self.enter_func()
name = node.decl.name
type = self.visit(node.decl.type)
if node.decl.type.args:
argnames = [p.name or "" for p in node.decl.type.args.params]
else:
argnames = []
self.func = Function(name, argnames, type)
self.func.new_block('entry')
self.builder = Builder(self.func)
self.builder.position_at_end(self.func.startblock)
# Store arguments in stack variables
for argname in argnames:
self.assignvar(argname, self.func.get_arg(argname))
self.generic_visit(node.body)
self.leave_func()
# ______________________________________________________________________
def visit_FuncCall(self, node):
type = self.type
opcode = node.name.name
args = self.visits(node.args.exprs) if node.args else []
if opcode == "list":
return args
elif not type and not ops.is_void(opcode):
error(node, "Expected a type for sub-expression "
"(add a cast or assignment)")
elif not hasattr(self.builder, opcode):
if opcode in self.mod.functions:
return self.builder.call(type, [self.mod.get_function(opcode),
args])
error(node, "No opcode %s" % (opcode,))
buildop = getattr(self.builder, opcode)
if ops.is_void(opcode):
return buildop(*args)
else:
return buildop(type or "Unset", args)
def visit_ID(self, node):
if self.in_function:
if node.name in self.local_vars:
result = self.alloca(node.name)
return self.builder.load(result.type.base, [result])
global_val = (self.mod.get_function(node.name) or
self.mod.get_global(node.name))
if not global_val:
error(node, "Not a local or global: %r" % node.name)
return global_val
def visit_Cast(self, node):
type = self.visit(node.to_type)
if isinstance(node.expr, c_ast.FuncCall):
op = self.visit(node.expr, type=type)
op.type = type
return op
else:
result = self.visit(node.expr)
if result.type == type:
return result
return self.builder.convert(type, [result])
def visit_Assignment(self, node):
if node.op != '=':
error(node, "Only assignment with '=' is supported")
if not isinstance(node.lvalue, c_ast.ID):
error(node, "Canot only assign to a name")
self.assign(node.lvalue.name, node.rvalue)
def visit_Constant(self, node):
type = self.type_env[node.type]
const = types.convert(node.value, types.resolve_typedef(type))
if isinstance(const, basestring):
const = const[1:-1] # slice away quotes
return Const(const)
def visit_UnaryOp(self, node):
op = defs.unary_defs[node.op]
buildop = getattr(self.builder, op)
arg = self.visit(node.expr)
type = self.type or arg.type
return buildop(type, [arg])
def visit_BinaryOp(self, node):
op = binary_defs[node.op]
buildop = getattr(self.builder, op)
left, right = self.visits([node.left, node.right])
type = self.type
if not type:
l, r = map(types.resolve_typedef, [left.type, right.type])
assert l == r, (l, r)
if node.op in defs.compare_defs:
type = types.Bool
return buildop(type or left.type, [left, right])
def visit_If(self, node):
cond = self.visit(node.cond)
ifpos, elsepos, exit_block = self.builder.ifelse(cond)
with ifpos:
self.visit(node.iftrue)
self.builder.jump(exit_block)
with elsepos:
if node.iffalse:
self.visit(node.iffalse)
self.builder.jump(exit_block)
self.builder.position_at_end(exit_block)
def _loop(self, init, cond, next, body):
_, exit_block = self.builder.splitblock(self.func.temp("exit"))
_, body_block = self.builder.splitblock(self.func.temp("body"))
_, cond_block = self.builder.splitblock(self.func.temp("cond"))
self.visitif(init)
self.builder.jump(cond_block)
with self.builder.at_front(cond_block):
cond = self.visit(cond, type=types.Bool)
self.builder.cbranch(cond, body_block, exit_block)
with self.builder.at_front(body_block):
self.visit(body)
self.visitif(next)
bb = self.builder.basic_block
if not bb.tail or not ops.is_terminator(bb.tail.opcode):
self.builder.jump(cond_block)
self.builder.position_at_end(exit_block)
def visit_While(self, node):
self._loop(None, node.cond, None, node.stmt)
def visit_For(self, node):
# avoid silly 2to3 rewrite to 'node.__next__'
next = getattr(node, 'next')
self._loop(node.init, node.cond, next, node.stmt)
def visit_Return(self, node):
b = self.builder
value = self.visit(node.expr)
t = self.func.temp
b.ret(b.convert(self.func.type.restype, [value]))
debug_args = dict(lex_optimize=False, yacc_optimize=False, yacc_debug=True)
def parse(source, filename):
return CParser().parse(source, filename)
def from_c(source, filename="<string>"):
# TODO: process metadata
# metadata = preprocess(source)
# Preprocess...
f = tempfile.NamedTemporaryFile('w+t')
try:
f.write(source)
f.flush()
source = preprocess_file(f.name, cpp_args=['-I' + ir_root])
finally:
f.close()
# Parse
ast = parse(source, filename)
# ast.show()
visitor = PykitIRVisitor(dict(type_env))
visitor.visit(ast)
return visitor.mod
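# Usage sketch (source string hypothetical, mirroring the PykitIRVisitor
# docstring example): parse a snippet of pykit-C into an in-memory Module.
#
#     mod = from_c('''
#     int function(float x) {
#         x = call_external("sqrt", x * x);
#         return (int) x;
#     }
#     ''')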
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Twitter Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""luigi bindings for Google Cloud Storage"""
import io
import logging
import mimetypes
import os
import tempfile
import time
try:
from urlparse import urlsplit
except ImportError:
from urllib.parse import urlsplit
import luigi.target
from luigi import six
from luigi.six.moves import xrange
logger = logging.getLogger('luigi-interface')
try:
import httplib2
import oauth2client.client
from googleapiclient import errors
from googleapiclient import discovery
from googleapiclient import http
except ImportError:
logger.warning("Loading GCS module without the python packages googleapiclient & oauth2client. \
This will crash at runtime if GCS functionality is used.")
else:
# Retry transport and file IO errors.
RETRYABLE_ERRORS = (httplib2.HttpLib2Error, IOError)
# Number of times to retry failed downloads.
NUM_RETRIES = 5
# Number of bytes to send/receive in each request.
CHUNKSIZE = 10 * 1024 * 1024
# Mimetype to use if one can't be guessed from the file extension.
DEFAULT_MIMETYPE = 'application/octet-stream'
# Time to sleep while waiting for eventual consistency to finish.
EVENTUAL_CONSISTENCY_SLEEP_INTERVAL = 0.1
# Maximum number of sleeps for eventual consistency.
EVENTUAL_CONSISTENCY_MAX_SLEEPS = 300
def _wait_for_consistency(checker):
"""Eventual consistency: wait until GCS reports something is true.
This is necessary for e.g. create/delete where the operation might return,
but won't be reflected for a bit.
"""
for _ in xrange(EVENTUAL_CONSISTENCY_MAX_SLEEPS):
if checker():
return
time.sleep(EVENTUAL_CONSISTENCY_SLEEP_INTERVAL)
logger.warning('Exceeded wait for eventual GCS consistency - this may be a '
'bug in the library or something is terribly wrong.')
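# Usage sketch (bucket and object names hypothetical): poll until an upload is
# visible before relying on it, as done after resumable puts below.
#
#     _wait_for_consistency(lambda: client._obj_exists('my-bucket', 'data/part-0'))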
class InvalidDeleteException(luigi.target.FileSystemException):
pass
class GCSClient(luigi.target.FileSystem):
"""An implementation of a FileSystem over Google Cloud Storage.
There are several ways to use this class. By default it will use the app
default credentials, as described at https://developers.google.com/identity/protocols/application-default-credentials .
Alternatively, you may pass an oauth2client credentials object. e.g. to use a service account::
credentials = oauth2client.client.SignedJwtAssertionCredentials(
'012345678912-ThisIsARandomServiceAccountEmail@developer.gserviceaccount.com',
'These are the contents of the p12 file that came with the service account',
scope='https://www.googleapis.com/auth/devstorage.read_write')
client = GCSClient(oauth_credentials=credentials)
The chunksize parameter specifies how much data to transfer when downloading
or uploading files.
.. warning::
By default this class will use "automated service discovery" which will require
a connection to the web. The google api client downloads a JSON file to "create" the
library interface on the fly. If you want a more hermetic build, you can pass the
contents of this file (currently found at https://www.googleapis.com/discovery/v1/apis/storage/v1/rest )
as the ``descriptor`` argument.
"""
def __init__(self, oauth_credentials=None, descriptor='', http_=None,
chunksize=CHUNKSIZE):
self.chunksize = chunksize
http_ = http_ or httplib2.Http()
if not oauth_credentials:
oauth_credentials = oauth2client.client.GoogleCredentials.get_application_default()
if descriptor:
self.client = discovery.build_from_document(descriptor, credentials=oauth_credentials, http=http_)
else:
self.client = discovery.build('storage', 'v1', credentials=oauth_credentials, http=http_)
def _path_to_bucket_and_key(self, path):
(scheme, netloc, path, _, _) = urlsplit(path)
assert scheme == 'gs'
path_without_initial_slash = path[1:]
return netloc, path_without_initial_slash
def _is_root(self, key):
return len(key) == 0 or key == '/'
def _add_path_delimiter(self, key):
return key if key[-1:] == '/' else key + '/'
def _obj_exists(self, bucket, obj):
try:
self.client.objects().get(bucket=bucket, object=obj).execute()
except errors.HttpError as ex:
if ex.resp['status'] == '404':
return False
raise
else:
return True
def _list_iter(self, bucket, prefix):
request = self.client.objects().list(bucket=bucket, prefix=prefix)
response = request.execute()
while response is not None:
for it in response.get('items', []):
yield it
request = self.client.objects().list_next(request, response)
if request is None:
break
response = request.execute()
def _do_put(self, media, dest_path):
bucket, obj = self._path_to_bucket_and_key(dest_path)
request = self.client.objects().insert(bucket=bucket, name=obj, media_body=media)
if not media.resumable():
return request.execute()
response = None
attempts = 0
while response is None:
error = None
try:
status, response = request.next_chunk()
if status:
logger.debug('Upload progress: %.2f%%', 100 * status.progress())
except errors.HttpError as err:
error = err
if err.resp.status < 500:
raise
logger.warning('Caught error while uploading', exc_info=True)
except RETRYABLE_ERRORS as err:
logger.warning('Caught error while uploading', exc_info=True)
error = err
if error:
attempts += 1
if attempts >= NUM_RETRIES:
raise error
else:
attempts = 0
_wait_for_consistency(lambda: self._obj_exists(bucket, obj))
return response
def exists(self, path):
bucket, obj = self._path_to_bucket_and_key(path)
if self._obj_exists(bucket, obj):
return True
return self.isdir(path)
def isdir(self, path):
bucket, obj = self._path_to_bucket_and_key(path)
if self._is_root(obj):
try:
self.client.buckets().get(bucket=bucket).execute()
except errors.HttpError as ex:
if ex.resp['status'] == '404':
return False
raise
obj = self._add_path_delimiter(obj)
if self._obj_exists(bucket, obj):
return True
# Any objects with this prefix
resp = self.client.objects().list(bucket=bucket, prefix=obj, maxResults=20).execute()
lst = next(iter(resp.get('items', [])), None)
return bool(lst)
def remove(self, path, recursive=True):
(bucket, obj) = self._path_to_bucket_and_key(path)
if self._is_root(obj):
raise InvalidDeleteException(
'Cannot delete root of bucket at path {}'.format(path))
if self._obj_exists(bucket, obj):
self.client.objects().delete(bucket=bucket, object=obj).execute()
_wait_for_consistency(lambda: not self._obj_exists(bucket, obj))
return True
if self.isdir(path):
if not recursive:
raise InvalidDeleteException(
'Path {} is a directory. Must use recursive delete'.format(path))
req = http.BatchHttpRequest()
for it in self._list_iter(bucket, self._add_path_delimiter(obj)):
req.add(self.client.objects().delete(bucket=bucket, object=it['name']))
req.execute()
_wait_for_consistency(lambda: not self.isdir(path))
return True
return False
def put(self, filename, dest_path, mimetype=None, chunksize=None):
chunksize = chunksize or self.chunksize
resumable = os.path.getsize(filename) > 0
mimetype = mimetype or mimetypes.guess_type(dest_path)[0] or DEFAULT_MIMETYPE
media = http.MediaFileUpload(filename, mimetype, chunksize=chunksize, resumable=resumable)
self._do_put(media, dest_path)
def put_string(self, contents, dest_path, mimetype=None):
mimetype = mimetype or mimetypes.guess_type(dest_path)[0] or DEFAULT_MIMETYPE
assert isinstance(mimetype, six.string_types)
if not isinstance(contents, six.binary_type):
contents = contents.encode("utf-8")
media = http.MediaIoBaseUpload(six.BytesIO(contents), mimetype, resumable=bool(contents))
self._do_put(media, dest_path)
def mkdir(self, path, parents=True, raise_if_exists=False):
if self.exists(path):
if raise_if_exists:
raise luigi.target.FileAlreadyExists()
elif not self.isdir(path):
raise luigi.target.NotADirectory()
else:
return
self.put_string(b"", self._add_path_delimiter(path), mimetype='text/plain')
def copy(self, source_path, destination_path):
src_bucket, src_obj = self._path_to_bucket_and_key(source_path)
dest_bucket, dest_obj = self._path_to_bucket_and_key(destination_path)
if self.isdir(source_path):
src_prefix = self._add_path_delimiter(src_obj)
dest_prefix = self._add_path_delimiter(dest_obj)
source_path = self._add_path_delimiter(source_path)
copied_objs = []
for obj in self.listdir(source_path):
suffix = obj[len(source_path):]
self.client.objects().copy(
sourceBucket=src_bucket,
sourceObject=src_prefix + suffix,
destinationBucket=dest_bucket,
destinationObject=dest_prefix + suffix,
body={}).execute()
copied_objs.append(dest_prefix + suffix)
_wait_for_consistency(
lambda: all(self._obj_exists(dest_bucket, obj)
for obj in copied_objs))
else:
self.client.objects().copy(
sourceBucket=src_bucket,
sourceObject=src_obj,
destinationBucket=dest_bucket,
destinationObject=dest_obj,
body={}).execute()
_wait_for_consistency(lambda: self._obj_exists(dest_bucket, dest_obj))
def rename(self, source_path, destination_path):
"""
Rename/move an object from one GCS location to another.
"""
self.copy(source_path, destination_path)
self.remove(source_path)
def listdir(self, path):
"""
Get an iterable with GCS folder contents.
Iterable contains paths relative to queried path.
"""
bucket, obj = self._path_to_bucket_and_key(path)
obj_prefix = self._add_path_delimiter(obj)
if self._is_root(obj_prefix):
obj_prefix = ''
obj_prefix_len = len(obj_prefix)
for it in self._list_iter(bucket, obj_prefix):
yield self._add_path_delimiter(path) + it['name'][obj_prefix_len:]
def download(self, path, chunksize=None):
chunksize = chunksize or self.chunksize
bucket, obj = self._path_to_bucket_and_key(path)
with tempfile.NamedTemporaryFile(delete=False) as fp:
# We can't return the tempfile reference because of a bug in python: http://bugs.python.org/issue18879
return_fp = _DeleteOnCloseFile(fp.name, 'r')
# Special case empty files because chunk-based downloading doesn't work.
result = self.client.objects().get(bucket=bucket, object=obj).execute()
if int(result['size']) == 0:
return return_fp
request = self.client.objects().get_media(bucket=bucket, object=obj)
downloader = http.MediaIoBaseDownload(fp, request, chunksize=chunksize)
attempts = 0
done = False
while not done:
error = None
try:
_, done = downloader.next_chunk()
except errors.HttpError as err:
error = err
if err.resp.status < 500:
raise
logger.warning('Error downloading file, retrying', exc_info=True)
except RETRYABLE_ERRORS as err:
logger.warning('Error downloading file, retrying', exc_info=True)
error = err
if error:
attempts += 1
if attempts >= NUM_RETRIES:
raise error
else:
attempts = 0
return return_fp
class _DeleteOnCloseFile(io.FileIO):
def close(self):
super(_DeleteOnCloseFile, self).close()
os.remove(self.name)
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return True
class AtomicGCSFile(luigi.target.AtomicLocalFile):
"""
A GCS file that writes to a temp file and is uploaded to GCS on close.
"""
def __init__(self, path, gcs_client):
self.gcs_client = gcs_client
super(AtomicGCSFile, self).__init__(path)
def move_to_final_destination(self):
self.gcs_client.put(self.tmp_path, self.path)
class GCSTarget(luigi.target.FileSystemTarget):
fs = None
def __init__(self, path, format=None, client=None):
super(GCSTarget, self).__init__(path)
if format is None:
format = luigi.format.get_default_format()
self.format = format
self.fs = client or GCSClient()
def open(self, mode='r'):
if mode == 'r':
return self.format.pipe_reader(self.fs.download(self.path))
elif mode == 'w':
return self.format.pipe_writer(AtomicGCSFile(self.path, self.fs))
else:
raise ValueError("Unsupported open mode '{}'".format(mode))
class GCSFlagTarget(GCSTarget):
"""
Defines a target directory with a flag-file (defaults to `_SUCCESS`) used
to signify job success.
This checks for two things:
* the path exists (just like the GCSTarget)
* the _SUCCESS file exists within the directory.
Because Hadoop outputs into a directory and not a single file,
the path is assumed to be a directory.
This is meant to be a handy alternative to AtomicGCSFile.
The AtomicFile approach can be burdensome for GCS since there are no directories, per se.
If we have 1,000,000 output files, then we have to rename 1,000,000 objects.
"""
fs = None
def __init__(self, path, format=None, client=None, flag='_SUCCESS'):
"""
Initializes a GCSFlagTarget.
:param path: the directory where the files are stored.
:type path: str
:param client:
:type client:
:param flag:
:type flag: str
"""
if format is None:
format = luigi.format.get_default_format()
if path[-1] != "/":
raise ValueError("S3FlagTarget requires the path to be to a "
"directory. It must end with a slash ( / ).")
super(GCSFlagTarget, self).__init__(path)
self.format = format
self.fs = client or GCSClient()
self.flag = flag
def exists(self):
flag_target = self.path + self.flag
return self.fs.exists(flag_target)
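# Usage sketch (bucket, paths and task wiring hypothetical): a task writing a
# partitioned output directory can signal completion via the flag file.
#
#     class MyTask(luigi.Task):
#         def output(self):
#             return GCSFlagTarget('gs://my-bucket/output/2015-01-01/')
#         def run(self):
#             ...  # write part files, then write the _SUCCESS flag object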
|
|
import xlwt
import numpy as np
import colour
from ..configuration.base_configuration import Filter
from ..configuration.base_configuration import TimeOfDayFilter
from ..core.status import Status
class report:
bold_style = xlwt.easyxf('font: bold 1')
no_dp_style = xlwt.easyxf(num_format_str='0')
one_dp_style = xlwt.easyxf(num_format_str='0.0')
two_dp_style = xlwt.easyxf(num_format_str='0.00')
three_dp_style = xlwt.easyxf(num_format_str='0.000')
four_dp_style = xlwt.easyxf(num_format_str='0.0000')
percent_style = xlwt.easyxf(num_format_str='0.00%')
percent_no_dp_style = xlwt.easyxf(num_format_str='0%')
def __init__(self, windSpeedBins, turbulenceBins, version="unknown", report_power_curve = True):
self.version = version
self.windSpeedBins = windSpeedBins
self.turbulenceBins = turbulenceBins
self.report_power_curve = report_power_curve
def report(self, path, analysis):
book = xlwt.Workbook()
plotsDir = analysis.config.path.replace(".xml","_PPAnalysisPlots")
analysis.png_plots(plotsDir)
gradient = colour.ColourGradient(-0.1, 0.1, 0.01, book)
if self.report_power_curve:
sh = book.add_sheet("PowerCurves", cell_overwrite_ok=True)
settingsSheet = book.add_sheet("Settings", cell_overwrite_ok=True)
self.reportSettings(settingsSheet, analysis)
if self.report_power_curve:
rowsAfterCurves = []
#rowsAfterCurves.append(self.reportPowerCurve(sh, 0, 0, 'uniqueAnalysisId', analysis.specifiedPowerCurve, analysis)) #needs fixing + move to settings sheet
if analysis.specifiedPowerCurve is not None:
if len(analysis.specifiedPowerCurve.powerCurveLevels) != 0:
rowsAfterCurves.append( self.reportPowerCurve(sh, 1, 0, 'Specified', analysis.specifiedPowerCurve, analysis))
if analysis.hasActualPower:
#for name in analysis.residualWindSpeedMatrices:
# residualMatrix = analysis.residualWindSpeedMatrices[name]
#
# if residualMatrix != None:
# self.reportPowerDeviations(book, "ResidualWindSpeed-%s" % name, residualMatrix, gradient)
if analysis.hasShear and analysis.innerMeasuredPowerCurve != None:
rowsAfterCurves.append(self.reportPowerCurve(sh, 1, 5, 'Inner', analysis.innerMeasuredPowerCurve, analysis) )
if analysis.innerTurbulenceMeasuredPowerCurve != None:
rowsAfterCurves.append( self.reportPowerCurve(sh, 1, 10, 'InnerTurbulence', analysis.innerTurbulenceMeasuredPowerCurve, analysis) )
if analysis.hasShear and analysis.outerMeasuredPowerCurve != None:
rowsAfterCurves.append(self.reportPowerCurve(sh, 1, 15, 'Outer', analysis.outerMeasuredPowerCurve, analysis) )
rowsAfterCurves.append( self.reportPowerCurve(sh, 1, 20, 'All', analysis.allMeasuredPowerCurve, analysis) )
if analysis.turbRenormActive:
rowsAfterCurves.append(self.reportPowerCurve(sh, 1, 25, 'TurbulenceRenormalisedPower', analysis.allMeasuredTurbCorrectedPowerCurve, analysis) )
if analysis.specifiedPowerCurve is not None:
rowAfterCurves = max(rowsAfterCurves) + 5
sh.write(rowAfterCurves-2, 0, "Power Curves Interpolated to Specified Bins:", self.bold_style)
specifiedLevels = analysis.specifiedPowerCurve.powerCurveLevels.index
if analysis.hasShear and analysis.innerMeasuredPowerCurve != None:
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, 5, 'Inner', analysis.innerMeasuredPowerCurve, specifiedLevels)
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, 10, 'InnerTurbulence', analysis.innerTurbulenceMeasuredPowerCurve, specifiedLevels)
if analysis.hasShear and analysis.outerMeasuredPowerCurve != None:
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, 15, 'Outer', analysis.outerMeasuredPowerCurve, specifiedLevels)
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, 20, 'All', analysis.allMeasuredPowerCurve, specifiedLevels)
if analysis.turbRenormActive:
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, 25, 'TurbulenceRenormalisedPower', analysis.allMeasuredTurbCorrectedPowerCurve, specifiedLevels)
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, (30 if analysis.turbRenormActive else 25), 'DayTime', analysis.dayTimePowerCurve, specifiedLevels)
self.reportInterpolatedPowerCurve(sh, rowAfterCurves, (35 if analysis.turbRenormActive else 30), 'NightTime', analysis.nightTimePowerCurve, specifiedLevels)
self.reportPowerDeviations(book, "HubPowerDeviations", analysis.hubPowerDeviations, gradient)
#self.reportPowerDeviations(book, "HubPowerDeviationsInnerShear", analysis.hubPowerDeviationsInnerShear, gradient)
if analysis.rewsActive:
self.reportPowerDeviations(book, "REWSPowerDeviations", analysis.rewsPowerDeviations, gradient)
#self.reportPowerDeviationsDifference(book, "Hub-REWS-DevDiff", analysis.hubPowerDeviations, analysis.rewsPowerDeviations, gradient)
self.reportPowerDeviations(book, "REWS Deviation", analysis.rewsMatrix, gradient)
if analysis.hasShear: self.reportPowerDeviations(book, "REWS Deviation Inner Shear", analysis.rewsMatrixInnerShear, gradient)
if analysis.hasShear: self.reportPowerDeviations(book, "REWS Deviation Outer Shear", analysis.rewsMatrixOuterShear, gradient)
#self.reportPowerDeviations(book, "REWSPowerDeviationsInnerShear", analysis.rewsPowerDeviationsInnerShear, gradient)
if analysis.turbRenormActive:
self.reportPowerDeviations(book, "TurbPowerDeviations", analysis.turbPowerDeviations, gradient)
#self.reportPowerDeviationsDifference(book, "Hub-Turb-DevDiff", analysis.hubPowerDeviations, analysis.turbPowerDeviations, gradient)
#self.reportPowerDeviations(book, "TurbPowerDeviationsInnerShear", analysis.turbPowerDeviationsInnerShear, gradient)
if analysis.turbRenormActive and analysis.rewsActive:
self.reportPowerDeviations(book, "CombPowerDeviations", analysis.combPowerDeviations, gradient)
#self.reportPowerDeviationsDifference(book, "Hub-Comb-DevDiff", analysis.hubPowerDeviations, analysis.combPowerDeviations, gradient)
#self.reportPowerDeviations(book, "CombPowerDeviationsInnerShear", analysis.combPowerDeviationsInnerShear, gradient)
if analysis.powerDeviationMatrixActive:
self.reportPowerDeviations(book, "PowerDeviationMatrixDeviations", analysis.powerDeviationMatrixDeviations, gradient)
#self.reportPowerDeviationsDifference(book, "Hub-Turb-DevDiff", analysis.hubPowerDeviations, analysis.turbPowerDeviations, gradient)
#self.reportPowerDeviations(book, "TurbPowerDeviationsInnerShear", analysis.turbPowerDeviationsInnerShear, gradient)
if analysis.config.nominal_wind_speed_distribution.absolute_path is not None:
sh = book.add_sheet("EnergyAnalysis", cell_overwrite_ok=True)
self.report_aep(sh,analysis)
if len(analysis.calibrations) == 1:
calSheet = book.add_sheet("Calibration", cell_overwrite_ok=True)
self.reportCalibration(calSheet,analysis.calibrations[0],timeStepInSeconds = analysis.timeStepInSeconds)
elif len(analysis.calibrations) > 1:
i = 0
for cal in analysis.calibrations:
i += 1
calSheet = book.add_sheet("Calibration_%03d" % i, cell_overwrite_ok=True)
self.reportCalibration(calSheet,cal,timeStepInSeconds = analysis.timeStepInSeconds)
book.save(path)
def reportCalibration(self,sh,calibration,timeStepInSeconds = 600.):
conf, calib = calibration
sh.write(0, 0, "Dataset Name", self.bold_style)
sh.write(1, 0, conf.name)
startRow = 3
col = -14
if 'belowAbove' in calib.calibrationSectorDataframe.columns :
belowAbove = True
else:
belowAbove = False
col+=16
row=startRow
sh.write(row,col,conf.name, self.bold_style)
sh.write(row,col+1,"Method:"+conf.calibrationMethod, self.bold_style)
row += 1
sh.write(row,col,"Bin", self.bold_style)
sh.write(row,col+1,"Slope", self.bold_style)
sh.write(row,col+2,"Offset", self.bold_style)
if conf.calibrationMethod != 'Specified':
sh.write(row,col+3,"Count", self.bold_style)
sh.write(row,col+4,"Hours", self.bold_style)
if belowAbove:
sh.write(row,col+5,"Count <= 8m/s", self.bold_style)
sh.write(row,col+6,"Hours <= 8m/s", self.bold_style)
sh.write(row,col+7,"Count > 8m/s", self.bold_style)
sh.write(row,col+8,"Hours > 8m/s", self.bold_style)
sh.write(row,col+9,"Speedup at 10m/s", self.bold_style)
sh.write(row,col+10,"% Speedup at 10m/s", self.bold_style)
sh.write(row,col+11,"Filter (Total Hours > 24)", self.bold_style)
sh.write(row,col+12,"Filter (Hours Below/Above 8m/s > 6)", self.bold_style)
sh.write(row,col+13,"Filter (Speedup Change < 2%)", self.bold_style)
sh.write(row,col+14,"Valid Sector", self.bold_style)
row+=1
for key in sorted(calib.calibrationSectorDataframe.index):
sh.write(row,col,float(key), self.bold_style)
sh.write(row,col+1,calib.calibrationSectorDataframe['Slope'][key], self.four_dp_style)
sh.write(row,col+2,calib.calibrationSectorDataframe['Offset'][key], self.four_dp_style)
if conf.calibrationMethod != 'Specified':
if 'Count' in calib.calibrationSectorDataframe.columns:
sh.write(row,col+3,calib.calibrationSectorDataframe['Count'][key], self.no_dp_style)
sh.write(row,col+4,calib.calibrationSectorDataframe['Count'][key]*(timeStepInSeconds/3600.0), self.one_dp_style)
if belowAbove:
ba = calib.calibrationSectorDataframe.loc[key,'belowAbove']
sh.write(row,col+5,ba[0], self.no_dp_style)
sh.write(row,col+6,ba[0]*(timeStepInSeconds/3600.0), self.one_dp_style)
sh.write(row,col+7,ba[1], self.no_dp_style)
sh.write(row,col+8,ba[1]*(timeStepInSeconds/3600.0), self.one_dp_style)
sh.write(row,col+9,calib.calibrationSectorDataframe['SpeedUpAt10'][key], self.four_dp_style)
sh.write(row,col+10,(calib.calibrationSectorDataframe['SpeedUpAt10'][key]-1.0), self.percent_style)
totalHoursValid = calib.getTotalHoursValidity(key, timeStepInSeconds)
sh.write(row,col+11, "TRUE" if totalHoursValid else "FALSE")
if belowAbove:
belowAboveValid = calib.getBelowAboveValidity(key, timeStepInSeconds)
sh.write(row,col+12, "TRUE" if belowAboveValid else "FALSE")
speedUpChangeValid = calib.getSpeedUpChangeValidity(key)
sh.write(row,col+13, "TRUE" if speedUpChangeValid else "FALSE")
sectorValid = calib.getSectorValidity(key, timeStepInSeconds)
sh.write(row,col+14, "TRUE" if sectorValid else "FALSE", self.bold_style)
row += 1
if len(conf.calibrationFilters) > 0:
row += 2
sh.write(row, col, "Calibration Filters", self.bold_style)
row += 1
sh.write(row, col, "Data Column", self.bold_style)
sh.write(row, col+1, "Filter Type", self.bold_style)
sh.write(row, col+2, "Inclusive", self.bold_style)
sh.write(row, col+3, "Filter Value", self.bold_style)
sh.write(row, col+4, "Active", self.bold_style)
row += 1
for filt in conf.calibrationFilters:
if isinstance(filt, TimeOfDayFilter):
sh.write(row, col, "Time Of Day Filter")
sh.write(row, col + 1, str(filt.startTime))
sh.write(row, col + 2, str(filt.endTime))
sh.write(row, col + 3, str(filt.daysOfTheWeek))
sh.write(row, col + 4, str(filt.months))
else:
sh.write(row, col, filt.column)
sh.write(row, col+1, filt.filterType)
sh.write(row, col+2, filt.inclusive)
sh.write(row, col+3, str(filt))
sh.write(row, col+4, filt.active) # always true if in list...
row += 1
def reportSettings(self, sh, analysis):
config = analysis.config
sh.write(0, 1, "PCWG Tool Version Number:")
sh.write(0, 2, self.version)
sh.write(0, 3, xlwt.Formula('HYPERLINK("http://www.pcwg.org";"PCWG Website")'))
row = 3
labelColumn = 1
dataColumn = 2
sh.col(labelColumn).width = 256 * 30
sh.col(dataColumn).width = 256 * 50
sh.col(dataColumn+1).width = 256 * 50
#Corrections
sh.write(row, labelColumn, "Density Correction Active", self.bold_style)
sh.write(row, dataColumn, config.densityCorrectionActive)
row += 1
sh.write(row, labelColumn, "REWS Correction Active", self.bold_style)
sh.write(row, dataColumn, config.rewsActive)
row += 1
sh.write(row, labelColumn, "Turbulence Correction Active", self.bold_style)
sh.write(row, dataColumn, config.turbRenormActive)
row += 1
#General Settings
row += 1
sh.write(row, labelColumn, "Time Step In Seconds", self.bold_style)
sh.write(row, dataColumn, analysis.timeStepInSeconds)
row += 1
sh.write(row, labelColumn, "Power Curve Minimum Count", self.bold_style)
sh.write(row, dataColumn, config.powerCurveMinimumCount)
row += 1
sh.write(row, labelColumn, "Baseline Mode", self.bold_style)
sh.write(row, dataColumn, config.baseLineMode)
row += 1
sh.write(row, labelColumn, "Filter Mode", self.bold_style)
sh.write(row, dataColumn, config.filterMode)
row += 1
sh.write(row, labelColumn, "Power Curve Mode", self.bold_style)
sh.write(row, dataColumn, config.powerCurveMode)
row += 1
#Inner Range
row += 1
sh.write(row, labelColumn, "Inner Range", self.bold_style)
row += 1
sh.write(row, labelColumn, "Lower Turbulence", self.bold_style)
sh.write(row, dataColumn, config.innerRangeLowerTurbulence)
row += 1
sh.write(row, labelColumn, "Upper Turbulence", self.bold_style)
sh.write(row, dataColumn, config.innerRangeUpperTurbulence)
row += 1
sh.write(row, labelColumn, "Lower Shear", self.bold_style)
sh.write(row, dataColumn, config.innerRangeLowerShear)
row += 1
sh.write(row, labelColumn, "Upper Shear", self.bold_style)
sh.write(row, dataColumn, config.innerRangeUpperShear)
row += 1
#Turbine
row += 1
sh.write(row, labelColumn, "Turbine", self.bold_style)
row += 1
sh.write(row, labelColumn, "Specified Power Curve", self.bold_style)
sh.write(row, dataColumn, config.specified_power_curve.absolute_path)
row += 1
#datasets
row += 1
sh.write(row, labelColumn, "Datasets", self.bold_style)
row += 2
for datasetConfig in analysis.datasetConfigs:
sh.write(row, labelColumn, "Name", self.bold_style)
sh.write(row, dataColumn, datasetConfig.name)
row += 1
sh.write(row, labelColumn, "Path", self.bold_style)
sh.write(row, dataColumn, datasetConfig.path)
row += 1
sh.write(row, labelColumn, "Rated Power", self.bold_style)
sh.write(row, dataColumn, datasetConfig.ratedPower)
row += 1
sh.write(row, labelColumn, "HubHeight", self.bold_style)
sh.write(row, dataColumn, datasetConfig.hubHeight)
row += 1
sh.write(row, labelColumn, "Diameter", self.bold_style)
sh.write(row, dataColumn, datasetConfig.diameter)
row += 1
sh.write(row, labelColumn, "Cut In Wind Speed", self.bold_style)
sh.write(row, dataColumn, datasetConfig.cutInWindSpeed)
row += 1
sh.write(row, labelColumn, "Cut Out Wind Speed", self.bold_style)
sh.write(row, dataColumn, datasetConfig.cutOutWindSpeed)
row += 1
sh.write(row, labelColumn, "Start Date", self.bold_style)
sh.write(row, dataColumn, str(datasetConfig.startDate))
row += 1
sh.write(row, labelColumn, "End Date", self.bold_style)
sh.write(row, dataColumn, str(datasetConfig.endDate))
row += 1
sh.write(row, labelColumn, "Hub Wind Speed Mode", self.bold_style)
sh.write(row, dataColumn, datasetConfig.hubWindSpeedMode)
row += 1
sh.write(row, labelColumn, "Density Mode", self.bold_style)
sh.write(row, dataColumn, datasetConfig.densityMode)
row += 2
sh.write(row, labelColumn, "REWS Defined", self.bold_style)
sh.write(row, dataColumn, datasetConfig.rewsDefined)
row += 1
sh.write(row, labelColumn, "Rotor Mode", self.bold_style)
sh.write(row, dataColumn, datasetConfig.rotorMode)
row += 1
sh.write(row, labelColumn, "Hub Mode", self.bold_style)
sh.write(row, dataColumn, datasetConfig.hubMode)
row += 1
sh.write(row, labelColumn, "Number of Rotor Levels", self.bold_style)
sh.write(row, dataColumn, datasetConfig.numberOfRotorLevels)
row += 2
sh.write(row, labelColumn, "Measurements", self.bold_style)
row += 1
sh.write(row, labelColumn, "Input Time Series Path", self.bold_style)
sh.write(row, dataColumn, datasetConfig.input_time_series.absolute_path)
row += 1
sh.write(row, labelColumn, "Date Format", self.bold_style)
sh.write(row, dataColumn, datasetConfig.dateFormat)
row += 1
sh.write(row, labelColumn, "Time Step In Seconds", self.bold_style)
sh.write(row, dataColumn, datasetConfig.timeStepInSeconds)
row += 1
sh.write(row, labelColumn, "Time Stamp", self.bold_style)
sh.write(row, dataColumn, datasetConfig.timeStamp)
row += 1
sh.write(row, labelColumn, "Bad Data Value", self.bold_style)
sh.write(row, dataColumn, datasetConfig.badData)
row += 1
sh.write(row, labelColumn, "Header Rows", self.bold_style)
sh.write(row, dataColumn, datasetConfig.headerRows)
row += 1
sh.write(row, labelColumn, "Turbine Location Wind Speed", self.bold_style)
sh.write(row, dataColumn, datasetConfig.turbineLocationWindSpeed)
row += 1
sh.write(row, labelColumn, "Hub Wind Speed", self.bold_style)
sh.write(row, dataColumn, datasetConfig.hubWindSpeed)
row += 1
sh.write(row, labelColumn, "Hub Turbulence", self.bold_style)
sh.write(row, dataColumn, datasetConfig.hubTurbulence)
row += 1
sh.write(row, labelColumn, "Reference Wind Speed", self.bold_style)
sh.write(row, dataColumn, datasetConfig.referenceWindSpeed)
row += 1
sh.write(row, labelColumn, "Reference Wind Speed Std Dev", self.bold_style)
sh.write(row, dataColumn, datasetConfig.referenceWindSpeedStdDev)
row += 1
sh.write(row, labelColumn, "Reference Wind Direction", self.bold_style)
sh.write(row, dataColumn, datasetConfig.referenceWindDirection)
row += 1
sh.write(row, labelColumn, "Reference Wind Direction Offset", self.bold_style)
sh.write(row, dataColumn, datasetConfig.referenceWindDirectionOffset)
row += 1
sh.write(row, labelColumn, "Density", self.bold_style)
sh.write(row, dataColumn, datasetConfig.density)
row += 1
sh.write(row, labelColumn, "Temperature", self.bold_style)
sh.write(row, dataColumn, datasetConfig.temperature)
row += 1
sh.write(row, labelColumn, "Pressure", self.bold_style)
sh.write(row, dataColumn, datasetConfig.pressure)
row += 1
if len(datasetConfig.turbineShearMeasurements) > 0:
row = self.writeShear(sh,labelColumn,dataColumn,row,datasetConfig.referenceShearMeasurements,'Reference Location ')
row = self.writeShear(sh,labelColumn,dataColumn,row,datasetConfig.turbineShearMeasurements,'Turbine Location ')
else:
row = self.writeShear(sh,labelColumn,dataColumn,row,datasetConfig.referenceShearMeasurements)
sh.write(row, labelColumn, "Power", self.bold_style)
sh.write(row, dataColumn, datasetConfig.power)
row += 2
if datasetConfig.rewsDefined:
sh.write(row, labelColumn, "Profile Levels", self.bold_style)
row += 1
sh.write(row, labelColumn, "Height", self.bold_style)
sh.write(row, dataColumn, "Speed", self.bold_style)
sh.write(row, dataColumn + 1, "Direction", self.bold_style)
row += 1
for height in sorted(datasetConfig.data.windSpeedLevels):
sh.write(row, labelColumn, height)
sh.write(row, dataColumn, datasetConfig.data.windSpeedLevels[height])
if hasattr(datasetConfig.data, 'windDirectionLevels'): # we are not using this in REWS yet
if height in datasetConfig.data.windDirectionLevels:
sh.write(row, dataColumn + 1, datasetConfig.data.windDirectionLevels[height])
row += 1
sh.write(row, labelColumn, "Filters", self.bold_style)
row += 1
sh.write(row, labelColumn, "Data Column", self.bold_style)
sh.write(row, dataColumn, "Filter Type", self.bold_style)
sh.write(row, dataColumn + 1, "Inclusive", self.bold_style)
sh.write(row, dataColumn + 2, "Filter Value", self.bold_style)
sh.write(row, dataColumn + 3, "Active", self.bold_style)
row += 1
for filter in datasetConfig.filters:
if isinstance(filter, TimeOfDayFilter):
sh.write(row, labelColumn, "Time Of Day Filter")
sh.write(row, dataColumn, str(filter.startTime))
sh.write(row, dataColumn + 1, str(filter.endTime))
sh.write(row, dataColumn + 2, str(filter.daysOfTheWeek))
sh.write(row, dataColumn + 3, str(filter.months))
else:
sh.write(row, labelColumn, filter.column)
sh.write(row, dataColumn, filter.filterType)
sh.write(row, dataColumn + 1, filter.inclusive)
sh.write(row, dataColumn + 2, str(filter))
sh.write(row, dataColumn + 3, "True") # always true if in list...
row += 1
def writeShear(self,sh,labelColumn,dataColumn,row,shearList,prefix=""):
i=0
for sh_meas in shearList:
sh.write(row, labelColumn, prefix+"Shear Measurement " + str(i+1), self.bold_style)
sh.write(row, dataColumn, sh_meas.height)
row += 1
sh.write(row, labelColumn, prefix+"Shear Measurement {0} Height ".format(i+1), self.bold_style)
sh.write(row, dataColumn, sh_meas.wind_speed_column)
row += 1
return row
def reportPowerCurve(self, sh, rowOffset, columnOffset, name, powerCurve, analysis):
powerCurveLevels = powerCurve.powerCurveLevels.copy()
if powerCurve.inputHubWindSpeed is None:
powerCurveLevels['Specified Wind Speed'] = powerCurveLevels.index
windSpeedCol = 'Specified Wind Speed'
else:
windSpeedCol = analysis.inputHubWindSpeed #'Input Hub Wind Speed'
powerCurveLevels = powerCurveLevels.sort(windSpeedCol)
sh.write(rowOffset, columnOffset + 2, name, self.bold_style)
sh.col(columnOffset + 1).width = 256 * 15
sh.col(columnOffset + 2).width = 256 * 15
sh.col(columnOffset + 3).width = 256 * 15
if powerCurve.inputHubWindSpeed is None:
sh.col(columnOffset + 5).width = 256 * 5
else:
sh.col(columnOffset + 4).width = 256 * 15
sh.col(columnOffset + 5).width = 256 * 5
rowOrders = { 'Data Count':4,
analysis.actualPower:2, analysis.hubTurbulence:3, analysis.inputHubWindSpeed:1,
'Specified Power':2,'Specified Turbulence':3, 'Specified Wind Speed':1,
analysis.measuredTurbulencePower:2}
styles = { 'Data Count':self.no_dp_style, analysis.inputHubWindSpeed:self.two_dp_style,
analysis.actualPower: self.no_dp_style, analysis.hubTurbulence:self.percent_no_dp_style,
'Specified Power':self.no_dp_style,'Specified Turbulence':self.percent_no_dp_style,
'Specified Wind Speed':self.two_dp_style,analysis.measuredTurbulencePower:self.no_dp_style}
for colname in powerCurveLevels.columns:
if colname in styles.keys():
sh.write(rowOffset + 1, columnOffset + rowOrders[colname], colname, self.bold_style)
countRow = 1
for windSpeed in powerCurveLevels.index:
for colname in powerCurveLevels.columns:
if colname in styles.keys():
val = powerCurveLevels[colname][windSpeed]
if type(val) is np.int64:
#xlwt needs numbers to be recognisable as integers or floats; isinstance(np.int64(1), int) returns False.
#Other numpy types (int32, float64, etc) are recognised as int and float appropriately.
val = int(val)
sh.write(rowOffset + countRow + 1, columnOffset + rowOrders[colname], val, styles[colname])
countRow += 1
if hasattr(powerCurve, 'zeroTurbulencePowerCurve'):
countRow += 3
try:
pc = powerCurve.zeroTurbulencePowerCurve.dfPowerLevels
sh.write(rowOffset + countRow, columnOffset + 2, name + ' Zero TI Power Curve', self.bold_style)
countRow += 1
sh.write(rowOffset + countRow, columnOffset + 1, 'Wind Speed', self.bold_style)
sh.write(rowOffset + countRow, columnOffset + 2, 'Power', self.bold_style)
for ws in pc.index:
sh.write(rowOffset + countRow + 1, columnOffset + 1, ws, styles['Specified Wind Speed'])
sh.write(rowOffset + countRow + 1, columnOffset + 2, pc.loc[ws, 'Power'], styles['Specified Wind Speed'])
countRow += 1
except:
sh.write(rowOffset + countRow, columnOffset + 2,'Zero TI Power Curve not calculated successfully for %s power curve.' % name)
countRow+=1
else:
countRow += 3
Status.add("Not reporting zero TI power curve for %s as it is not defined." % (name), verbosity=2)
sh.write(rowOffset + countRow, columnOffset + 2,"Not reporting zero TI power curve for %s as it is not defined." % (name))
countRow+=1
return countRow
def reportInterpolatedPowerCurve(self, sh, rowOffset, columnOffset, name, powerCurve, levels):
sh.write(rowOffset, columnOffset + 2, name, self.bold_style)
sh.write(rowOffset + 1, columnOffset + 1, "Wind Speed", self.bold_style)
sh.write(rowOffset + 1, columnOffset + 2, "Power", self.bold_style)
sh.write(rowOffset + 1, columnOffset + 3, "Turbulence", self.bold_style)
count = 1
for windSpeed in sorted(levels):
sh.write(rowOffset + count + 1, columnOffset + 1, windSpeed, self.two_dp_style)
sh.write(rowOffset + count + 1, columnOffset + 2, float(powerCurve.powerFunction(windSpeed)), self.no_dp_style)
sh.write(rowOffset + count + 1, columnOffset + 3, float(powerCurve.turbulenceFunction(windSpeed)), self.percent_no_dp_style)
count += 1
def reportPowerDeviations(self, book, sheetName, powerDeviations, gradient):
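        """Write the power deviation matrix to its own sheet, with one row per
        turbulence bin and one column per wind speed bin; each populated cell
        is coloured according to the supplied gradient."""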
sh = book.add_sheet(sheetName, cell_overwrite_ok=True)
sh.write_merge(1,self.turbulenceBins.numberOfBins,0,0,"Turbulence Intensity", xlwt.easyxf('align: rotation 90'))
sh.write_merge(self.turbulenceBins.numberOfBins+2,self.turbulenceBins.numberOfBins+2,2,self.windSpeedBins.numberOfBins+1, "Wind Speed", self.bold_style)
for i in range(self.windSpeedBins.numberOfBins):
sh.col(i + 2).width = 256 * 5
for j in range(self.turbulenceBins.numberOfBins):
turbulence = self.turbulenceBins.binCenterByIndex(j)
row = self.turbulenceBins.numberOfBins - j
sh.write(row, 1, turbulence, self.percent_no_dp_style)
for i in range(self.windSpeedBins.numberOfBins):
windSpeed = self.windSpeedBins.binCenterByIndex(i)
col = i + 2
if j == 0:
sh.write(self.turbulenceBins.numberOfBins+1, col, windSpeed, self.one_dp_style)
if windSpeed in powerDeviations.matrix:
if turbulence in powerDeviations.matrix[windSpeed]:
deviation = powerDeviations.matrix[windSpeed][turbulence]
if not np.isnan(deviation):
sh.write(row, col, deviation, gradient.getStyle(deviation))
def reportPowerDeviationsDifference(self, book, sheetName, deviationsA, deviationsB, gradient):
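        """Write a matrix of abs(deviationA) - abs(deviationB) for every bin
        where both deviation matrices hold a non-NaN value."""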
sh = book.add_sheet(sheetName, cell_overwrite_ok=True)
for i in range(self.windSpeedBins.numberOfBins):
sh.col(i + 1).width = 256 * 5
for j in range(self.turbulenceBins.numberOfBins):
turbulence = self.turbulenceBins.binCenterByIndex(j)
row = self.turbulenceBins.numberOfBins - j - 1
sh.write(row, 0, turbulence, self.percent_no_dp_style)
for i in range(self.windSpeedBins.numberOfBins):
windSpeed = self.windSpeedBins.binCenterByIndex(i)
col = i + 1
if j == 0: sh.write(self.turbulenceBins.numberOfBins, col, windSpeed, self.one_dp_style)
if windSpeed in deviationsA.matrix:
if turbulence in deviationsA.matrix[windSpeed]:
deviationA = deviationsA.matrix[windSpeed][turbulence]
deviationB = deviationsB.matrix[windSpeed][turbulence]
if not np.isnan(deviationA) and not np.isnan(deviationB):
diff = abs(deviationA) - abs(deviationB)
sh.write(row, col, diff, gradient.getStyle(diff))
def report_aep(self,sh,analysis):
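        """Write the AEP summary sheet: the acceptance test summary, the
        measured power curve table, the acceptance checks and the AEP
        distribution by wind speed bin."""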
        # Get tables in PP report form.
# Summary of EY acceptance test results:
hrsMultiplier = (analysis.timeStepInSeconds/3600.0)
row = 2
tall_style = xlwt.easyxf('font:height 360;') # 18pt
first_row = sh.row(row)
first_row.set_style(tall_style)
sh.write(row,2, "Reference Turbine", self.bold_style)
sh.write(row,3, "Measured (LCB) Pct of Warranted Annual Energy Yield (%)", self.bold_style)
sh.write(row,4, "Extrapolated Pct of Warranted Annual Energy Yield (%)", self.bold_style)
sh.write(row,5, "Last Complete Bin (LCB)", self.bold_style)
sh.write(row,6, "Direction Sectors Analysed (degrees)", self.bold_style)
sh.write(row,7, "Measured Hours", self.bold_style)
#sh.write(row,8, "Annual Energy Yield Uncertainty as a percentage of the Warranted Annual Yield (%)", self.bold_style)
row += 1
sh.write(row,2, analysis.config.Name)
sh.write(row,3, analysis.aepCalcLCB.AEP*100, self.two_dp_style)
sh.write(row,4, analysis.aepCalc.AEP*100, self.two_dp_style)
sh.write(row,5, analysis.aepCalcLCB.lcb, self.two_dp_style)
sh.write(row,6, "{mi} - {ma}".format(mi=analysis.dataFrame[analysis.windDirection].min(),ma=analysis.dataFrame[analysis.windDirection].max()))
timeCovered = analysis.allMeasuredPowerCurve.powerCurveLevels[analysis.dataCount].sum() * hrsMultiplier
sh.write(row,7, timeCovered, self.two_dp_style)
#sh.write(row,8, "NOT YET CALCULATED")
row += 3
if hasattr(analysis.specifiedPowerCurve,"referenceDensity"):
sh.write_merge(row,row,2,6, "Measured Power Curve\n Reference Air Density = {ref} kg/m^3".format(ref=analysis.specifiedPowerCurve.referenceDensity), self.bold_style)
#sh.write(row,7, "Category A Uncertainty", self.bold_style)
#sh.write(row,8, "Category B Uncertainty", self.bold_style)
#sh.write(row,9, "Category C Uncertainty", self.bold_style)
row += 1
sh.write(row,2, "Bin No", self.bold_style)
sh.write(row,3, "Bin Centre Wind Speed", self.bold_style)
sh.write(row,4, "Hub Height Wind Speed", self.bold_style)
sh.write(row,5, "Power Output", self.bold_style)
sh.write(row,6, "Cp", self.bold_style)
sh.write(row,7, "Qty 10-Min Data", self.bold_style)
sh.write(row,8, "Standard Deviation", self.bold_style)
#sh.write(row,7, "Standard Uncertainty", self.bold_style)
#sh.write(row,8, "Standard Uncertainty", self.bold_style)
#sh.write(row,9, "Standard Uncertainty", self.bold_style)
row += 1
sh.write(row,2, "I", self.bold_style)
sh.write(row,3, "Vi_centre", self.bold_style)
sh.write(row,4, "Vi", self.bold_style)
sh.write(row,5, "Pi", self.bold_style)
sh.write(row,7, "Ni", self.bold_style)
sh.write(row,8, "StDev i", self.bold_style)
#sh.write(row,7, "si", self.bold_style)
#sh.write(row,8, "ui", self.bold_style)
#sh.write(row,9, "uc,I", self.bold_style)
row += 1
sh.write(row,3, "[m/s]", self.bold_style)
sh.write(row,4, "[kW]", self.bold_style)
sh.write(row,8, "[kW]", self.bold_style)
#sh.write(row,7, "[kW]", self.bold_style)
#sh.write(row,8, "[kW]", self.bold_style)
#sh.write(row,9, "[kW]", self.bold_style)
        for binNo, ws in enumerate(analysis.allMeasuredPowerCurve.powerCurveLevels.index):
if ws <= analysis.aepCalcLCB.lcb and analysis.allMeasuredPowerCurve.powerCurveLevels[analysis.dataCount][ws] > 0:
row+=1
sh.write(row,2, binNo+1, self.no_dp_style)
sh.write(row,3, ws, self.one_dp_style)
sh.write(row,4, analysis.allMeasuredPowerCurve.powerCurveLevels[analysis.inputHubWindSpeed][ws], self.two_dp_style)
sh.write(row,5, analysis.allMeasuredPowerCurve.powerCurveLevels[analysis.actualPower][ws], self.two_dp_style)
if analysis.powerCoeff in analysis.allMeasuredPowerCurve.powerCurveLevels.columns:
sh.write(row,6, analysis.allMeasuredPowerCurve.powerCurveLevels[analysis.powerCoeff][ws], self.two_dp_style)
else:
sh.write(row,6, "-", self.no_dp_style)
datCount = analysis.allMeasuredPowerCurve.powerCurveLevels[analysis.dataCount][ws]
sh.write(row,7, datCount, self.no_dp_style)
if analysis.powerStandDev in analysis.allMeasuredPowerCurve.powerCurveLevels.columns:
sh.write(row,8, analysis.allMeasuredPowerCurve.powerCurveLevels[analysis.powerStandDev][ws])
else:
sh.write(row,8, "-", self.no_dp_style)
#sh.write(row,7, "-", self.no_dp_style)
#sh.write(row,8, "~", self.no_dp_style)
#sh.write(row,9, "-", self.no_dp_style)
row+=2
sh.write_merge(row,row,2,5, "More than 180 hours of data:", self.bold_style)
sh.write(row,6, "TRUE" if timeCovered > 180 else "FALSE")
sh.write(row,7, "({0} Hours)".format(round(timeCovered,2)) , self.two_dp_style)
row+=1
if hasattr(analysis,"windSpeedAt85pctX1pnt5"):
sh.write_merge(row,row,2,5, "Largest WindSpeed > {0}:".format(round(analysis.windSpeedAt85pctX1pnt5,2)), self.bold_style)
sh.write(row,6, "TRUE" if analysis.aepCalcLCB.lcb > analysis.windSpeedAt85pctX1pnt5 else "FALSE")
sh.write(row,7, "Threshold is 1.5*([email protected]*RatedPower)")
row+=1
sh.write_merge(row,row,2,5, "AEP Extrap. within 1% of AEP LCB:",self.bold_style)
ans = abs(1-(analysis.aepCalc.AEP/analysis.aepCalcLCB.AEP)) < 0.01
sh.write(row,6, "TRUE" if ans else "FALSE")
if not ans:
sh.write(row,8, analysis.aepCalc.AEP)
sh.write(row,9, analysis.aepCalcLCB.AEP)
if analysis.turbRenormActive:
row += 2
sh.write(row,3, "Turbulence Corrected Measured (LCB) Pct of Warranted Annual Energy Yield (%)", self.bold_style)
sh.write(row,4, "Turbulence Corrected Extrapolated Pct of Warranted Annual Energy Yield (%)", self.bold_style)
sh.write(row+1,3, analysis.turbCorrectedAepCalcLCB.AEP*100, self.two_dp_style)
sh.write(row+1,4, analysis.turbCorrectedAepCalc.AEP*100, self.two_dp_style)
row+=2
sh.write_merge(row,row,3,10,"AEP Distribution",self.bold_style)
row+=1
sh.write_merge(row,row,3,6, "Reference", self.bold_style)
sh.write_merge(row,row,7,10, "Measured", self.bold_style)
row+=1
sh.write(row,2,"Wind Speed",self.bold_style)
sh.write(row,3,'Reference Freq',self.bold_style)
sh.write(row,4,'Reference Power',self.bold_style)
sh.write(row,5,'Reference Power (Resampled)',self.bold_style)
sh.write(row,6,"Reference Energy",self.bold_style)
sh.write(row,7,'Measured Freq',self.bold_style)
sh.write(row,8,'Measured Power',self.bold_style)
sh.write(row,9,'Measured Power (Resampled)',self.bold_style)
sh.write(row,10,"Measured Energy",self.bold_style)
for binNum in analysis.aepCalc.energy_distribution.index:
row+=1
sh.write(row,2,binNum,self.two_dp_style)
sh.write(row,3,analysis.aepCalc.energy_distribution.loc[binNum,"Reference_Freq"] ,self.four_dp_style)
sh.write(row,4,analysis.aepCalc.energy_distribution.loc[binNum,"Reference_Upper"] ,self.four_dp_style)
sh.write(row,5,analysis.aepCalc.energy_distribution.loc[binNum,"Reference_Power"] ,self.four_dp_style)
sh.write(row,6,analysis.aepCalc.energy_distribution.loc[binNum,"Reference_Energy"] ,self.four_dp_style)
sh.write(row,7,analysis.aepCalc.energy_distribution.loc[binNum,"Measured_Freq"] ,self.four_dp_style)
sh.write(row,8,analysis.aepCalc.energy_distribution.loc[binNum,"Measured_Upper"] ,self.four_dp_style)
sh.write(row,9,analysis.aepCalc.energy_distribution.loc[binNum,"Measured_Power"] ,self.four_dp_style)
sh.write(row,10,analysis.aepCalc.energy_distribution.loc[binNum,"Measured_Energy"] ,self.four_dp_style)
row+=3
def write_power_curves(self):
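        """Log a tab-separated comparison of the specified, inner, outer and
        all-measured power curves via Status (verbosity 2)."""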
Status.add("Wind Speed\tSpecified\tInner\tOuter\tAll", verbosity=2)
for i in range(self.windSpeedBins.numberOfBins):
windSpeed = self.windSpeedBins.binCenterByIndex(i)
text = "%0.4f\t" % windSpeed
if windSpeed in self.specifiedPowerCurve.powerCurveLevels:
text += "%0.4f\t" % self.specifiedPowerCurve.powerCurveLevels[windSpeed]
else:
text += "\t"
if windSpeed in self.innerMeasuredPowerCurve.powerCurveLevels:
text += "%0.4f\t" % self.innerMeasuredPowerCurve.powerCurveLevels[windSpeed]
else:
text += "\t"
if windSpeed in self.outerMeasuredPowerCurve.powerCurveLevels:
text += "%0.4f\t" % self.outerMeasuredPowerCurve.powerCurveLevels[windSpeed]
else:
text += "\t"
if windSpeed in self.allMeasuredPowerCurve.powerCurveLevels:
text += "%0.4f\t" % self.allMeasuredPowerCurve.powerCurveLevels[windSpeed]
else:
text += "\t"
Status.add(text, verbosity=2)
def write_power_deviation_matrix(self):
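        """Log the power deviation matrix as tab-separated text, one row per
        turbulence bin (highest first), followed by a final row of wind speed
        bin centres."""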
for j in reversed(range(self.turbulenceBins.numberOfBins)):
turbulence = self.turbulenceBins.binCenterByIndex(j)
text = "%f\t" % turbulence
for i in range(self.windSpeedBins.numberOfBins):
windSpeed = self.windSpeedBins.binCenterByIndex(i)
if windSpeed in self.powerDeviations:
if turbulence in self.powerDeviations[windSpeed]:
text += "%f\t" % self.powerDeviations[windSpeed][turbulence]
else:
text += "\t"
else:
text += "\t"
Status.add(text, verbosity=2)
text = "\t"
for i in range(self.windSpeedBins.numberOfBins):
text += "%f\t" % self.windSpeedBins.binCenterByIndex(i)
Status.add(text, verbosity=2)
def report_scatter_metric(self,sh,analysis,row, turbRenormActive):
row += 5
sh.write(row, 1, "Scatter Metric Before TI Renormalisation:", self.bold_style)
sh.write(row+1, 1, analysis.powerCurveScatterMetric, self.percent_style)
if turbRenormActive:
sh.write(row, 2, "Scatter Metric After TI Renormalisation:", self.bold_style)
sh.write(row+1, 2, analysis.powerCurveScatterMetricAfterTiRenorm , self.percent_style)
return row + 3
class AnonReport(report):
def __init__(self,targetPowerCurve,wind_bins, turbulence_bins, version="unknown"):
self.version = version
self.targetPowerCurve = targetPowerCurve
self.turbulenceBins = turbulence_bins
self.normalisedWindSpeedBins = wind_bins
def report(self, path, analysis, powerDeviationMatrix = True, scatterMetric=True):
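        """Write the anonymous report workbook to path, optionally including
        the power deviation matrices and the scatter metric."""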
self.analysis = analysis
book = xlwt.Workbook()
sh = book.add_sheet("Anonymous Report", cell_overwrite_ok=True)
sh.write(0, 0, "PCWG Tool Version Number:")
sh.write(0, 1, self.version)
sh.write(0, 2, xlwt.Formula('HYPERLINK("http://www.pcwg.org";"PCWG Website")'))
row = 1
if powerDeviationMatrix:
row = self.report_power_deviation_matrix(sh,analysis,book)
if scatterMetric:
row = self.report_scatter_metric(sh,analysis,row, analysis.turbRenormActive)
book.save(path)
def report_power_deviation_matrix(self,sh,analysis,book):
gradient = colour.ColourGradient(-0.1, 0.1, 0.01, book)
pcStart = 2
pcEnd = pcStart + self.normalisedWindSpeedBins.numberOfBins + 5
deviationMatrixStart = pcEnd + 5
row= []
row.append( self.reportPowerCurve(sh, pcStart, 0, self.targetPowerCurve.name + ' Power Curve', self.targetPowerCurve) )
row.append( self.reportPowerDeviations(sh,deviationMatrixStart, analysis.normalisedHubPowerDeviations, gradient, "Hub Power"))
        if analysis.normalisedTurbPowerDeviations is not None:
deviationMatrixStart += (self.turbulenceBins.numberOfBins + 5) * 2
row.append(self.reportPowerDeviations(sh,deviationMatrixStart, analysis.normalisedTurbPowerDeviations, gradient, "Turb Corrected Power") )
return max(row)
def reportPowerDeviations(self,sh, startRow, powerDeviations, gradient, name):
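        """Write a deviations matrix and, countShift rows below it, the
        matching data count matrix for the same bins; returns the bottom row
        used by the count matrix."""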
countShift = self.turbulenceBins.numberOfBins + 5
sh.write(startRow, 1, "Deviations Matrix (%s)" % name, self.bold_style)
sh.write(startRow + countShift, 1, "Data Count Matrix (%s)" % name, self.bold_style)
for j in range(self.turbulenceBins.numberOfBins):
turbulence = self.turbulenceBins.binCenterByIndex(j)
row = startRow + self.turbulenceBins.numberOfBins - j
countRow = row + countShift
sh.write(row, 0, turbulence, self.percent_no_dp_style)
sh.write(countRow, 0, turbulence, self.percent_no_dp_style)
for i in range(self.normalisedWindSpeedBins.numberOfBins):
windSpeed = self.normalisedWindSpeedBins.binCenterByIndex(i)
col = i + 1
if j == 0:
sh.write(row + 1, col, windSpeed, self.two_dp_style)
sh.write(countRow + 1, col, windSpeed, self.two_dp_style)
if windSpeed in powerDeviations.matrix:
if turbulence in powerDeviations.matrix[windSpeed]:
deviation = powerDeviations.matrix[windSpeed][turbulence]
count = int(powerDeviations.count[windSpeed][turbulence])
if not np.isnan(deviation):
sh.write(row, col, deviation, gradient.getStyle(deviation))
sh.write(countRow, col, count, self.no_dp_style)
return startRow + self.turbulenceBins.numberOfBins + countShift
def reportPowerCurve(self, sh, rowOffset, columnOffset, name, powerCurve):
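        """Write the target power curve in normalised form: normalised wind
        speed, power as a fraction of observed rated power, turbulence and the
        data count for each normalised wind speed bin."""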
sh.write(rowOffset, columnOffset + 2, name, self.bold_style)
rowOrders = { 'Data Count':4, 'Normalised Wind Speed':1,'Normalised Power':2, 'Turbulence':3}
for colname in rowOrders.keys():
sh.write(rowOffset + 1, columnOffset + rowOrders[colname], colname, self.bold_style)
countRow = 1
for i in range(self.normalisedWindSpeedBins.numberOfBins):
windSpeed = self.normalisedWindSpeedBins.binCenterByIndex(i)
mask = self.analysis.dataFrame['Normalised WS Bin'] == windSpeed
dataCount = self.analysis.dataFrame[mask]['Normalised WS Bin'].count()
absoluteWindSpeed = windSpeed * self.analysis.observedRatedWindSpeed
sh.write(rowOffset + countRow + 1, columnOffset + 1, windSpeed, self.two_dp_style)
sh.write(rowOffset + countRow + 1, columnOffset + 4,
dataCount, self.no_dp_style)
if dataCount > 0:
sh.write(rowOffset + countRow + 1, columnOffset + 2,
float(powerCurve.powerFunction(absoluteWindSpeed))/self.analysis.observedRatedPower, self.two_dp_style)
sh.write(rowOffset + countRow + 1, columnOffset + 3,
float(powerCurve.turbulenceFunction(absoluteWindSpeed)), self.percent_no_dp_style)
countRow += 1
return countRow
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
"""Unit tests for Superset Celery worker"""
import datetime
import json
import subprocess
import time
import unittest
import unittest.mock as mock
from tests.test_app import app # isort:skip
from superset import db, sql_lab
from superset.dataframe import SupersetDataFrame
from superset.db_engine_specs.base import BaseEngineSpec
from superset.models.helpers import QueryStatus
from superset.models.sql_lab import Query
from superset.sql_parse import ParsedQuery
from superset.utils.core import get_example_database
from .base_tests import SupersetTestCase
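# Seconds to wait for the Celery worker to pick up and finish the async
# queries issued by the tests below.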
CELERY_SLEEP_TIME = 5
class UtilityFunctionTests(SupersetTestCase):
# TODO(bkyryliuk): support more cases in CTA function.
def test_create_table_as(self):
q = ParsedQuery("SELECT * FROM outer_space;")
self.assertEqual(
"CREATE TABLE tmp AS \nSELECT * FROM outer_space", q.as_create_table("tmp")
)
self.assertEqual(
"DROP TABLE IF EXISTS tmp;\n"
"CREATE TABLE tmp AS \nSELECT * FROM outer_space",
q.as_create_table("tmp", overwrite=True),
)
# now without a semicolon
q = ParsedQuery("SELECT * FROM outer_space")
self.assertEqual(
"CREATE TABLE tmp AS \nSELECT * FROM outer_space", q.as_create_table("tmp")
)
# now a multi-line query
multi_line_query = "SELECT * FROM planets WHERE\n" "Luke_Father = 'Darth Vader'"
q = ParsedQuery(multi_line_query)
self.assertEqual(
"CREATE TABLE tmp AS \nSELECT * FROM planets WHERE\n"
"Luke_Father = 'Darth Vader'",
q.as_create_table("tmp"),
)
class CeleryTestCase(SupersetTestCase):
def get_query_by_name(self, sql):
session = db.session
query = session.query(Query).filter_by(sql=sql).first()
session.close()
return query
def get_query_by_id(self, id):
session = db.session
query = session.query(Query).filter_by(id=id).first()
session.close()
return query
@classmethod
def setUpClass(cls):
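        """Point the app at a test Celery config, clear any existing Query
        rows and start a Superset worker subprocess for the async tests."""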
with app.app_context():
class CeleryConfig(object):
BROKER_URL = app.config["CELERY_CONFIG"].BROKER_URL
CELERY_IMPORTS = ("superset.sql_lab",)
CELERY_ANNOTATIONS = {"sql_lab.add": {"rate_limit": "10/s"}}
CONCURRENCY = 1
app.config["CELERY_CONFIG"] = CeleryConfig
db.session.query(Query).delete()
db.session.commit()
base_dir = app.config["BASE_DIR"]
worker_command = base_dir + "/bin/superset worker -w 2"
subprocess.Popen(worker_command, shell=True, stdout=subprocess.PIPE)
@classmethod
def tearDownClass(cls):
subprocess.call(
"ps auxww | grep 'celeryd' | awk '{print $2}' | xargs kill -9", shell=True
)
subprocess.call(
"ps auxww | grep 'superset worker' | awk '{print $2}' | xargs kill -9",
shell=True,
)
def run_sql(
self, db_id, sql, client_id=None, cta=False, tmp_table="tmp", async_=False
):
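        """Log in, POST the query to /superset/sql_json/, log out and return
        the parsed JSON response."""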
self.login()
resp = self.client.post(
"/superset/sql_json/",
json=dict(
database_id=db_id,
sql=sql,
runAsync=async_,
select_as_cta=cta,
tmp_table_name=tmp_table,
client_id=client_id,
),
)
self.logout()
return json.loads(resp.data)
def test_run_sync_query_dont_exist(self):
main_db = get_example_database()
db_id = main_db.id
sql_dont_exist = "SELECT name FROM table_dont_exist"
result1 = self.run_sql(db_id, sql_dont_exist, "1", cta=True)
self.assertTrue("error" in result1)
def test_run_sync_query_cta(self):
main_db = get_example_database()
backend = main_db.backend
db_id = main_db.id
tmp_table_name = "tmp_async_22"
self.drop_table_if_exists(tmp_table_name, main_db)
name = "James"
sql_where = f"SELECT name FROM birth_names WHERE name='{name}' LIMIT 1"
result = self.run_sql(db_id, sql_where, "2", tmp_table=tmp_table_name, cta=True)
self.assertEqual(QueryStatus.SUCCESS, result["query"]["state"])
self.assertEqual([], result["data"])
self.assertEqual([], result["columns"])
query2 = self.get_query_by_id(result["query"]["serverId"])
# Check the data in the tmp table.
if backend != "postgresql":
# TODO This test won't work in Postgres
results = self.run_sql(db_id, query2.select_sql, "sdf2134")
self.assertEqual(results["status"], "success")
self.assertGreater(len(results["data"]), 0)
def test_run_sync_query_cta_no_data(self):
main_db = get_example_database()
db_id = main_db.id
sql_empty_result = "SELECT * FROM birth_names WHERE name='random'"
result3 = self.run_sql(db_id, sql_empty_result, "3")
self.assertEqual(QueryStatus.SUCCESS, result3["query"]["state"])
self.assertEqual([], result3["data"])
self.assertEqual([], result3["columns"])
query3 = self.get_query_by_id(result3["query"]["serverId"])
self.assertEqual(QueryStatus.SUCCESS, query3.status)
def drop_table_if_exists(self, table_name, database=None):
"""Drop table if it exists, works on any DB"""
sql = "DROP TABLE {}".format(table_name)
db_id = database.id
if database:
database.allow_dml = True
db.session.flush()
return self.run_sql(db_id, sql)
def test_run_async_query(self):
main_db = get_example_database()
db_id = main_db.id
self.drop_table_if_exists("tmp_async_1", main_db)
sql_where = "SELECT name FROM birth_names WHERE name='James' LIMIT 10"
result = self.run_sql(
db_id, sql_where, "4", async_=True, tmp_table="tmp_async_1", cta=True
)
db.session.close()
assert result["query"]["state"] in (
QueryStatus.PENDING,
QueryStatus.RUNNING,
QueryStatus.SUCCESS,
)
time.sleep(CELERY_SLEEP_TIME)
query = self.get_query_by_id(result["query"]["serverId"])
self.assertEqual(QueryStatus.SUCCESS, query.status)
self.assertTrue("FROM tmp_async_1" in query.select_sql)
self.assertEqual(
"CREATE TABLE tmp_async_1 AS \n"
"SELECT name FROM birth_names "
"WHERE name='James' "
"LIMIT 10",
query.executed_sql,
)
self.assertEqual(sql_where, query.sql)
self.assertEqual(0, query.rows)
self.assertEqual(True, query.select_as_cta)
self.assertEqual(True, query.select_as_cta_used)
def test_run_async_query_with_lower_limit(self):
main_db = get_example_database()
db_id = main_db.id
tmp_table = "tmp_async_2"
self.drop_table_if_exists(tmp_table, main_db)
sql_where = "SELECT name FROM birth_names LIMIT 1"
result = self.run_sql(
db_id, sql_where, "5", async_=True, tmp_table=tmp_table, cta=True
)
db.session.close()
assert result["query"]["state"] in (
QueryStatus.PENDING,
QueryStatus.RUNNING,
QueryStatus.SUCCESS,
)
time.sleep(CELERY_SLEEP_TIME)
query = self.get_query_by_id(result["query"]["serverId"])
self.assertEqual(QueryStatus.SUCCESS, query.status)
self.assertTrue(f"FROM {tmp_table}" in query.select_sql)
self.assertEqual(
f"CREATE TABLE {tmp_table} AS \n" "SELECT name FROM birth_names LIMIT 1",
query.executed_sql,
)
self.assertEqual(sql_where, query.sql)
self.assertEqual(0, query.rows)
self.assertEqual(1, query.limit)
self.assertEqual(True, query.select_as_cta)
self.assertEqual(True, query.select_as_cta_used)
def test_default_data_serialization(self):
data = [("a", 4, 4.0, datetime.datetime(2019, 8, 18, 16, 39, 16, 660000))]
cursor_descr = (
("a", "string"),
("b", "int"),
("c", "float"),
("d", "datetime"),
)
db_engine_spec = BaseEngineSpec()
cdf = SupersetDataFrame(data, cursor_descr, db_engine_spec)
with mock.patch.object(
db_engine_spec, "expand_data", wraps=db_engine_spec.expand_data
) as expand_data:
data, selected_columns, all_columns, expanded_columns = sql_lab._serialize_and_expand_data(
cdf, db_engine_spec, False, True
)
expand_data.assert_called_once()
self.assertIsInstance(data, list)
def test_new_data_serialization(self):
data = [("a", 4, 4.0, datetime.datetime(2019, 8, 18, 16, 39, 16, 660000))]
cursor_descr = (
("a", "string"),
("b", "int"),
("c", "float"),
("d", "datetime"),
)
db_engine_spec = BaseEngineSpec()
cdf = SupersetDataFrame(data, cursor_descr, db_engine_spec)
with mock.patch.object(
db_engine_spec, "expand_data", wraps=db_engine_spec.expand_data
) as expand_data:
data, selected_columns, all_columns, expanded_columns = sql_lab._serialize_and_expand_data(
cdf, db_engine_spec, True
)
expand_data.assert_not_called()
self.assertIsInstance(data, bytes)
def test_default_payload_serialization(self):
use_new_deserialization = False
data = [("a", 4, 4.0, datetime.datetime(2019, 8, 18, 16, 39, 16, 660000))]
cursor_descr = (
("a", "string"),
("b", "int"),
("c", "float"),
("d", "datetime"),
)
db_engine_spec = BaseEngineSpec()
cdf = SupersetDataFrame(data, cursor_descr, db_engine_spec)
query = {
"database_id": 1,
"sql": "SELECT * FROM birth_names LIMIT 100",
"status": QueryStatus.PENDING,
}
serialized_data, selected_columns, all_columns, expanded_columns = sql_lab._serialize_and_expand_data(
cdf, db_engine_spec, use_new_deserialization
)
payload = {
"query_id": 1,
"status": QueryStatus.SUCCESS,
"state": QueryStatus.SUCCESS,
"data": serialized_data,
"columns": all_columns,
"selected_columns": selected_columns,
"expanded_columns": expanded_columns,
"query": query,
}
serialized = sql_lab._serialize_payload(payload, use_new_deserialization)
self.assertIsInstance(serialized, str)
def test_msgpack_payload_serialization(self):
use_new_deserialization = True
data = [("a", 4, 4.0, datetime.datetime(2019, 8, 18, 16, 39, 16, 660000))]
cursor_descr = (
("a", "string"),
("b", "int"),
("c", "float"),
("d", "datetime"),
)
db_engine_spec = BaseEngineSpec()
cdf = SupersetDataFrame(data, cursor_descr, db_engine_spec)
query = {
"database_id": 1,
"sql": "SELECT * FROM birth_names LIMIT 100",
"status": QueryStatus.PENDING,
}
serialized_data, selected_columns, all_columns, expanded_columns = sql_lab._serialize_and_expand_data(
cdf, db_engine_spec, use_new_deserialization
)
payload = {
"query_id": 1,
"status": QueryStatus.SUCCESS,
"state": QueryStatus.SUCCESS,
"data": serialized_data,
"columns": all_columns,
"selected_columns": selected_columns,
"expanded_columns": expanded_columns,
"query": query,
}
serialized = sql_lab._serialize_payload(payload, use_new_deserialization)
self.assertIsInstance(serialized, bytes)
@staticmethod
def de_unicode_dict(d):
def str_if_basestring(o):
if isinstance(o, str):
return str(o)
return o
return {str_if_basestring(k): str_if_basestring(d[k]) for k in d}
@classmethod
def dictify_list_of_dicts(cls, l, k):
return {str(o[k]): cls.de_unicode_dict(o) for o in l}
if __name__ == "__main__":
unittest.main()
|
|
from functools import wraps
from flask import (Flask, render_template, request, redirect, url_for,
flash, make_response, jsonify)
from flask import session as login_session
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker
from database_setup import (Base, User, Catalog, Category, Record,
Field, RecordTemplate, FieldTemplate, Option)
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import random, string, httplib2, json, requests
app = Flask(__name__)
APPLICATION_NAME = "Catalogizer"
# client id for google openID
CLIENT_ID = json.loads(open(
'client_secrets.json', 'r').read())['web']['client_id']
engine = create_engine('sqlite:///catalogizer.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
def login_required(f):
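    """Decorator that redirects to the login page when no user_id is in the
    session."""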
    # Needed for decorators to update __name__ and __module__
@wraps(f)
def decorated_function(*args, **kwargs):
if 'user_id' not in login_session:
return redirect('/login')
return f(*args, **kwargs)
return decorated_function
@app.route('/login/')
def show_login():
state = ''.join(random.choice(
string.ascii_uppercase + string.digits) for x in xrange(32))
login_session['state'] = state
if 'gplus_id' in login_session:
flash('You are already logged in.', 'error')
return redirect('/')
if 'facebook_id' in login_session:
flash('You are already logged in.', 'error')
return redirect('/')
return render_template('login.html', STATE=state)
@app.route('/gconnect', methods=['POST'])
def gconnect():
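    """Exchange the one-time Google authorization code for credentials,
    validate the resulting access token and log the user into the session."""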
if request.args.get('state') != login_session.get('state'):
response = make_response(json.dumps('Invalid state parameter'), 401)
response.headers['Content-Type'] = 'application/json'
return response
code = request.data
try:
# Upgrade the authorization code into a credentials object that can be
# used to authorize requests.
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(json.dumps(
'Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
    if result.get('error') is not None:
        response = make_response(json.dumps(result.get('error')), 500)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(json.dumps(
"Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != CLIENT_ID:
response = make_response(json.dumps(
"Token's client ID does not match app's"), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check to see if user is already logged in.
stored_access_token = login_session.get('access_token')
stored_gplus_id = login_session.get('gplus_id')
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps(
"Current user is already connected."), 200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
login_session['access_token'] = access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
# See if user exists and get ID assigned in database.
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
updateUser(login_session['user_id'],
login_session['picture'],
login_session['username'])
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += (' " style = "width: 300px; height: 300px;'
'border-radius: 150px;-webkit-border-radius: 150px'
';-moz-border-radius: 150px;"> ')
flash("Now logged in as %s" % login_session['username'])
print login_session
return output
@app.route('/fbconnect', methods=['POST'])
def fbconnect():
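    """Exchange the client-side Facebook token for a server-side token, fetch
    the user's profile and log them into the session."""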
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
access_token = request.data
app_id = json.loads(open(
'fb_client_secrets.json', 'r').read())['web']['app_id']
app_secret = json.loads(open(
'fb_client_secrets.json', 'r').read())['web']['app_secret']
url = ('https://graph.facebook.com/oauth/access_token?'
'grant_type=fb_exchange_token&client_id=%s&client_secret=%s&'
'fb_exchange_token=%s' % (app_id, app_secret, access_token))
h = httplib2.Http()
result = h.request(url, 'GET')[1]
# Use token to get user info from API
userinfo_url = "https://graph.facebook.com/v2.4/me"
# Strip expire tag from access token
token = result.split("&")[0]
url = userinfo_url + '?%s&fields=name,id,email' % token
h = httplib2.Http()
result = h.request(url, 'GET')[1]
data = json.loads(result)
login_session['username'] = data["name"]
login_session['email'] = data["email"]
login_session['facebook_id'] = data["id"]
# The token must be stored in the login_session in order to properly logout
# strip out the information before the equals sign in the token
stored_token = token.split("=")[1]
login_session['access_token'] = stored_token
# Get user picture
url = ('https://graph.facebook.com/v2.4/me/picture?'
'%s&redirect=0&height=200&width=200' % token)
h = httplib2.Http()
result = h.request(url, 'GET')[1]
data = json.loads(result)
login_session['picture'] = data["data"]["url"]
# see if user exists, create user if they don't, and get assigned id
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
updateUser(login_session['user_id'],
login_session['picture'],
login_session['username'])
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += (' " style = "width: 300px; height: 300px;'
'border-radius: 150px;-webkit-border-radius: 150px'
';-moz-border-radius: 150px;"> ')
flash("Now logged in as %s" % login_session['username'])
return output
# DISCONNECT - Revoke a current user's token and reset their login_session.
def gdisconnect():
access_token = login_session['access_token']
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token
h = httplib2.Http()
result = h.request(url, 'GET')[0]
if result['status'] == '200':
del login_session['access_token']
del login_session['gplus_id']
del login_session['username']
del login_session['email']
del login_session['picture']
del login_session['user_id']
        flash('You have been successfully logged out')
return redirect(url_for('viewCatalogs'))
else:
        response = make_response(
            json.dumps('Failed to revoke token for given user.'), 400)
response.headers['Content-Type'] = 'application/json'
return response
def fbdisconnect():
access_token = login_session['access_token']
facebook_id = login_session['facebook_id']
    url = 'https://graph.facebook.com/%s/permissions?access_token=%s' % (
        facebook_id, access_token)
h = httplib2.Http()
result = h.request(url, 'DELETE')[1]
print 'result is '
print result
if 'success' in result:
del login_session['access_token']
del login_session['facebook_id']
del login_session['username']
del login_session['email']
del login_session['picture']
del login_session['user_id']
        flash('You have been successfully logged out')
return redirect(url_for('viewCatalogs'))
else:
        response = make_response(
            json.dumps('Failed to revoke token for given user.'), 400)
response.headers['Content-Type'] = 'application/json'
return response
@app.route("/disconnect/")
def disconnect():
if not login_session:
return redirect('/login')
access_token = login_session.get('access_token')
if access_token is None:
flash('You are not logged in', 'error')
return redirect(url_for('show_login'))
if 'gplus_id' in login_session:
return gdisconnect()
elif 'facebook_id' in login_session:
return fbdisconnect()
else:
return redirect('/login')
@app.route('/')
@app.route('/catalog/')
def viewCatalogs():
# print login_session
catalogs = getCatalogs()
return render_template('viewCatalogs.html',
catalogs=catalogs,
current_user=login_session.get('user_id'))
@app.route('/catalog/new/', methods=['GET', 'POST'])
@login_required
def newCatalog():
if request.method == 'POST':
catalogName = request.form['catalog-name']
newCatalogEntry = Catalog(name=catalogName,
user_id=login_session.get('user_id'))
try:
session.add(newCatalogEntry)
session.commit()
flash('%s successfully created!' % catalogName)
except:
session.rollback()
raise
return redirect(url_for('viewCatalogs'))
else:
return render_template('newCatalog.html')
@app.route('/catalog/<int:catalog_id>/edit/', methods=['GET', 'POST'])
@login_required
def editCatalog(catalog_id):
catalog = getCatalog(catalog_id)
if catalog.user_id != login_session.get('user_id'):
flash('Only the owner of %s can edit this catalog.'
% catalog.name, 'error')
return redirect(url_for('viewCatalogs'))
if request.method == 'POST':
newCatalogName = request.form['new-catalog-name']
catalog.name = newCatalogName
try:
session.commit()
flash('%s successfully edited!' % newCatalogName)
except:
session.rollback()
raise
return redirect(url_for('viewCatalogs'))
else:
return render_template('editCatalog.html', catalog=catalog)
@app.route('/catalog/<int:catalog_id>/delete/', methods=['GET', 'POST'])
@login_required
def deleteCatalog(catalog_id):
catalog = getCatalog(catalog_id)
if catalog.user_id != login_session.get('user_id'):
flash('Only the owner of %s can delete this catalog.'
% catalog.name, 'error')
return redirect(url_for('viewCatalogs'))
if request.method == 'POST':
delCatalog(catalog_id)
return redirect(url_for('viewCatalogs'))
else:
return render_template('deleteCatalog.html', catalog=catalog)
@app.route('/catalog/<int:catalog_id>/category/')
def viewCategories(catalog_id):
catalog = getCatalog(catalog_id)
categories = getCategories(catalog_id)
return render_template('viewCategories.html',
catalog=catalog,
categories=categories,
current_user=login_session.get('user_id'))
@app.route('/catalog/<int:catalog_id>/category/new', methods=['GET', 'POST'])
@login_required
def newCategory(catalog_id):
catalog = getCatalog(catalog_id)
if catalog.user_id != login_session.get('user_id'):
flash('Only the owner of %s can add a category to it.'
% catalog.name, 'error')
return redirect(url_for('viewCatalogs'))
if request.method == 'POST':
categoryName = request.form['category-name']
categoryEntry = Category(name=categoryName, catalog_id=catalog.id)
try:
session.add(categoryEntry)
session.commit()
flash('%s successfully created!' % categoryName)
except:
session.rollback()
raise
return redirect(url_for('viewCategories', catalog_id=catalog.id))
else:
return render_template('newCategory.html', catalog=catalog)
@app.route('/catalog/<int:catalog_id>/category/<int:category_id>/edit/',
methods=['GET', 'POST'])
@login_required
def editCategory(catalog_id, category_id):
catalog = getCatalog(catalog_id)
if catalog.user_id != login_session.get('user_id'):
flash('Only the owner of %s can edit this category.'
% catalog.name, 'error')
return redirect(url_for('viewCatalogs'))
category = getCategory(category_id)
if request.method == 'POST':
newCategoryName = request.form['new-category-name']
category.name = newCategoryName
try:
session.commit()
except:
session.rollback()
raise
flash('%s successfully edited!' % newCategoryName)
return redirect(url_for('viewCategories', catalog_id=catalog_id))
else:
return render_template('editCategory.html',
catalog=catalog,
category=category)
@app.route('/catalog/<int:catalog_id>/category/<int:category_id>/delete/',
methods=['GET', 'POST'])
@login_required
def deleteCategory(catalog_id, category_id):
catalog = getCatalog(catalog_id)
if catalog.user_id != login_session.get('user_id'):
flash('Only the owner of %s can delete this category.'
% catalog.name, 'error')
return redirect(url_for('viewCatalogs'))
if request.method == 'POST':
delCategory(category_id)
return redirect(url_for('viewCategories', catalog_id=catalog_id))
else:
category = getCategory(category_id)
return render_template('deleteCategory.html',
catalog=catalog,
category=category)
@app.route('/catalog/<int:catalog_id>/category/<int:category_id>/record/')
def viewRecords(catalog_id, category_id):
catalog = getCatalog(catalog_id)
category = getCategory(category_id)
records = getRecordsByCategoryId(category_id)
return render_template('viewRecords.html',
catalog=catalog,
category=category,
records=records,
current_user=login_session.get('user_id'))
@app.route('/catalog/<int:catalog_id>/category/<int:category_id>/record/add/')
@login_required
def addRecord(catalog_id, category_id):
catalog = getCatalog(catalog_id)
if catalog.user_id != login_session.get('user_id'):
flash('Only the owner of %s can add a new record to it.'
% catalog.name, 'error')
return redirect(url_for('viewCatalogs'))
category = getCategory(category_id)
recordTemplates = getRecordTemplates(category_id)
return render_template('addRecord.html',
catalog=catalog,
category=category,
rTemplates=recordTemplates,
current_user=login_session.get('user_id'))
# This route exceeds 80 characters, but URL routes break if split across lines.
@app.route('/catalog/<int:catalog_id>/category/<int:category_id>/record/add/<int:record_template_id>/new/', methods=['GET', 'POST'])
@login_required
def newRecord(catalog_id, category_id, record_template_id):
catalog = getCatalog(catalog_id)
if catalog.user_id != login_session.get('user_id'):
flash('Only the owner of %s can add a new record to it.'
% catalog.name, 'error')
return redirect(url_for('viewCatalogs'))
if request.method == 'POST':
recordName = request.form['record-name']
addNewRecord(category_id, record_template_id)
flash('%s successfully created!' % recordName)
return redirect(url_for('viewRecords',
catalog_id=catalog_id,
category_id=category_id))
else:
category = getCategory(category_id)
recordTemplate = getRecordTemplate(record_template_id)
fieldTemplatesWithOptions = getFormattedFieldTemplatesWithOptions(
record_template_id)
return render_template('newRecord.html',
catalog=catalog,
category=category,
rTemplate=recordTemplate,
fTemplates=fieldTemplatesWithOptions)
@app.route('/catalog/<int:catalog_id>/category/<int:category_id>/record/<int:record_id>/edit/', methods=['GET', 'POST'])
@login_required
def editRecord(catalog_id, category_id, record_id):
catalog = getCatalog(catalog_id)
if catalog.user_id != login_session.get('user_id'):
flash('Only the owner of %s can edit this record.'
% catalog.name, 'error')
return redirect(url_for('viewCatalogs'))
record = getRecord(record_id)
if request.method == 'POST':
newName = request.form['record-name']
record_template_id = record.record_template_id
delRecord(record_id)
addNewRecord(category_id, record_template_id)
flash('%s successfully edited!' % newName)
return redirect(url_for('viewRecords',
catalog_id=catalog_id,
category_id=category_id))
else:
category = getCategory(category_id)
fieldTemplatesWithValues = getFieldTemplatesWithValues(record_id)
return render_template('editRecord.html',
catalog=catalog,
category=category,
record=record,
fTemplates=fieldTemplatesWithValues)
@app.route('/catalog/<int:catalog_id>/category/<int:category_id>/record/<int:record_id>/delete/', methods=['GET', 'POST'])
@login_required
def deleteRecord(catalog_id, category_id, record_id):
catalog = getCatalog(catalog_id)
if catalog.user_id != login_session.get('user_id'):
flash('Only the owner of %s can delete this record.'
% catalog.name, 'error')
return redirect(url_for('viewCatalogs'))
if request.method == 'POST':
record = getRecord(record_id)
flash('%s successfully deleted!' % record.name)
delRecord(record_id)
return redirect(url_for('viewRecords',
catalog_id=catalog_id,
category_id=category_id))
else:
category = getCategory(category_id)
record = getRecord(record_id)
return render_template('deleteRecord.html',
catalog=catalog,
category=category,
record=record)
@app.route('/catalog/<int:catalog_id>/category/<int:category_id>/record/<int:record_id>/view/')
def showRecord(catalog_id, category_id, record_id):
catalog = getCatalog(catalog_id)
category = getCategory(category_id)
record = getRecord(record_id)
fields = getFormattedFields(record_id)
return render_template('showRecord.html',
catalog=catalog,
category=category,
record=record,
fields=fields)
@app.route('/catalog/<int:catalog_id>/category/<int:category_id>/record/add/template/', methods=['GET', 'POST'])
@login_required
def newRecordTemplate(catalog_id, category_id):
catalog = getCatalog(catalog_id)
if catalog.user_id != login_session.get('user_id'):
flash('Only the owner of %s can add a record template for it.'
% catalog.name, 'error')
return redirect(url_for('viewCatalogs'))
if request.method == 'POST':
# werkzeug returns an immutable object. Using .copy() creates a mutable
# copy.
formData = request.form.copy()
        # Pop recordTemplateName as it has a different format than the other
        # values, and goes in a separate table.
recordTemplateName = formData.pop('template-name')
recordTemplateEntry = RecordTemplate(name=recordTemplateName,
category_id=category_id)
try:
session.add(recordTemplateEntry)
session.commit()
except:
session.rollback()
raise
# Iterate over form inputs, placing labels and field kind inside the
# FieldTemplate table, and options within the Option table.
for keyValue in formData.lists():
# Each field template has a 'label' and a field 'kind',
# which may or may not have 'options' assigned to it.
# They are grouped together by a group identifier number at
# the beginning of the field name (before the first hyphen).
groupIdentifier = keyValue[0][0:keyValue[0].find("-")]
inputType = keyValue[0][keyValue[0].find("-") + 1:]
# For each of the inputs with the name "field-kind",
# find their label and options using the group identifier and
# add them to their respective tables.
if inputType == "field-kind":
fieldTemplateLabel = formData.get(
groupIdentifier + "-field-label")
fieldTemplateKind = formData.get(
groupIdentifier + "-field-kind")
fieldTemplateOptions = formData.getlist(
groupIdentifier + "-option")
fieldTemplateEntry = FieldTemplate(
label=fieldTemplateLabel,
kind=fieldTemplateKind,
order=groupIdentifier,
record_template_id=recordTemplateEntry.id)
try:
session.add(fieldTemplateEntry)
session.commit()
except:
session.rollback()
raise
while len(fieldTemplateOptions) > 0:
optionEntry = Option(
name=fieldTemplateOptions.pop(0),
field_template_id=fieldTemplateEntry.id)
try:
session.add(optionEntry)
session.commit()
except:
session.rollback()
raise
flash('%s successfully created!' % recordTemplateName)
return redirect(url_for('addRecord',
catalog_id=catalog_id,
category_id=category_id))
else:
category = getCategory(category_id)
return render_template('recordTemplate.html',
catalog=catalog,
category=category)
@app.route('/catalog/<int:catalog_id>/category/<int:category_id>/record/add/template/<int:record_template_id>/edit/', methods=['GET', 'POST'])
@login_required
def editRecordTemplate(catalog_id, category_id, record_template_id):
catalog = getCatalog(catalog_id)
if catalog.user_id != login_session.get('user_id'):
flash('Only the owner of %s can edit this record template.'
% catalog.name, 'error')
return redirect(url_for('viewCatalogs'))
rTemplate = getRecordTemplate(record_template_id)
if request.method == 'POST':
newRecordTemplateName = request.form['new-rt-name']
rTemplate.name = newRecordTemplateName
try:
session.commit()
flash('%s successfully edited!' % newRecordTemplateName)
except:
session.rollback()
raise
return redirect(url_for('addRecord',
catalog_id=catalog_id,
category_id=category_id))
else:
catalog = getCatalog(catalog_id)
category = getCategory(category_id)
return render_template('editRecordTemplate.html',
catalog=catalog,
category=category,
rTemplate=rTemplate)
@app.route('/catalog/<int:catalog_id>/category/<int:category_id>/record/add/template/<int:record_template_id>/delete/', methods=['GET', 'POST'])
@login_required
def deleteRecordTemplate(catalog_id, category_id, record_template_id):
catalog = getCatalog(catalog_id)
if catalog.user_id != login_session.get('user_id'):
flash('Only the owner of %s can delete this record template.'
% catalog.name, 'error')
return redirect(url_for('viewCatalogs'))
if request.method == 'POST':
delRecord_Template(record_template_id)
return redirect(url_for('addRecord',
catalog_id=catalog_id,
category_id=category_id))
else:
category = getCategory(category_id)
rTemplate = getRecordTemplate(record_template_id)
return render_template('deleteRecordTemplate.html',
catalog=catalog,
category=category,
rTemplate=rTemplate)
# JSON API Endpoints
@app.route('/catalog/json/')
def catalogListJSON():
catalogs = getCatalogs()
return jsonify(Catalogs=[c.serialize for c in catalogs])
@app.route('/catalog/<int:catalog_id>/category/json/')
def categoryListJSON(catalog_id):
categories = getCategories(catalog_id)
return jsonify(Categories=[c.serialize for c in categories])
@app.route('/catalog/<int:catalog_id>/category/<int:category_id>/record/json/')
def recordListJSON(catalog_id, category_id):
records = getRecordsByCategoryId(category_id)
return jsonify(Records=[r.serialize for r in records])
@app.route('/catalog/<int:catalog_id>/category/<int:category_id>/record/<int:record_id>/view/json/')
def fieldListJSON(catalog_id, category_id, record_id):
fields = getFields(record_id)
return jsonify(Fields=[r.serialize for r in fields])
# Helper functions for adding new entries or updating entries
def createUser(login_session):
newUser = User(name=login_session['username'],
email=login_session['email'],
picture=login_session['picture'])
try:
session.add(newUser)
session.commit()
except:
session.rollback()
raise
# user = session.query(User).filter_by(email=login_session['email']).one()
# return user.id
return newUser.id
def updateUser(user_id, picture, name):
user = session.query(User).filter_by(id=user_id).one()
change = False
if user.picture != picture:
user.picture = picture
change = True
if user.name != name:
user.name = name
change = True
if change is True:
try:
session.commit()
except:
session.rollback()
raise
def addNewRecord(category_id, record_template_id):
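    """Create a Record from the submitted form data, then create a Field
    entry for every submitted field value."""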
# The request object is a Werkzeug data structure called
# ImmutableMultiDict, which has a copy method that returns a
# mutable Wekzeug MultiDict.
formData = request.form.copy()
    # Pop the record name from the MultiDict; pop() returns the first value
    # for the key and removes the key from the dict.
recordName = formData.pop('record-name')
newRecordEntry = Record(name=recordName,
record_template_id=record_template_id,
category_id=category_id)
try:
session.add(newRecordEntry)
session.commit()
except:
session.rollback()
raise
    # Call the lists() method on the formData MultiDict to get a list of
    # (key, [values]) tuples, grouping all values under each unique key.
for keyValues in formData.lists():
fieldTemplateId = int(keyValues[0])
fieldValues = keyValues[1]
for fieldValue in fieldValues:
# After calling session.commit() on the newRecordEntry,
# SQLAlchemy automatically reloads the object from the database,
# allowing access to its assigned primary key.
newFieldEntry = Field(value=fieldValue,
field_template_id=fieldTemplateId,
record_id=newRecordEntry.id)
session.add(newFieldEntry)
try:
session.commit()
except:
session.rollback()
raise
# Helper functions to filter through and get database elements
def getUserID(email):
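    """Return the id of the User with the given email, or None if no such
    user exists."""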
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except:
return None
def getCatalogs():
return session.query(Catalog).all()
def getCatalog(catalog_id):
return session.query(Catalog).filter_by(id=catalog_id).one()
def getCategories(catalog_id):
return session.query(Category).filter_by(catalog_id=catalog_id).all()
def getCategory(category_id):
return session.query(Category).filter_by(id=category_id).one()
def getRecord(record_id):
return session.query(Record).filter_by(id=record_id).one()
def getRecordsByCategoryId(category_id):
return session.query(Record).filter_by(category_id=category_id).all()
def getRecordsByRecordTemplateId(record_template_id):
return session.query(Record).filter_by(
record_template_id=record_template_id).all()
def getFields(record_id):
return session.query(Field).filter_by(record_id=record_id).all()
def getRecordTemplate(record_template_id):
return session.query(RecordTemplate).filter_by(id=record_template_id).one()
def getRecordTemplates(category_id):
return session.query(RecordTemplate).filter_by(
category_id=category_id).all()
def getFieldTemplates(record_template_id):
return session.query(FieldTemplate).filter_by(
record_template_id=record_template_id).order_by(
asc(FieldTemplate.order))
def getOptions(field_template_id):
return session.query(Option).filter_by(
field_template_id=field_template_id).order_by(
asc(Option.id))
def getFormattedFields(record_id):
"""Returns field labels and values in the form of an array of
tupples of the field label and an array of the field values.
E.g. [ ( field label, [field value1, field value2] ) ]"""
record = getRecord(record_id)
fieldTemplates = getFieldTemplates(record.record_template_id)
fields = []
for fieldTemplate in fieldTemplates:
fieldLabel = fieldTemplate.label
valueList = session.query(Field).filter_by(
field_template_id=fieldTemplate.id,
record_id=record.id).order_by(
asc(Field.id))
fieldValues = []
for v in valueList:
fieldValues.append(v.value)
fields.append((fieldLabel, fieldValues))
return fields
def getFormattedFieldTemplatesWithOptions(record_template_id):
"""Returns a list of dictionaries containing
field template id, label, kind, and a list of options
for that field template."""
fieldTemplates = getFieldTemplates(record_template_id)
fieldsWithOptions = []
for fieldTemplate in fieldTemplates:
fieldTemplateDict = {
'id': fieldTemplate.id,
'label': fieldTemplate.label,
'kind': fieldTemplate.kind,
'options': []
}
options = getOptions(fieldTemplate.id)
for option in options:
fieldTemplateDict['options'].append((option.name, option.id))
fieldsWithOptions.append(fieldTemplateDict)
return fieldsWithOptions
def getFieldTemplatesWithValues(record_id):
"""Returns a list of dictionaries containing
field template id, label, kind, a list of options for
that field template, and the value(s) for that field.
Field Templates of the kind 'checkbox' may have more
than one of the options selected as s value"""
record = getRecord(record_id)
ftDictList = getFormattedFieldTemplatesWithOptions(
record.record_template_id)
for ftDict in ftDictList:
ftDict['values'] = []
ftId = ftDict['id']
fields = session.query(Field).filter_by(field_template_id=ftId).all()
for field in fields:
value = field.value
ftDict['values'].append(value)
return ftDictList
# Helper functions to delete database items
def delRecord(record_id):
record = getRecord(record_id)
fields = getFields(record_id)
for field in fields:
try:
session.delete(field)
session.commit()
except:
session.rollback()
raise
try:
session.delete(record)
session.commit()
except:
session.rollback()
raise
def delRecord_Template(record_template_id):
recordTemplate = getRecordTemplate(record_template_id)
fieldTemplates = getFieldTemplates(record_template_id)
records = getRecordsByRecordTemplateId(record_template_id)
flash('%s successfully deleted!' % recordTemplate.name)
for record in records:
delRecord(record.id)
for fieldTemplate in fieldTemplates:
options = getOptions(fieldTemplate.id)
for option in options:
try:
session.delete(option)
session.commit()
except:
session.rollback()
raise
try:
session.delete(fieldTemplate)
session.commit()
except:
session.rollback()
raise
try:
session.delete(recordTemplate)
session.commit()
except:
session.rollback()
raise
def delCategory(category_id):
category = getCategory(category_id)
recordTemplates = getRecordTemplates(category_id)
flash('%s successfully deleted!' % category.name)
for recordTemplate in recordTemplates:
delRecord_Template(recordTemplate.id)
try:
session.delete(category)
session.commit()
except:
session.rollback()
raise
def delCatalog(catalog_id):
catalog = getCatalog(catalog_id)
categories = getCategories(catalog_id)
flash('%s successfully deleted!' % catalog.name)
for category in categories:
delCategory(category.id)
try:
session.delete(catalog)
session.commit()
except:
session.rollback()
raise
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host='0.0.0.0', port=5000)
|
|
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for student forms view."""
import os
import tempfile
from tests import profile_utils
from tests import test_utils
from tests.utils import project_utils
class FormPageTest(test_utils.GSoCDjangoTestCase):
"""Test student form page."""
def setUp(self):
self.init()
def testLoneUserAccessForbidden(self):
self._assertAccessForbiddenForUrl(self._getEnrollmentFormUrl())
self._assertAccessForbiddenForUrl(self._getTaxFormUrl())
def testMentorAccessForbidden(self):
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, mentor_for=[self.org.key])
self._assertAccessForbiddenForUrl(self._getEnrollmentFormUrl())
self._assertAccessForbiddenForUrl(self._getTaxFormUrl())
def testOrgAdminAccessForbidden(self):
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
profile_utils.seedNDBProfile(
self.program.key(), user=user, admin_for=[self.org.key])
self._assertAccessForbiddenForUrl(self._getEnrollmentFormUrl())
self._assertAccessForbiddenForUrl(self._getTaxFormUrl())
def testHostAccessForbidden(self):
user = profile_utils.seedNDBUser(host_for=[self.program])
profile_utils.loginNDB(user)
self._assertAccessForbiddenForUrl(self._getEnrollmentFormUrl())
self._assertAccessForbiddenForUrl(self._getTaxFormUrl())
def testStudentAccessForbidden(self):
# access should be forbidden because at this point students are not
# permitted to upload their forms
self.timeline_helper.studentsAnnounced()
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
student = profile_utils.seedSOCStudent(self.program, user=user)
project_utils.seedProject(
student, self.program.key(), org_key=self.org.key)
self._assertAccessForbiddenForUrl(self._getEnrollmentFormUrl())
self._assertAccessForbiddenForUrl(self._getTaxFormUrl())
def testStudentAccessGranted(self):
self.timeline_helper.formSubmission()
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
student = profile_utils.seedSOCStudent(self.program, user=user)
project_utils.seedProject(
student, self.program.key(), org_key=self.org.key)
# check for enrollment form
url = self._getEnrollmentFormUrl()
response = self.get(url)
self.assertResponseOK(response)
self._assertStudentFormsTemplatesUsed(response)
# check for tax form
url = self._getTaxFormUrl()
response = self.get(url)
self.assertResponseOK(response)
self._assertStudentFormsTemplatesUsed(response)
def testEnrollmentFormSubmissionByStudent(self):
"""Tests that enrollment form is submitted properly by a student."""
self.timeline_helper.formSubmission()
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
student = profile_utils.seedSOCStudent(self.program, user=user)
project_utils.seedProject(
student, self.program.key(), org_key=self.org.key)
# check that there is no enrollment form at this stage
self.assertIsNone(student.student_data.enrollment_form)
with tempfile.NamedTemporaryFile() as test_file:
# check for the enrollment form
url = self._getEnrollmentFormUrl()
postdata = {'enrollment_form': test_file}
response = self.post(url, postdata)
self.assertResponseRedirect(
response, self._getEnrollmentFormUrl(validated=True))
# check if the form has been submitted
student = student.key.get()
self.assertIsNotNone(student.student_data.enrollment_form)
def testTaxFormSubmissionByStudent(self):
"""Tests that enrollment form is submitted properly by a student."""
self.timeline_helper.formSubmission()
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
student = profile_utils.seedSOCStudent(self.program, user=user)
project_utils.seedProject(
student, self.program.key(), org_key=self.org.key)
# check that there is no tax form at this stage
self.assertIsNone(student.student_data.tax_form)
with tempfile.NamedTemporaryFile() as test_file:
      # check for the tax form
url = self._getTaxFormUrl()
postdata = {'tax_form': test_file}
response = self.post(url, postdata)
self.assertResponseRedirect(
response, self._getTaxFormUrl(validated=True))
# check if the form has been submitted
student = student.key.get()
self.assertIsNotNone(student.student_data.tax_form)
def testEnrollmentFormSubmissionByAdmin(self):
"""Tests that enrollment form is submitted properly by an admin."""
self.timeline_helper.formSubmission()
user = profile_utils.seedNDBUser(host_for=[self.program])
profile_utils.loginNDB(user)
student = profile_utils.seedSOCStudent(self.program)
project_utils.seedProject(student, self.program.key(), org_key=self.org.key)
# check that there is no enrollment form at this stage
self.assertIsNone(student.student_data.enrollment_form)
with tempfile.NamedTemporaryFile() as test_file:
url = self._getAdminEnrollmentForm(student)
postdata = {'enrollment_form': test_file}
response = self.post(url, postdata)
self.assertResponseRedirect(
response, self._getAdminEnrollmentForm(student, validated=True))
# check if the form has been submitted
student = student.key.get()
self.assertIsNotNone(student.student_data.enrollment_form)
def testTaxFormSubmissionByAdmin(self):
"""Tests that tax form is submitted properly by an admin."""
self.timeline_helper.formSubmission()
user = profile_utils.seedNDBUser(host_for=[self.program])
profile_utils.loginNDB(user)
student = profile_utils.seedSOCStudent(self.program)
project_utils.seedProject(student, self.program.key(), org_key=self.org.key)
# check that there is no tax form at this stage
self.assertIsNone(student.student_data.tax_form)
with tempfile.NamedTemporaryFile() as test_file:
url = self._getAdminTaxForm(student)
postdata = {'tax_form': test_file}
response = self.post(url, postdata)
self.assertResponseRedirect(
response, self._getAdminTaxForm(student, validated=True))
# check if the form has been submitted
student = student.key.get()
self.assertIsNotNone(student.student_data.tax_form)
def testSubmitAnotherForm(self):
"""Tests that another form may be resubmitted by a student."""
self.timeline_helper.formSubmission()
user = profile_utils.seedNDBUser()
profile_utils.loginNDB(user)
student = profile_utils.seedSOCStudent(self.program, user=user)
project_utils.seedProject(
student, self.program.key(), org_key=self.org.key)
# set initial tax form
blob_key = self.createBlob('initial_tax_form.pdf')
student.student_data.tax_form = blob_key
student.put()
# submit a new tax form
with tempfile.NamedTemporaryFile() as test_file:
      # check for the tax form
url = self._getTaxFormUrl()
postdata = {'tax_form': test_file}
response = self.post(url, postdata)
self.assertResponseRedirect(
response, self._getTaxFormUrl(validated=True))
# check if the form has been submitted
student = student.key.get()
self.assertIsNotNone(student.student_data.tax_form)
self.assertEqual(os.path.basename(test_file.name),
student.student_data.tax_form)
def _getEnrollmentFormUrl(self, validated=False):
"""Returns URL for the student enrollment form upload."""
url = '/gsoc/student_forms/enrollment/' + self.gsoc.key().name()
return url if not validated else url + '?validated'
def _getTaxFormUrl(self, validated=False):
"""Returns URL for the student tax form upload."""
url = '/gsoc/student_forms/tax/' + self.gsoc.key().name()
return url if not validated else url + '?validated'
def _getAdminEnrollmentForm(self, profile, validated=False):
"""Returns URL for the student enrollment form upload by admin."""
url = '/gsoc/student_forms/admin/enrollment/%s' % profile.key.id()
return url if not validated else url + '?validated'
def _getAdminTaxForm(self, profile, validated=False):
"""Returns URL for the student tax form upload by admin."""
url = '/gsoc/student_forms/admin/tax/%s' % profile.key.id()
return url if not validated else url + '?validated'
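  # For example (the key id is illustrative), _getAdminTaxForm(profile, validated=True)
  # returns '/gsoc/student_forms/admin/tax/42?validated'.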
def _assertAccessForbiddenForUrl(self, url):
"""Asserts that GET request will return forbidden response
for the specified URL."""
response = self.get(url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def _assertStudentFormsTemplatesUsed(self, response):
"""Asserts that all the templates from the student forms were used.
"""
self.assertGSoCTemplatesUsed(response)
self.assertTemplateUsed(response,
'modules/gsoc/student_forms/base.html')
self.assertTemplateUsed(response, 'modules/gsoc/_form.html')
def _createNewMentor(self):
"""Returns a newly created mentor."""
return profile_utils.seedNDBProfile(
self.program.key(), mentor_for=[self.org.key])
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
def embedding_lookup(params, ids, partition_strategy="mod", name=None,
validate_indices=True):
"""Looks up `ids` in a list of embedding tensors.
This function is used to perform parallel lookups on the list of
tensors in `params`. It is a generalization of
[`tf.gather()`](../../api_docs/python/array_ops.md#gather), where `params` is
interpreted as a partition of a larger embedding tensor.
If `len(params) > 1`, each element `id` of `ids` is partitioned between
the elements of `params` according to the `partition_strategy`.
In all strategies, if the id space does not evenly divide the number of
partitions, each of the first `(max_id + 1) % len(params)` partitions will
be assigned one more id.
If `partition_strategy` is `"mod"`, we assign each id to partition
`p = id % len(params)`. For instance,
13 ids are split across 5 partitions as:
`[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]`
If `partition_strategy` is `"div"`, we assign ids to partitions in a
contiguous manner. In this case, 13 ids are split across 5 partitions as:
`[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`
The results of the lookup are concatenated into a dense
tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`.
Args:
params: A list of tensors with the same type and which can be concatenated
along dimension 0. Each `Tensor` must be appropriately sized for the given
`partition_strategy`.
ids: A `Tensor` with type `int32` or `int64` containing the ids to be looked
up in `params`.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`.
name: A name for the operation (optional).
validate_indices: Whether or not to validate gather indices.
Returns:
A `Tensor` with the same type as the tensors in `params`.
Raises:
ValueError: If `params` is empty.
"""
if not isinstance(params, list):
params = [params]
with ops.op_scope(params + [ids], name, "embedding_lookup") as name:
if not params:
raise ValueError("Need at least one param")
np = len(params) # Number of partitions
params = ops.convert_n_to_tensor_or_indexed_slices(params, name="params")
if np == 1:
with ops.colocate_with(params[0]):
return array_ops.gather(params[0], ids, name=name,
validate_indices=validate_indices)
else:
ids = ops.convert_to_tensor(ids, name="ids")
flat_ids = array_ops.reshape(ids, [-1])
original_indices = math_ops.range(array_ops.size(flat_ids))
# Create p_assignments and set new_ids depending on the strategy.
if partition_strategy == "mod":
p_assignments = flat_ids % np
new_ids = flat_ids // np
elif partition_strategy == "div":
# Compute num_total_ids as the sum of dim-0 of params, then assign to
# partitions based on a constant number of ids per partition. Optimize
# if we already know the full shape statically.
dim_0_size = params[0].get_shape()[0]
for p in xrange(1, np):
dim_0_size += params[p].get_shape()[0]
if dim_0_size.value:
num_total_ids = constant_op.constant(dim_0_size.value, flat_ids.dtype)
else:
dim_0_sizes = []
for p in xrange(np):
with ops.colocate_with(params[p]):
dim_0_sizes.append(array_ops.shape(params[p])[0])
num_total_ids = math_ops.reduce_sum(
math_ops.cast(array_ops.pack(dim_0_sizes), flat_ids.dtype))
ids_per_partition = num_total_ids // np
extras = num_total_ids % np
p_assignments = math_ops.maximum(
flat_ids // (ids_per_partition + 1),
(flat_ids - extras) // ids_per_partition)
# Emulate a conditional using a boolean indicator tensor
is_in_first_extras_partitions = math_ops.cast(
p_assignments < extras, flat_ids.dtype)
new_ids = (
is_in_first_extras_partitions * (
flat_ids % (ids_per_partition + 1)) +
(1 - is_in_first_extras_partitions) * (
(flat_ids - extras) % ids_per_partition))
else:
raise ValueError("Unrecognized partition strategy: " +
partition_strategy)
# Cast partition assignments to int32 for use in dynamic_partition.
# There really should not be more than 2^32 partitions.
p_assignments = math_ops.cast(p_assignments, dtypes.int32)
# Partition list of ids based on assignments into np separate lists
gather_ids = data_flow_ops.dynamic_partition(new_ids, p_assignments, np)
# Similarly, partition the original indices.
pindices = data_flow_ops.dynamic_partition(original_indices,
p_assignments, np)
# Do np separate lookups, finding embeddings for plist[p] in params[p]
partitioned_result = []
for p in xrange(np):
with ops.colocate_with(params[p]):
partitioned_result.append(array_ops.gather(
params[p], gather_ids[p],
validate_indices=validate_indices))
# Stitch these back together
ret = data_flow_ops.dynamic_stitch(pindices, partitioned_result,
name=name)
# Reshape to reverse the flattening of ids.
# It's important that we compute params[0].shape on the right device
# to avoid data motion.
with ops.colocate_with(params[0]):
params_shape = array_ops.shape(params[0])
ret = array_ops.reshape(ret, array_ops.concat(0, [
array_ops.shape(ids), array_ops.slice(params_shape, [1], [-1])]))
# output shape = ids.shape + params[*].shape[1:]
# Normally the reshape is sufficient, but setting shape explicitly
# teaches shape inference that params[1:].get_shape() matters.
element_shape = params[0].get_shape()[1:]
for p in params[1:]:
element_shape = element_shape.merge_with(p.get_shape()[1:])
ret.set_shape(ids.get_shape().concatenate(element_shape))
return ret
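# Illustrative sketch (not part of this module): a pure-Python preview of how the
# "mod" and "div" partition strategies documented above assign ids to partitions,
# assuming 13 ids split across 5 partitions. The real lookup above does the same
# thing with tensor ops.
def _partition_preview(num_ids=13, num_partitions=5, strategy="mod"):
  buckets = [[] for _ in range(num_partitions)]
  ids_per_partition, extras = divmod(num_ids, num_partitions)
  for i in range(num_ids):
    if strategy == "mod":
      p = i % num_partitions
    else:  # "div"
      p = max(i // (ids_per_partition + 1),
              (i - extras) // ids_per_partition)
    buckets[p].append(i)
  return buckets
# _partition_preview(strategy="mod") == [[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]
# _partition_preview(strategy="div") == [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]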
# TODO(lif): Add support for higher-rank SparseTensors
def embedding_lookup_sparse(params, sp_ids, sp_weights,
partition_strategy="mod",
name=None,
combiner="mean"):
"""Computes embeddings for the given ids and weights.
This op assumes that there is at least one id for each row in the dense tensor
represented by sp_ids (i.e. there are no rows with empty features), and that
all the indices of sp_ids are in canonical row-major order.
It also assumes that all id values lie in the range [0, p0), where p0
is the sum of the size of params along dimension 0.
Args:
params: A single tensor representing the complete embedding tensor,
or a list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors.
sp_ids: N x M SparseTensor of int64 ids (typically from FeatureValueToId),
where N is typically batch size and M is arbitrary.
sp_weights: either a SparseTensor of float / double weights, or None to
indicate all weights should be taken to be 1. If specified, sp_weights
must have exactly the same shape and indices as sp_ids.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: Optional name for the op.
combiner: A string specifying the reduction op. Currently "mean", "sqrtn"
and "sum" are supported.
"sum" computes the weighted sum of the embedding results for each row.
"mean" is the weighted sum divided by the total weight.
"sqrtn" is the weighted sum divided by the square root of the sum of the
squares of the weights.
Returns:
A dense tensor representing the combined embeddings for the
sparse ids. For each row in the dense tensor represented by sp_ids, the op
looks up the embeddings for all ids in that row, multiplies them by the
corresponding weight, and combines these embeddings as specified.
In other words, if
shape(combined params) = [p0, p1, ..., pm]
and
shape(sp_ids) = shape(sp_weights) = [d0, d1, ..., dn]
then
shape(output) = [d0, d1, ..., dn-1, p1, ..., pm].
For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are
[0, 0]: id 1, weight 2.0
[0, 1]: id 3, weight 0.5
[1, 0]: id 0, weight 1.0
[2, 3]: id 1, weight 3.0
with combiner="mean", then the output will be a 3x20 matrix where
output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5)
output[1, :] = params[0, :] * 1.0
output[2, :] = params[1, :] * 3.0
Raises:
TypeError: If sp_ids is not a SparseTensor, or if sp_weights is neither
None nor SparseTensor.
ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
"""
if combiner not in ("mean", "sqrtn", "sum"):
raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'")
if not isinstance(params, list):
params = [params]
if not isinstance(sp_ids, ops.SparseTensor):
raise TypeError("sp_ids must be SparseTensor")
ignore_weights = sp_weights is None
if not ignore_weights:
if not isinstance(sp_weights, ops.SparseTensor):
raise TypeError("sp_weights must be either None or SparseTensor")
sp_ids.values.get_shape().assert_is_compatible_with(
sp_weights.values.get_shape())
sp_ids.indices.get_shape().assert_is_compatible_with(
sp_weights.indices.get_shape())
sp_ids.shape.get_shape().assert_is_compatible_with(
sp_weights.shape.get_shape())
# TODO(yleon): Add enhanced node assertions to verify that sp_ids and
# sp_weights have equal indices and shapes.
with ops.op_scope(params + [sp_ids], name, "embedding_lookup_sparse") as name:
segment_ids = sp_ids.indices[:, 0]
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
ids = sp_ids.values
if ignore_weights:
ids, idx = array_ops.unique(ids)
else:
idx = None
embeddings = embedding_lookup(
params, ids, partition_strategy=partition_strategy)
if not ignore_weights:
weights = sp_weights.values
if weights.dtype != embeddings.dtype:
weights = math_ops.cast(weights, embeddings.dtype)
# Reshape weights to allow broadcast
ones = array_ops.fill(
array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1)
bcast_weights_shape = array_ops.concat(0, [
array_ops.shape(weights), ones])
orig_weights_shape = weights.get_shape()
weights = array_ops.reshape(weights, bcast_weights_shape)
# Set the weight shape, since after reshaping to bcast_weights_shape,
# the shape becomes None.
if embeddings.get_shape().ndims is not None:
weights.set_shape(orig_weights_shape.concatenate(
[1 for _ in range(embeddings.get_shape().ndims - 1)]))
embeddings *= weights
if combiner == "sum":
embeddings = math_ops.segment_sum(embeddings, segment_ids, name=name)
elif combiner == "mean":
embeddings = math_ops.segment_sum(embeddings, segment_ids)
weight_sum = math_ops.segment_sum(weights, segment_ids)
embeddings = math_ops.div(embeddings, weight_sum, name=name)
elif combiner == "sqrtn":
embeddings = math_ops.segment_sum(embeddings, segment_ids)
weights_squared = math_ops.pow(weights, 2)
weight_sum = math_ops.segment_sum(weights_squared, segment_ids)
weight_sum_sqrt = math_ops.sqrt(weight_sum)
embeddings = math_ops.div(embeddings, weight_sum_sqrt, name=name)
else:
assert False, "Unrecognized combiner"
else:
assert idx is not None
if combiner == "sum":
embeddings = math_ops.sparse_segment_sum(embeddings, idx, segment_ids,
name=name)
elif combiner == "mean":
embeddings = math_ops.sparse_segment_mean(embeddings, idx, segment_ids,
name=name)
elif combiner == "sqrtn":
embeddings = math_ops.sparse_segment_sqrt_n(embeddings, idx,
segment_ids, name=name)
else:
assert False, "Unrecognized combiner"
return embeddings
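# Illustrative sketch (not part of this module): the "mean" combiner applied to the
# toy example from the docstring above, in plain Python. `params` is a list of
# embedding rows; `rows` holds (id, weight) pairs per dense row. This hypothetical
# helper only makes the weighted-average arithmetic concrete.
def _mean_combine_preview(params, rows):
  out = []
  for row in rows:
    total_weight = sum(w for _, w in row)
    dim = len(params[0])
    out.append([sum(w * params[i][d] for i, w in row) / total_weight
                for d in range(dim)])
  return out
# With rows == [[(1, 2.0), (3, 0.5)], [(0, 1.0)], [(1, 3.0)]], the first output row
# equals (params[1] * 2.0 + params[3] * 0.5) / 2.5, matching the docstring example.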
|
|
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base test cases for all neutron tests.
"""
import contextlib
import gc
import logging as std_logging
import os
import os.path
import random
import weakref
import eventlet.timeout
import fixtures
import mock
from oslo_concurrency.fixture import lockutils
from oslo_config import cfg
from oslo_messaging import conffixture as messaging_conffixture
from oslo_utils import strutils
import six
import testtools
from neutron.agent.linux import external_process
from neutron.api.rpc.callbacks.consumer import registry as rpc_consumer_reg
from neutron.callbacks import manager as registry_manager
from neutron.callbacks import registry
from neutron.common import config
from neutron.common import constants
from neutron.common import rpc as n_rpc
from neutron.db import agentschedulers_db
from neutron import manager
from neutron import policy
from neutron.tests import fake_notifier
from neutron.tests import post_mortem_debug
from neutron.tests import tools
CONF = cfg.CONF
CONF.import_opt('state_path', 'neutron.common.config')
LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
ROOTDIR = os.path.dirname(__file__)
ETCDIR = os.path.join(ROOTDIR, 'etc')
def etcdir(*p):
return os.path.join(ETCDIR, *p)
def fake_use_fatal_exceptions(*args):
return True
def fake_consume_in_threads(self):
return []
def get_rand_name(max_length=None, prefix='test'):
"""Return a random string.
The string will start with 'prefix' and will be exactly 'max_length'.
If 'max_length' is None, then exactly 8 random characters, each
hexadecimal, will be added. In case len(prefix) <= len(max_length),
ValueError will be raised to indicate the problem.
"""
if max_length:
length = max_length - len(prefix)
if length <= 0:
raise ValueError("'max_length' must be bigger than 'len(prefix)'.")
suffix = ''.join(str(random.randint(0, 9)) for i in range(length))
else:
suffix = hex(random.randint(0x10000000, 0x7fffffff))[2:]
return prefix + suffix
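# get_rand_name examples (suffix characters are random; shown only for illustration):
#   get_rand_name(max_length=12, prefix='test') -> 'test' plus 8 random digits,
#   e.g. 'test40718236'; get_rand_name(prefix='test') -> 'test' plus 8 hex chars.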
def get_rand_device_name(prefix='test'):
return get_rand_name(
max_length=constants.DEVICE_NAME_MAX_LEN, prefix=prefix)
def bool_from_env(key, strict=False, default=False):
value = os.environ.get(key)
return strutils.bool_from_string(value, strict=strict, default=default)
def get_test_timeout(default=0):
    return int(os.environ.get('OS_TEST_TIMEOUT', default))
class AttributeDict(dict):
"""
Provide attribute access (dict.key) to dictionary values.
"""
def __getattr__(self, name):
"""Allow attribute access for all keys in the dict."""
if name in self:
return self[name]
raise AttributeError(_("Unknown attribute '%s'.") % name)
class DietTestCase(testtools.TestCase):
"""Same great taste, less filling.
BaseTestCase is responsible for doing lots of plugin-centric setup
that not all tests require (or can tolerate). This class provides
only functionality that is common across all tests.
"""
def setUp(self):
super(DietTestCase, self).setUp()
# Configure this first to ensure pm debugging support for setUp()
debugger = os.environ.get('OS_POST_MORTEM_DEBUGGER')
if debugger:
self.addOnException(post_mortem_debug.get_exception_handler(
debugger))
# Make sure we see all relevant deprecation warnings when running tests
self.useFixture(tools.WarningsFixture())
if bool_from_env('OS_DEBUG'):
_level = std_logging.DEBUG
else:
_level = std_logging.INFO
capture_logs = bool_from_env('OS_LOG_CAPTURE')
if not capture_logs:
std_logging.basicConfig(format=LOG_FORMAT, level=_level)
self.log_fixture = self.useFixture(
fixtures.FakeLogger(
format=LOG_FORMAT,
level=_level,
nuke_handlers=capture_logs,
))
test_timeout = get_test_timeout()
if test_timeout == -1:
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
# If someone does use tempfile directly, ensure that it's cleaned up
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.addCleanup(mock.patch.stopall)
if bool_from_env('OS_STDOUT_CAPTURE'):
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if bool_from_env('OS_STDERR_CAPTURE'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
self.addOnException(self.check_for_systemexit)
self.orig_pid = os.getpid()
def check_for_systemexit(self, exc_info):
if isinstance(exc_info[1], SystemExit):
if os.getpid() != self.orig_pid:
# Subprocess - let it just exit
raise
# This makes sys.exit(0) still a failure
self.force_failure = True
@contextlib.contextmanager
def assert_max_execution_time(self, max_execution_time=5):
with eventlet.timeout.Timeout(max_execution_time, False):
yield
return
self.fail('Execution of this test timed out')
def assertOrderedEqual(self, expected, actual):
expect_val = self.sort_dict_lists(expected)
actual_val = self.sort_dict_lists(actual)
self.assertEqual(expect_val, actual_val)
def sort_dict_lists(self, dic):
for key, value in six.iteritems(dic):
if isinstance(value, list):
dic[key] = sorted(value)
elif isinstance(value, dict):
dic[key] = self.sort_dict_lists(value)
return dic
def assertDictSupersetOf(self, expected_subset, actual_superset):
"""Checks that actual dict contains the expected dict.
After checking that the arguments are of the right type, this checks
that each item in expected_subset is in, and matches, what is in
actual_superset. Separate tests are done, so that detailed info can
be reported upon failure.
"""
if not isinstance(expected_subset, dict):
self.fail("expected_subset (%s) is not an instance of dict" %
type(expected_subset))
if not isinstance(actual_superset, dict):
self.fail("actual_superset (%s) is not an instance of dict" %
type(actual_superset))
for k, v in expected_subset.items():
self.assertIn(k, actual_superset)
self.assertEqual(v, actual_superset[k],
"Key %(key)s expected: %(exp)r, actual %(act)r" %
{'key': k, 'exp': v, 'act': actual_superset[k]})
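        # assertDictSupersetOf example: ({'a': 1}, {'a': 1, 'b': 2}) passes,
        # while ({'a': 2}, {'a': 1, 'b': 2}) fails on the mismatched value.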
class ProcessMonitorFixture(fixtures.Fixture):
"""Test fixture to capture and cleanup any spawn process monitor."""
def _setUp(self):
self.old_callable = (
external_process.ProcessMonitor._spawn_checking_thread)
p = mock.patch("neutron.agent.linux.external_process.ProcessMonitor."
"_spawn_checking_thread",
new=lambda x: self.record_calls(x))
p.start()
self.instances = []
self.addCleanup(self.stop)
def stop(self):
for instance in self.instances:
instance.stop()
def record_calls(self, instance):
self.old_callable(instance)
self.instances.append(instance)
class BaseTestCase(DietTestCase):
@staticmethod
def config_parse(conf=None, args=None):
"""Create the default configurations."""
# neutron.conf includes rpc_backend which needs to be cleaned up
if args is None:
args = []
args += ['--config-file', etcdir('neutron.conf')]
if conf is None:
config.init(args=args)
else:
conf(args)
def setUp(self):
super(BaseTestCase, self).setUp()
# suppress all but errors here
capture_logs = bool_from_env('OS_LOG_CAPTURE')
self.useFixture(
fixtures.FakeLogger(
name='neutron.api.extensions',
format=LOG_FORMAT,
level=std_logging.ERROR,
nuke_handlers=capture_logs,
))
self.useFixture(lockutils.ExternalLockFixture())
cfg.CONF.set_override('state_path', self.get_default_temp_dir().path)
self.addCleanup(CONF.reset)
self.useFixture(ProcessMonitorFixture())
self.useFixture(fixtures.MonkeyPatch(
'neutron.common.exceptions.NeutronException.use_fatal_exceptions',
fake_use_fatal_exceptions))
self.useFixture(fixtures.MonkeyPatch(
'oslo_config.cfg.find_config_files',
lambda project=None, prog=None, extension=None: []))
self.setup_rpc_mocks()
self.setup_config()
self.setup_test_registry_instance()
policy.init()
self.addCleanup(policy.reset)
self.addCleanup(rpc_consumer_reg.clear)
def get_new_temp_dir(self):
"""Create a new temporary directory.
:returns fixtures.TempDir
"""
return self.useFixture(fixtures.TempDir())
def get_default_temp_dir(self):
"""Create a default temporary directory.
Returns the same directory during the whole test case.
:returns fixtures.TempDir
"""
if not hasattr(self, '_temp_dir'):
self._temp_dir = self.get_new_temp_dir()
return self._temp_dir
def get_temp_file_path(self, filename, root=None):
"""Returns an absolute path for a temporary file.
If root is None, the file is created in default temporary directory. It
also creates the directory if it's not initialized yet.
If root is not None, the file is created inside the directory passed as
root= argument.
:param filename: filename
:type filename: string
:param root: temporary directory to create a new file in
:type root: fixtures.TempDir
:returns absolute file path string
"""
root = root or self.get_default_temp_dir()
return root.join(filename)
def setup_rpc_mocks(self):
# don't actually start RPC listeners when testing
self.useFixture(fixtures.MonkeyPatch(
'neutron.common.rpc.Connection.consume_in_threads',
fake_consume_in_threads))
self.useFixture(fixtures.MonkeyPatch(
'oslo_messaging.Notifier', fake_notifier.FakeNotifier))
self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
self.messaging_conf.transport_driver = 'fake'
# NOTE(russellb) We want all calls to return immediately.
self.messaging_conf.response_timeout = 0
self.useFixture(self.messaging_conf)
self.addCleanup(n_rpc.clear_extra_exmods)
n_rpc.add_extra_exmods('neutron.test')
self.addCleanup(n_rpc.cleanup)
n_rpc.init(CONF)
def setup_test_registry_instance(self):
"""Give a private copy of the registry to each test."""
self._callback_manager = registry_manager.CallbacksManager()
mock.patch.object(registry, '_get_callback_manager',
return_value=self._callback_manager).start()
def setup_config(self, args=None):
"""Tests that need a non-default config can override this method."""
self.config_parse(args=args)
def config(self, **kw):
"""Override some configuration values.
The keyword arguments are the names of configuration options to
override and their values.
If a group argument is supplied, the overrides are applied to
the specified configuration option group.
All overrides are automatically cleared at the end of the current
test by the fixtures cleanup process.
"""
group = kw.pop('group', None)
for k, v in six.iteritems(kw):
CONF.set_override(k, v, group)
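        # Example (option names below are purely illustrative):
        #   self.config(host='testhost')
        #   self.config(some_option=True, group='some_group')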
def setup_coreplugin(self, core_plugin=None):
cp = PluginFixture(core_plugin)
self.useFixture(cp)
self.patched_dhcp_periodic = cp.patched_dhcp_periodic
def setup_notification_driver(self, notification_driver=None):
self.addCleanup(fake_notifier.reset)
if notification_driver is None:
notification_driver = [fake_notifier.__name__]
cfg.CONF.set_override("notification_driver", notification_driver)
class PluginFixture(fixtures.Fixture):
def __init__(self, core_plugin=None):
super(PluginFixture, self).__init__()
self.core_plugin = core_plugin
def _setUp(self):
self.dhcp_periodic_p = mock.patch(
'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.'
'start_periodic_dhcp_agent_status_check')
self.patched_dhcp_periodic = self.dhcp_periodic_p.start()
# Plugin cleanup should be triggered last so that
# test-specific cleanup has a chance to release references.
self.addCleanup(self.cleanup_core_plugin)
if self.core_plugin is not None:
cfg.CONF.set_override('core_plugin', self.core_plugin)
def cleanup_core_plugin(self):
"""Ensure that the core plugin is deallocated."""
nm = manager.NeutronManager
if not nm.has_instance():
return
# TODO(marun) Fix plugins that do not properly initialize notifiers
agentschedulers_db.AgentSchedulerDbMixin.agent_notifiers = {}
# Perform a check for deallocation only if explicitly
# configured to do so since calling gc.collect() after every
# test increases test suite execution time by ~50%.
check_plugin_deallocation = (
bool_from_env('OS_CHECK_PLUGIN_DEALLOCATION'))
if check_plugin_deallocation:
plugin = weakref.ref(nm._instance.plugin)
nm.clear_instance()
if check_plugin_deallocation:
gc.collect()
# TODO(marun) Ensure that mocks are deallocated?
if plugin() and not isinstance(plugin(), mock.Base):
raise AssertionError(
'The plugin for this test was not deallocated.')
|
|
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
import sys
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/kennethreitz/requests/issues/3578.
import encodings.idna
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .packages.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from ._internal_utils import to_native_string, unicode_is_ascii
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
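        # _encode_params example (illustrative):
        #   _encode_params({'k1': 'v1', 'k2': ['v2', 'v3']}) -> 'k1=v1&k2=v2&k2=v3'
        #   (pair order is arbitrary when the input is a dict).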
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
        The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
        or 4-tuples (filename, fileobj, content_type, custom_headers).
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
else:
fdata = fp.read()
rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None, json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
#: integer denoting starting position of a readable file-like body.
self._body_position = None
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None, json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = _copy_cookie_jar(self._cookies)
p.body = self.body
p.hooks = self.hooks
p._body_position = self._body_position
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = to_native_string(self.method.upper())
@staticmethod
def _get_idna_encoded_host(host):
try:
from .packages import idna
except ImportError:
# tolerate the possibility of downstream repackagers unvendoring `requests`
# For more information, read: packages/__init__.py
import idna
sys.modules['requests.packages.idna'] = idna
try:
host = idna.encode(host, uts46=True).decode('utf-8')
except idna.IDNAError:
raise UnicodeError
return host
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
#: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/kennethreitz/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Remove leading whitespaces from url
url = url.lstrip()
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
error = error.format(to_native_string(url, 'utf8'))
raise MissingSchema(error)
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# In general, we want to try IDNA encoding the hostname if the string contains
# non-ASCII characters. This allows users to automatically get the correct IDNA
# behaviour. For strings containing only ASCII characters, we need to also verify
# it doesn't start with a wildcard (*), before allowing the unencoded hostname.
if not unicode_is_ascii(host):
try:
host = self._get_idna_encoded_host(host)
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
elif host.startswith(u'*'):
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
if isinstance(params, (str, bytes)):
params = to_native_string(params)
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
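        # prepare_url example (illustrative): prepare_url('http://httpbin.org/get',
        # {'key': 'val'}) leaves self.url == 'http://httpbin.org/get?key=val'.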
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
if not data and json is not None:
# urllib3 requires a bytes-like body. Python 2's json.dumps
# provides this natively, but Python 3 gives a Unicode string.
content_type = 'application/json'
body = complexjson.dumps(json)
if not isinstance(body, bytes):
body = body.encode('utf-8')
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, collections.Mapping))
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if getattr(body, 'tell', None) is not None:
# Record the current file position before reading.
# This will allow us to rewind a file in the event
# of a redirect.
try:
self._body_position = body.tell()
except (IOError, OSError):
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body
self._body_position = object()
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
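        # prepare_body example (illustrative): with data={'k': 'v'} and no files or
        # json, body becomes 'k=v' and Content-Type is set to
        # 'application/x-www-form-urlencoded' unless a Content-Type was supplied.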
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fallback
# to Transfer-Encoding: chunked.
self.headers['Content-Length'] = builtin_str(length)
elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content', 'status_code', 'headers', 'url', 'history',
'encoding', 'reason', 'cookies', 'elapsed', 'request'
]
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta).
#: This property specifically measures the time taken between sending
#: the first byte of the request and finishing parsing the headers. It
#: is therefore unaffected by consuming the response content or the
#: value of the ``stream`` keyword argument.
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanent versions of redirect"""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library"""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
# Special case for urllib3.
if hasattr(self.raw, 'stream'):
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
else:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
elif chunk_size is not None and not isinstance(chunk_size, int):
raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0 or self.raw is None:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises ValueError: If the response body does not contain valid json.
"""
if not self.encoding and self.content and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; if the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return complexjson.loads(
self.content.decode(encoding), **kwargs
)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return complexjson.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
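# Illustrative note for the ``links`` property above: a header such as
# '<https://api.example.com/items?page=2>; rel="next"' parses to
# {'next': {'url': 'https://api.example.com/items?page=2', 'rel': 'next'}}.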
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if isinstance(self.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings. (See PR #3538)
try:
reason = self.reason.decode('utf-8')
except UnicodeDecodeError:
reason = self.reason.decode('iso-8859-1')
else:
reason = self.reason
if 400 <= self.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
elif 500 <= self.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
if not self._content_consumed:
self.raw.close()
release_conn = getattr(self.raw, 'release_conn', None)
if release_conn is not None:
release_conn()
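# Hedged usage sketch (not part of the library itself): how the streaming helpers
# above are typically combined. The URL is illustrative and network access is
# assumed; only the documented Response methods are used.
def _example_stream_lines(url='https://example.com/stream', chunk_size=512):
    """Minimal sketch: iterate a streamed response line by line."""
    import requests  # assumes the installed requests distribution
    resp = requests.get(url, stream=True)
    try:
        resp.raise_for_status()  # surfaces the HTTPError built by raise_for_status() above
        for line in resp.iter_lines(chunk_size=chunk_size):
            if line:  # iter_lines() can yield empty keep-alive lines
                print(line)
    finally:
        resp.close()  # hand the connection back to the pool (see close() above)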
|
|
"""Blueprint models."""
import asyncio
import logging
import pathlib
from typing import Any, Dict, Optional, Union
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.const import CONF_DOMAIN, CONF_NAME, CONF_PATH
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import placeholder
from homeassistant.util import yaml
from .const import (
BLUEPRINT_FOLDER,
CONF_BLUEPRINT,
CONF_INPUT,
CONF_SOURCE_URL,
CONF_USE_BLUEPRINT,
DOMAIN,
)
from .errors import (
BlueprintException,
FailedToLoad,
InvalidBlueprint,
InvalidBlueprintInputs,
MissingPlaceholder,
)
from .schemas import BLUEPRINT_INSTANCE_FIELDS, BLUEPRINT_SCHEMA
class Blueprint:
"""Blueprint of a configuration structure."""
def __init__(
self,
data: dict,
*,
path: Optional[str] = None,
expected_domain: Optional[str] = None,
) -> None:
"""Initialize a blueprint."""
try:
data = self.data = BLUEPRINT_SCHEMA(data)
except vol.Invalid as err:
raise InvalidBlueprint(expected_domain, path, data, err) from err
self.placeholders = placeholder.extract_placeholders(data)
# In the future we will treat a domain mismatch as "incorrect" and allow recovering from it
data_domain = data[CONF_BLUEPRINT][CONF_DOMAIN]
if expected_domain is not None and data_domain != expected_domain:
raise InvalidBlueprint(
expected_domain,
path or self.name,
data,
f"Found incorrect blueprint type {data_domain}, expected {expected_domain}",
)
self.domain = data_domain
missing = self.placeholders - set(data[CONF_BLUEPRINT].get(CONF_INPUT, {}))
if missing:
raise InvalidBlueprint(
data_domain,
path or self.name,
data,
f"Missing input definition for {', '.join(missing)}",
)
@property
def name(self) -> str:
"""Return blueprint name."""
return self.data[CONF_BLUEPRINT][CONF_NAME]
@property
def metadata(self) -> dict:
"""Return blueprint metadata."""
return self.data[CONF_BLUEPRINT]
def update_metadata(self, *, source_url: Optional[str] = None) -> None:
"""Update metadata."""
if source_url is not None:
self.data[CONF_BLUEPRINT][CONF_SOURCE_URL] = source_url
class BlueprintInputs:
"""Inputs for a blueprint."""
def __init__(
self, blueprint: Blueprint, config_with_inputs: Dict[str, Any]
) -> None:
"""Instantiate a blueprint inputs object."""
self.blueprint = blueprint
self.config_with_inputs = config_with_inputs
@property
def inputs(self):
"""Return the inputs."""
return self.config_with_inputs[CONF_USE_BLUEPRINT][CONF_INPUT]
def validate(self) -> None:
"""Validate the inputs."""
missing = self.blueprint.placeholders - set(self.inputs)
if missing:
raise MissingPlaceholder(
self.blueprint.domain, self.blueprint.name, missing
)
# In the future we can check that entities are of the correct domain, that areas exist, etc.,
# using the new selector helper.
@callback
def async_substitute(self) -> dict:
"""Get the blueprint value with the inputs substituted."""
processed = placeholder.substitute(self.blueprint.data, self.inputs)
combined = {**self.config_with_inputs, **processed}
# From config_with_inputs
combined.pop(CONF_USE_BLUEPRINT)
# From blueprint
combined.pop(CONF_BLUEPRINT)
return combined
class DomainBlueprints:
"""Blueprints for a specific domain."""
def __init__(
self,
hass: HomeAssistant,
domain: str,
logger: logging.Logger,
) -> None:
"""Initialize a domain blueprints instance."""
self.hass = hass
self.domain = domain
self.logger = logger
self._blueprints = {}
self._load_lock = asyncio.Lock()
hass.data.setdefault(DOMAIN, {})[domain] = self
@callback
def async_reset_cache(self) -> None:
"""Reset the blueprint cache."""
self._blueprints = {}
def _load_blueprint(self, blueprint_path) -> Blueprint:
"""Load a blueprint."""
try:
blueprint_data = yaml.load_yaml(
self.hass.config.path(BLUEPRINT_FOLDER, self.domain, blueprint_path)
)
except (HomeAssistantError, FileNotFoundError) as err:
raise FailedToLoad(self.domain, blueprint_path, err) from err
return Blueprint(
blueprint_data, expected_domain=self.domain, path=blueprint_path
)
def _load_blueprints(self) -> Dict[str, Union[Blueprint, BlueprintException]]:
"""Load all the blueprints."""
blueprint_folder = pathlib.Path(
self.hass.config.path(BLUEPRINT_FOLDER, self.domain)
)
results = {}
for blueprint_path in blueprint_folder.glob("**/*.yaml"):
blueprint_path = str(blueprint_path.relative_to(blueprint_folder))
if self._blueprints.get(blueprint_path) is None:
try:
self._blueprints[blueprint_path] = self._load_blueprint(
blueprint_path
)
except BlueprintException as err:
self._blueprints[blueprint_path] = None
results[blueprint_path] = err
continue
results[blueprint_path] = self._blueprints[blueprint_path]
return results
async def async_get_blueprints(
self,
) -> Dict[str, Union[Blueprint, BlueprintException]]:
"""Get all the blueprints."""
async with self._load_lock:
return await self.hass.async_add_executor_job(self._load_blueprints)
async def async_get_blueprint(self, blueprint_path: str) -> Blueprint:
"""Get a blueprint."""
if blueprint_path in self._blueprints:
return self._blueprints[blueprint_path]
async with self._load_lock:
# Check it again
if blueprint_path in self._blueprints:
return self._blueprints[blueprint_path]
try:
blueprint = await self.hass.async_add_executor_job(
self._load_blueprint, blueprint_path
)
except Exception:
self._blueprints[blueprint_path] = None
raise
self._blueprints[blueprint_path] = blueprint
return blueprint
async def async_inputs_from_config(
self, config_with_blueprint: dict
) -> BlueprintInputs:
"""Process a blueprint config."""
try:
config_with_blueprint = BLUEPRINT_INSTANCE_FIELDS(config_with_blueprint)
except vol.Invalid as err:
raise InvalidBlueprintInputs(
self.domain, humanize_error(config_with_blueprint, err)
) from err
bp_conf = config_with_blueprint[CONF_USE_BLUEPRINT]
blueprint = await self.async_get_blueprint(bp_conf[CONF_PATH])
inputs = BlueprintInputs(blueprint, config_with_blueprint)
inputs.validate()
return inputs
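# Hedged sketch (illustrative only): how Blueprint and BlueprintInputs compose.
# The minimal dicts below are assumptions about what BLUEPRINT_SCHEMA and the
# instance fields accept; a real blueprint would also define triggers, inputs
# and placeholders.
def _example_blueprint_flow() -> dict:
    raw = {CONF_BLUEPRINT: {CONF_NAME: "Example blueprint", CONF_DOMAIN: "automation"}}
    blueprint = Blueprint(raw, expected_domain="automation")
    inputs = BlueprintInputs(
        blueprint,
        {CONF_USE_BLUEPRINT: {CONF_PATH: "example.yaml", CONF_INPUT: {}}},
    )
    inputs.validate()  # raises MissingPlaceholder if required inputs are absent
    return inputs.async_substitute()  # blueprint data with the inputs substituted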
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This script is used to test Salt from a Jenkins server, specifically
jenkins.saltstack.com.
This script is intended to be shell-centric!!
'''
# Import python libs
from __future__ import absolute_import, print_function
import glob
import os
import re
import sys
import json
import time
import shutil
import optparse
import subprocess
import random
# Import Salt libs
import salt.utils
try:
from salt.utils.nb_popen import NonBlockingPopen
except ImportError:
# Salt not installed, or nb_popen was not yet shipped with it
SALT_LIB = os.path.abspath(
os.path.dirname(os.path.dirname(__file__))
)
if SALT_LIB not in sys.path:
sys.path.insert(0, SALT_LIB)
try:
# Let's try using the current checked out code
from salt.utils.nb_popen import NonBlockingPopen
except ImportError:
# Still an ImportError??? Let's use some "brute-force"
sys.path.insert(
0,
os.path.join(SALT_LIB, 'salt', 'utils')
)
from nb_popen import NonBlockingPopen
# Import 3rd-party libs
import yaml
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
SALT_GIT_URL = 'https://github.com/saltstack/salt.git'
def build_pillar_data(options):
'''
Build a YAML formatted string to properly pass pillar data
'''
pillar = {'test_transport': options.test_transport,
'cloud_only': options.cloud_only,
'with_coverage': options.test_without_coverage is False}
if options.test_git_commit is not None:
pillar['test_git_commit'] = options.test_git_commit
if options.test_git_url is not None:
pillar['test_git_url'] = options.test_git_url
if options.bootstrap_salt_url is not None:
pillar['bootstrap_salt_url'] = options.bootstrap_salt_url
if options.bootstrap_salt_commit is not None:
pillar['bootstrap_salt_commit'] = options.bootstrap_salt_commit
if options.package_source_dir:
pillar['package_source_dir'] = options.package_source_dir
if options.package_build_dir:
pillar['package_build_dir'] = options.package_build_dir
if options.package_artifact_dir:
pillar['package_artifact_dir'] = options.package_artifact_dir
if options.pillar:
pillar.update(dict(options.pillar))
return yaml.dump(pillar, default_flow_style=True, indent=0, width=sys.maxint).rstrip()
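# Hedged sketch: the kind of single-line YAML string build_pillar_data() produces.
# The _Opts stub below only mimics the option attributes referenced above.
def _example_pillar_yaml():
    class _Opts(object):
        test_transport = 'zeromq'
        cloud_only = False
        test_without_coverage = False
        test_git_commit = None
        test_git_url = None
        bootstrap_salt_url = None
        bootstrap_salt_commit = None
        package_source_dir = None
        package_build_dir = None
        package_artifact_dir = None
        pillar = None
    # Roughly: "{cloud_only: false, test_transport: zeromq, with_coverage: true}"
    return build_pillar_data(_Opts())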
def build_minion_target(options, vm_name):
target = vm_name
for grain in options.grain_target:
target += ' and G@{0}'.format(grain)
if options.grain_target:
return '"{0}"'.format(target)
return target
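# Hedged sketch: build_minion_target() folds any --grain-target values into a
# quoted compound matcher. The stub and VM name below are illustrative only.
def _example_minion_target():
    class _Opts(object):
        grain_target = ['os:Ubuntu', 'oscodename:trusty']
    # Returns: '"ZJENKINS-ubuntu-abc123 and G@os:Ubuntu and G@oscodename:trusty"'
    return build_minion_target(_Opts(), 'ZJENKINS-ubuntu-abc123')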
def generate_vm_name(options):
'''
Generate a random enough vm name
'''
if 'BUILD_NUMBER' in os.environ:
random_part = 'BUILD{0:0>6}'.format(os.environ.get('BUILD_NUMBER'))
else:
random_part = os.urandom(3).encode('hex')
return '{0}-{1}-{2}'.format(options.vm_prefix, options.platform, random_part)
def delete_vm(options):
'''
Delete (destroy) a VM
'''
cmd = 'salt-cloud -d {0} -y'.format(options.delete_vm)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = NonBlockingPopen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stream_stds=True
)
proc.poll_and_read_until_finish(interval=0.5)
proc.communicate()
def echo_parseable_environment(options, parser):
'''
Echo NAME=VAL parseable output
'''
output = []
if options.platform:
name = generate_vm_name(options)
output.extend([
'JENKINS_SALTCLOUD_VM_PLATFORM={0}'.format(options.platform),
'JENKINS_SALTCLOUD_VM_NAME={0}'.format(name)
])
if options.provider:
output.append(
'JENKINS_SALTCLOUD_VM_PROVIDER={0}'.format(options.provider)
)
if options.pull_request:
# This is a Jenkins triggered Pull Request
# We need some more data about the Pull Request available to the
# environment
if HAS_REQUESTS is False:
parser.error(
'The python \'requests\' library needs to be installed'
)
headers = {}
url = 'https://api.github.com/repos/saltstack/salt/pulls/{0}'.format(options.pull_request)
github_access_token_path = os.path.join(
os.environ.get('JENKINS_HOME', os.path.expanduser('~')),
'.github_token'
)
if os.path.isfile(github_access_token_path):
headers = {
'Authorization': 'token {0}'.format(
open(github_access_token_path).read().strip()
)
}
http_req = requests.get(url, headers=headers)
if http_req.status_code != 200:
parser.error(
'Unable to get the pull request: {0[message]}'.format(http_req.json())
)
pr_details = http_req.json()
output.extend([
'SALT_PR_GIT_URL={0}'.format(pr_details['head']['repo']['clone_url']),
'SALT_PR_GIT_BRANCH={0}'.format(pr_details['head']['ref']),
'SALT_PR_GIT_COMMIT={0}'.format(pr_details['head']['sha']),
'SALT_PR_GIT_BASE_BRANCH={0}'.format(pr_details['base']['ref']),
])
sys.stdout.write('\n\n{0}\n\n'.format('\n'.join(output)))
sys.stdout.flush()
def download_unittest_reports(options):
print('Downloading remote unittest reports...')
sys.stdout.flush()
workspace = options.workspace
xml_reports_path = os.path.join(workspace, 'xml-test-reports')
if os.path.isdir(xml_reports_path):
shutil.rmtree(xml_reports_path)
os.makedirs(xml_reports_path)
cmds = (
'salt {0} archive.tar zcvf /tmp/xml-test-reports.tar.gz \'*.xml\' cwd=/tmp/xml-unittests-output/',
'salt {0} cp.push /tmp/xml-test-reports.tar.gz',
'mv -f /var/cache/salt/master/minions/{1}/files/tmp/xml-test-reports.tar.gz {2} && '
'tar zxvf {2}/xml-test-reports.tar.gz -C {2}/xml-test-reports && '
'rm -f {2}/xml-test-reports.tar.gz'
)
vm_name = options.download_unittest_reports
for cmd in cmds:
cmd = cmd.format(build_minion_target(options, vm_name), vm_name, workspace)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = NonBlockingPopen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stream_stds=True
)
proc.poll_and_read_until_finish(interval=0.5)
proc.communicate()
if proc.returncode != 0:
print(
'\nFailed to execute command. Exit code: {0}'.format(
proc.returncode
)
)
time.sleep(0.25)
def download_coverage_report(options):
print('Downloading remote coverage report...')
sys.stdout.flush()
workspace = options.workspace
vm_name = options.download_coverage_report
if os.path.isfile(os.path.join(workspace, 'coverage.xml')):
os.unlink(os.path.join(workspace, 'coverage.xml'))
cmds = (
'salt {0} archive.gzip /tmp/coverage.xml',
'salt {0} cp.push /tmp/coverage.xml.gz',
'gunzip /var/cache/salt/master/minions/{1}/files/tmp/coverage.xml.gz',
'mv /var/cache/salt/master/minions/{1}/files/tmp/coverage.xml {2}'
)
for cmd in cmds:
cmd = cmd.format(build_minion_target(options, vm_name), vm_name, workspace)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = NonBlockingPopen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stream_stds=True
)
proc.poll_and_read_until_finish(interval=0.5)
proc.communicate()
if proc.returncode != 0:
print(
'\nFailed to execute command. Exit code: {0}'.format(
proc.returncode
)
)
time.sleep(0.25)
def download_remote_logs(options):
print('Downloading remote logs...')
sys.stdout.flush()
workspace = options.workspace
vm_name = options.download_remote_logs
for fname in ('salt-runtests.log', 'minion.log'):
if os.path.isfile(os.path.join(workspace, fname)):
os.unlink(os.path.join(workspace, fname))
if not options.remote_log_path:
options.remote_log_path = [
'/tmp/salt-runtests.log',
'/var/log/salt/minion'
]
cmds = []
for remote_log in options.remote_log_path:
cmds.extend([
'salt {{0}} archive.gzip {0}'.format(remote_log),
'salt {{0}} cp.push {0}.gz'.format(remote_log),
'gunzip /var/cache/salt/master/minions/{{1}}/files{0}.gz'.format(remote_log),
'mv /var/cache/salt/master/minions/{{1}}/files{0} {{2}}/{1}'.format(
remote_log,
'{0}{1}'.format(
os.path.basename(remote_log),
'' if remote_log.endswith('.log') else '.log'
)
)
])
for cmd in cmds:
cmd = cmd.format(build_minion_target(options, vm_name), vm_name, workspace)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = NonBlockingPopen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stream_stds=True
)
proc.poll_and_read_until_finish(interval=0.5)
proc.communicate()
if proc.returncode != 0:
print(
'\nFailed to execute command. Exit code: {0}'.format(
proc.returncode
)
)
time.sleep(0.25)
def download_packages(options):
print('Downloading packages...')
sys.stdout.flush()
workspace = options.workspace
vm_name = options.download_packages
for fglob in ('salt-*.rpm',
'salt-*.deb',
'salt-*.pkg.xz',
'salt-buildpackage.log'):
for fname in glob.glob(os.path.join(workspace, fglob)):
if os.path.isfile(fname):
os.unlink(fname)
cmds = [
('salt {{0}} archive.tar czf {0}.tar.gz sources=\'*.*\' cwd={0}'
.format(options.package_artifact_dir)),
'salt {{0}} cp.push {0}.tar.gz'.format(options.package_artifact_dir),
('tar -C {{2}} -xzf /var/cache/salt/master/minions/{{1}}/files{0}.tar.gz'
.format(options.package_artifact_dir)),
]
for cmd in cmds:
cmd = cmd.format(build_minion_target(options, vm_name), vm_name, workspace)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = NonBlockingPopen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stream_stds=True
)
proc.poll_and_read_until_finish(interval=0.5)
proc.communicate()
if proc.returncode != 0:
print(
'\nFailed to execute command. Exit code: {0}'.format(
proc.returncode
)
)
time.sleep(0.25)
def run(opts):
'''
RUN!
'''
vm_name = os.environ.get(
'JENKINS_SALTCLOUD_VM_NAME',
generate_vm_name(opts)
)
if opts.download_remote_reports:
if opts.test_without_coverage is False:
opts.download_coverage_report = vm_name
opts.download_unittest_reports = vm_name
opts.download_packages = vm_name
if opts.bootstrap_salt_commit is not None:
if opts.bootstrap_salt_url is None:
opts.bootstrap_salt_url = 'https://github.com/saltstack/salt.git'
cmd = (
'salt-cloud -l debug'
' --script-args "-D -g {bootstrap_salt_url} -n git {1}"'
' -p {provider}_{platform} {0}'.format(
vm_name,
os.environ.get(
'SALT_MINION_BOOTSTRAP_RELEASE',
opts.bootstrap_salt_commit
),
**opts.__dict__
)
)
else:
cmd = (
'salt-cloud -l debug'
' --script-args "-D -n git {1}" -p {provider}_{platform} {0}'.format(
vm_name,
os.environ.get(
'SALT_MINION_BOOTSTRAP_RELEASE',
opts.bootstrap_salt_commit
),
**opts.__dict__
)
)
if opts.splay is not None:
# Sleep a random number of seconds
cloud_downtime = random.randint(0, opts.splay)
print('Sleeping random period before calling salt-cloud: {0}'.format(cloud_downtime))
time.sleep(cloud_downtime)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = NonBlockingPopen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stream_stds=True
)
proc.poll_and_read_until_finish(interval=0.5)
proc.communicate()
retcode = proc.returncode
if retcode != 0:
print('Failed to bootstrap VM. Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
print('VM Bootstrapped. Exit code: {0}'.format(retcode))
sys.stdout.flush()
# Sleep a random number of seconds
bootstrap_downtime = random.randint(0, opts.splay)
print('Sleeping for {0} seconds to allow the minion to breathe a little'.format(bootstrap_downtime))
sys.stdout.flush()
time.sleep(bootstrap_downtime)
if opts.bootstrap_salt_commit is not None:
# Let's find out if the installed version matches the passed in pillar
# information
print('Grabbing bootstrapped minion version information ... ')
cmd = 'salt -t 100 {0} --out json test.version'.format(build_minion_target(opts, vm_name))
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, _ = proc.communicate()
retcode = proc.returncode
if retcode != 0:
print('Failed to get the bootstrapped minion version. Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
outstr = salt.utils.to_str(stdout).strip()
if not outstr:
print('Failed to get the bootstrapped minion version (no output). Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
try:
version_info = json.loads(outstr)
bootstrap_minion_version = os.environ.get(
'SALT_MINION_BOOTSTRAP_RELEASE',
opts.bootstrap_salt_commit[:7]
)
print('Minion reported salt version: {0}'.format(version_info))
if bootstrap_minion_version not in version_info[vm_name]:
print('\n\nATTENTION!!!!\n')
print('The bootstrapped minion version commit does not contain the desired commit:')
print(
' \'{0}\' does not contain \'{1}\''.format(
version_info[vm_name],
bootstrap_minion_version
)
)
print('\n\n')
sys.stdout.flush()
#if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
# delete_vm(opts)
#sys.exit(retcode)
else:
print('matches!')
except ValueError:
print('Failed to load any JSON from \'{0}\''.format(outstr))
if opts.cloud_only:
# Run Cloud Provider tests preparation SLS
cloud_provider_downtime = random.randint(3, opts.splay)
time.sleep(cloud_provider_downtime)
cmd = (
'salt -t 900 {target} state.sls {cloud_prep_sls} pillar="{pillar}" '
'--no-color'.format(
target=build_minion_target(opts, vm_name),
cloud_prep_sls='cloud-only',
pillar=build_pillar_data(opts),
)
)
else:
# Run standard preparation SLS
standard_sls_downtime = random.randint(3, opts.splay)
time.sleep(standard_sls_downtime)
cmd = (
'salt -t 1800 {target} state.sls {prep_sls} pillar="{pillar}" '
'--no-color'.format(
target=build_minion_target(opts, vm_name),
prep_sls=opts.prep_sls,
pillar=build_pillar_data(opts),
)
)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = proc.communicate()
if stdout:
print(salt.utils.to_str(stdout))
if stderr:
print(salt.utils.to_str(stderr))
sys.stdout.flush()
retcode = proc.returncode
if retcode != 0:
print('Failed to execute the preparation SLS file. Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
if opts.cloud_only:
cloud_provider_pillar = random.randint(3, opts.splay)
time.sleep(cloud_provider_pillar)
# Run Cloud Provider tests pillar preparation SLS
cmd = (
'salt -t 600 {target} state.sls {cloud_prep_sls} pillar="{pillar}" '
'--no-color'.format(
target=build_minion_target(opts, vm_name),
cloud_prep_sls='cloud-test-configs',
pillar=build_pillar_data(opts),
)
)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
stdout, stderr = proc.communicate()
if stdout:
# DO NOT print the state return here!
print('Cloud configuration files provisioned via pillar.')
if stderr:
print(salt.utils.to_str(stderr))
sys.stdout.flush()
retcode = proc.returncode
if retcode != 0:
print('Failed to execute the preparation SLS file. Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
if opts.prep_sls_2 is not None:
sls_2_downtime = random.randint(3, opts.splay)
time.sleep(sls_2_downtime)
# Run the 2nd preparation SLS
cmd = (
'salt -t 30 {target} state.sls {prep_sls_2} pillar="{pillar}" '
'--no-color'.format(
prep_sls_2=opts.prep_sls_2,
pillar=build_pillar_data(opts),
target=build_minion_target(opts, vm_name),
)
)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
stdout, stderr = proc.communicate()
if stdout:
print(salt.utils.to_str(stdout))
if stderr:
print(salt.utils.to_str(stderr))
sys.stdout.flush()
retcode = proc.returncode
if retcode != 0:
print('Failed to execute the 2nd preparation SLS file. Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
# Run remote checks
if opts.test_git_url is not None:
test_git_downtime = random.randint(1, opts.splay)
time.sleep(test_git_downtime)
# Let's find out if the cloned repository is checked out from the
# desired repository
print('Grabbing the cloned repository remotes information ... ')
cmd = 'salt -t 100 {0} --out json git.remote_get /testing'.format(build_minion_target(opts, vm_name))
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, _ = proc.communicate()
retcode = proc.returncode
if retcode != 0:
print('Failed to get the cloned repository remote. Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
if not stdout:
print('Failed to get the cloned repository remote (no output). Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
try:
remotes_info = json.loads(stdout.strip())
if remotes_info is None or remotes_info[vm_name] is None or opts.test_git_url not in remotes_info[vm_name]:
print('The cloned repository remote is not the desired one:')
print(' \'{0}\' is not in {1}'.format(opts.test_git_url, remotes_info))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
print('matches!')
except ValueError:
print('Failed to load any JSON from \'{0}\''.format(salt.utils.to_str(stdout).strip()))
if opts.test_git_commit is not None:
test_git_commit_downtime = random.randint(1, opts.splay)
time.sleep(test_git_commit_downtime)
# Let's find out if the cloned repository is checked out at the desired
# commit
print('Grabbing the cloned repository commit information ... ')
cmd = 'salt -t 100 {0} --out json git.revision /testing'.format(build_minion_target(opts, vm_name))
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, _ = proc.communicate()
sys.stdout.flush()
retcode = proc.returncode
if retcode != 0:
print('Failed to get the cloned repository revision. Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
if not stdout:
print('Failed to get the cloned repository revision (no output). Exit code: {0}'.format(retcode))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
try:
revision_info = json.loads(stdout.strip())
if revision_info[vm_name][:7] != opts.test_git_commit[:7]:
print('The cloned repository commit is not the desired one:')
print(' \'{0}\' != \'{1}\''.format(revision_info[vm_name][:7], opts.test_git_commit[:7]))
sys.stdout.flush()
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
sys.exit(retcode)
print('matches!')
except ValueError:
print('Failed to load any JSON from \'{0}\''.format(salt.utils.to_str(stdout).strip()))
# Run tests here
test_begin_downtime = random.randint(3, opts.splay)
time.sleep(test_begin_downtime)
cmd = (
'salt -t 1800 {target} state.sls {sls} pillar="{pillar}" --no-color'.format(
sls=opts.sls,
pillar=build_pillar_data(opts),
target=build_minion_target(opts, vm_name),
)
)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = proc.communicate()
outstr = salt.utils.to_str(stdout)
if outstr:
print(outstr)
if stderr:
print(salt.utils.to_str(stderr))
sys.stdout.flush()
try:
match = re.search(r'Test Suite Exit Code: (?P<exitcode>[\d]+)', outstr)
retcode = int(match.group('exitcode'))
except AttributeError:
# No regex matching
retcode = 1
except ValueError:
# Not a number!?
retcode = 1
except TypeError:
# No output!?
retcode = 1
if outstr:
# Anything else, raise the exception
raise
if retcode == 0:
# Build packages
time.sleep(3)
cmd = (
'salt -t 1800 {target} state.sls buildpackage pillar="{pillar}" --no-color'.format(
pillar=build_pillar_data(opts),
target=build_minion_target(opts, vm_name),
)
)
print('Running CMD: {0}'.format(cmd))
sys.stdout.flush()
proc = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = proc.communicate()
if stdout:
print(salt.utils.to_str(stdout))
if stderr:
print(salt.utils.to_str(stderr))
sys.stdout.flush()
# Grab packages and log file (or just log file if build failed)
download_packages(opts)
if opts.download_remote_reports:
# Download unittest reports
download_unittest_reports(opts)
# Download coverage report
if opts.test_without_coverage is False:
download_coverage_report(opts)
if opts.clean and 'JENKINS_SALTCLOUD_VM_NAME' not in os.environ:
delete_vm(opts)
return retcode
def parse():
'''
Parse the CLI options
'''
parser = optparse.OptionParser()
parser.add_option(
'--vm-prefix',
default=os.environ.get('JENKINS_VM_NAME_PREFIX', 'ZJENKINS'),
help='The bootstrapped machine name prefix'
)
parser.add_option(
'-w', '--workspace',
default=os.path.abspath(
os.environ.get(
'WORKSPACE',
os.path.dirname(os.path.dirname(__file__))
)
),
help='Path to the execution workspace'
)
parser.add_option(
'--platform',
default=os.environ.get('JENKINS_SALTCLOUD_VM_PLATFORM', None),
help='The target platform, choose from:\ncent6\ncent5\nubuntu12.04')
parser.add_option(
'--provider',
default=os.environ.get('JENKINS_SALTCLOUD_VM_PROVIDER', None),
help='The vm provider')
parser.add_option(
'--bootstrap-salt-url',
default=None,
help='The salt git repository url used to bootstrap a minion')
parser.add_option(
'--bootstrap-salt-commit',
default=None,
help='The salt git commit used to bootstrap a minion')
parser.add_option(
'--test-git-url',
default=None,
help='The testing git repository url')
parser.add_option(
'--test-git-commit',
default=None,
help='The testing git commit to track')
parser.add_option(
'--test-transport',
default='zeromq',
choices=('zeromq', 'raet', 'tcp'),
help=('Select which transport to run the integration tests with, '
'zeromq, raet, or tcp. Default: %default')
)
parser.add_option(
'--test-without-coverage',
default=False,
action='store_true',
help='Do not generate coverage reports'
)
parser.add_option(
'--prep-sls',
default='git.salt',
help='The sls file to execute to prepare the system')
parser.add_option(
'--prep-sls-2',
default=None,
help='An optional 2nd system preparation SLS')
parser.add_option(
'--sls',
default='testrun-no-deps',
help='The final sls file to execute')
parser.add_option(
'--pillar',
action='append',
nargs=2,
help='Pillar (key, value)s to pass to the sls file. '
'Example: \'--pillar pillar_key pillar_value\'')
parser.add_option(
'--no-clean',
dest='clean',
default=True,
action='store_false',
help='Clean up the built vm')
parser.add_option(
'--echo-parseable-environment',
default=False,
action='store_true',
help='Print a parseable KEY=VAL output'
)
parser.add_option(
'--pull-request',
type=int,
help='Include the PR info only'
)
parser.add_option(
'--delete-vm',
default=None,
help='Delete a running VM'
)
parser.add_option(
'--download-remote-reports',
default=False,
action='store_true',
help='Download remote reports when running remote \'testrun\' state'
)
parser.add_option(
'--download-unittest-reports',
default=None,
help='Download the XML unittest results'
)
parser.add_option(
'--download-coverage-report',
default=None,
help='Download the XML coverage reports'
)
parser.add_option(
'--remote-log-path',
action='append',
default=[],
help='Provide additional log paths to download from remote minion'
)
parser.add_option(
'--download-remote-logs',
default=None,
help='Download remote minion and runtests log files'
)
parser.add_option(
'--grain-target',
action='append',
default=[],
help='Match minions using compound matchers, the minion ID, plus the passed grain.'
)
parser.add_option(
'--cloud-only',
default=False,
action='store_true',
help='Run the cloud provider tests only.'
)
parser.add_option(
'--build-packages',
default=True,
action='store_true',
help='Run buildpackage.py to create packages off of the git build.'
)
# These next three options are ignored if --build-packages is False
parser.add_option(
'--package-source-dir',
default='/testing',
help='Directory where the salt source code checkout is found '
'(default: %default)',
)
parser.add_option(
'--package-build-dir',
default='/tmp/salt-buildpackage',
help='Build root for automated package builds (default: %default)',
)
parser.add_option(
'--package-artifact-dir',
default='/tmp/salt-packages',
help='Location on the minion from which packages should be '
'retrieved (default: %default)',
)
parser.add_option(
'--splay',
type='int',
default=10,
help='The number of seconds across which calls to provisioning components should be made'
)
options, args = parser.parse_args()
if options.delete_vm is not None and not options.test_git_commit:
delete_vm(options)
parser.exit(0)
if options.download_unittest_reports is not None and not options.test_git_commit:
download_unittest_reports(options)
parser.exit(0)
if options.test_without_coverage is False:
if options.download_coverage_report is not None and not options.test_git_commit:
download_coverage_report(options)
parser.exit(0)
if options.download_remote_logs is not None and not options.test_git_commit:
download_remote_logs(options)
parser.exit(0)
if not options.platform and not options.pull_request:
parser.exit('--platform or --pull-request is required')
if not options.provider and not options.pull_request:
parser.exit('--provider or --pull-request is required')
if options.echo_parseable_environment:
echo_parseable_environment(options, parser)
parser.exit(0)
if not options.test_git_commit and not options.pull_request:
parser.exit('--test-git-commit or --pull-request is required')
return options
if __name__ == '__main__':
exit_code = run(parse())
print('Exit Code: {0}'.format(exit_code))
sys.exit(exit_code)
|
|
__author__ = 'croxis'
from io import BytesIO
import random
import re
from PIL import Image, ImageDraw, ImageFont
import requests
import lib.transforms as transforms
import lib.utils as utils
from lib.manalib import Manatext
from . import magic_image
from . import img_manager
try:
import textwrap
import nltk.data
sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
# This crazy thing is actually invoked as an unpass, so newlines are still
# encoded.
def sentencecase(s):
s = s.replace(utils.x_marker, utils.reserved_marker)
lines = s.split(utils.newline)
clines = []
for line in lines:
if line:
sentences = sent_tokenizer.tokenize(line)
clines += [' '.join([sent.capitalize() for sent in sentences])]
return utils.newline.join(clines).replace(utils.reserved_marker,
utils.x_marker)
except ImportError:
def sentencecase(s):
return s.capitalize()
def get_fonts():
return dict(
font_title=ImageFont.truetype("fonts/beleren-bold_P1.01.ttf", size=18),
font_type=ImageFont.truetype("fonts/beleren-bold_P1.01.ttf", size=16),
font=ImageFont.truetype("fonts/mplantin.ttf", size=18))
def draw_costs(image, draw, fonts, card):
cost = get_cost(card)
w, h = img_manager.get_icon('white').size
x_offset = 0
for x in range(0, cost['white']):
image.paste(img_manager.get_icon('white'),
(321 - x_offset, 42 - h // 2),
img_manager.get_icon('white'))
x_offset += 23
for x in range(0, cost['blue']):
image.paste(img_manager.get_icon('blue'),
(321 - x_offset, 42 - h // 2),
img_manager.get_icon('blue'))
x_offset += 23
for x in range(0, cost['black']):
image.paste(img_manager.get_icon('black'),
(321 - x_offset, 42 - h // 2),
img_manager.get_icon('black'))
x_offset += 23
for x in range(0, cost['green']):
image.paste(img_manager.get_icon('green'),
(321 - x_offset, 42 - h // 2),
img_manager.get_icon('green'))
x_offset += 23
for x in range(0, cost['red']):
image.paste(img_manager.get_icon('red'), (321 - x_offset, 42 - h // 2),
img_manager.get_icon('red'))
x_offset += 23
if cost['colorless']:
colorless_mana = img_manager.get_icon('colorless')
draw_colorless = ImageDraw.Draw(colorless_mana)
w, h = draw_colorless.textsize(str(cost['colorless']))
W, H = colorless_mana.size
draw_colorless.text(((W - w) // 2 - 2, (H - h) // 2 - 5),
str(cost['colorless']),
fill=(0, 0, 0, 255),
font=fonts['font_title'])
image.paste(colorless_mana,
(321 - x_offset, 36 - h // 2),
colorless_mana)
colorless_mana.close()
def draw_title(image, draw, fonts, card):
w, h = draw.textsize(card.name.title())
draw.text((35, 38 - h // 2),
# card.name.format(gatherer=True),
card.name.title(),
fill=(0, 0, 0, 255),
font=fonts['font_title'])
def draw_types(image, draw, fonts, card):
typeline = ""
if card.supertypes:
typeline += ' '.join(card.supertypes).title() + ' '
typeline += ' '.join(card.types).title()
if card.subtypes:
typeline += ' - ' + ' '.join(card.subtypes).title()
w, h = draw.textsize(typeline)
draw.text((35, 304 - h // 2),
typeline,
fill=(0, 0, 0, 255),
font=fonts['font_type'])
def get_card_text(card):
# Card texts
# card_text = card.text.format()
mtext = card.text.text
mtext = transforms.text_unpass_1_choice(mtext, delimit=True)
mtext = transforms.text_unpass_2_counters(mtext)
mtext = transforms.text_unpass_3_uncast(mtext)
mtext = transforms.text_unpass_4_unary(mtext)
mtext = transforms.text_unpass_5_symbols(mtext, for_forum=False)
mtext = sentencecase(mtext)
# We will do step 5 ourselves to keep capitalization
mtext = transforms.text_unpass_6_cardname(mtext, card.name.title())
mtext = transforms.text_unpass_7_newlines(mtext)
mtext = transforms.text_unpass_8_unicode(mtext)
new_text = Manatext('')
new_text.text = mtext
new_text.costs = card.text.costs
card_text = new_text.format()
return card_text
def draw_card_text(image, draw, fonts, card):
lines = textwrap.wrap(get_card_text(card), 37, replace_whitespace=False)
y_offset = 0
for line in lines:
for sub_line in line.split('\n'):
x_offset = 0
rg = re.compile('(\\{.*?\\})', re.IGNORECASE | re.DOTALL)
for subsub_line in rg.split(sub_line):
if subsub_line:
x = 36 + x_offset
y = 335 + y_offset - 3
if rg.match(subsub_line):
if '{w}' in subsub_line.lower():
image.paste(img_manager.get_icon_text('white'),
(x, y),
img_manager.get_icon_text('white'))
x_offset += 21
elif '{b}' in subsub_line.lower():
image.paste(img_manager.get_icon_text('black'),
(36 + x_offset, 335 + y_offset - 3),
img_manager.get_icon_text('black'))
x_offset += 21
elif '{u}' in subsub_line.lower():
image.paste(img_manager.get_icon_text('blue'),
(36 + x_offset, 335 + y_offset - 3),
img_manager.get_icon_text('blue'))
x_offset += 21
elif '{r}' in subsub_line.lower():
image.paste(img_manager.get_icon_text('red'),
(36 + x_offset, 335 + y_offset - 3),
img_manager.get_icon_text('red'))
x_offset += 21
elif '{g}' in subsub_line.lower():
image.paste(img_manager.get_icon_text('green'),
(36 + x_offset, 335 + y_offset - 3),
img_manager.get_icon_text('green'))
x_offset += 21
elif '{t}' in subsub_line.lower():
image.paste(img_manager.get_icon_text('tap'),
(36 + x_offset, 335 + y_offset - 3),
img_manager.get_icon_text('tap'))
x_offset += 21
else:
try:
int(subsub_line[1])
colorless_mana = img_manager.get_icon_text(
'colorless')
draw_colorless = ImageDraw.Draw(colorless_mana)
w, h = draw_colorless.textsize(
str(subsub_line[1]))
draw_colorless.text(
((18 - w) // 2 - 2, (18 - h) // 2 - 4),
str(subsub_line[1]),
fill=(0, 0, 0, 255),
font=fonts['font_title'])
image.paste(colorless_mana, (x, y),
colorless_mana)
colorless_mana.close()
x_offset += 21
except Exception:  # not a numeric mana symbol; skip drawing an icon
pass
else:
draw.text((35 + x_offset, 335 + y_offset),
subsub_line,
fill=(0, 0, 0, 255),
font=fonts['font'])
x_offset += fonts['font'].getsize(subsub_line)[0]
y_offset += 19
def draw_card_copywrite(image, draw, fonts, card):
draw.text((60, 484), "Copy, right?", fill=(0, 0, 0, 255),
font=fonts['font'])
def draw_power_toughness(image, draw, fonts, card):
if not card.pt:
return
power = str(card.pt_p.count('^'))
toughness = str(card.pt_t.count('^'))
c = card.cost.colors
if len(c) == 0:
c = 'a'
if len(c) > 1:
c = 'm'
c = c.lower()
if not c:
c = 'a'
pt_image = Image.open('app/card_parts/magic-new.mse-style/' +
c +
'pt.jpg')
image.paste(pt_image, (271, 461))
draw.text((295, 470), power + " / " + toughness, fill=(0, 0, 0, 255),
font=fonts['font_title'])
def draw_rarity(image, draw, fonts, card):
pass
def create_card_img(card, google):
background_color = get_background_color(card)
image = img_manager.get_background(background_color)
fonts = get_fonts()
draw = ImageDraw.Draw(image)
draw_costs(image, draw, fonts, card)
draw_title(image, draw, fonts, card)
draw_types(image, draw, fonts, card)
draw_card_text(image, draw, fonts, card)
draw_card_copywrite(image, draw, fonts, card)
draw_power_toughness(image, draw, fonts, card)
draw_rarity(image, draw, fonts, card)
art, w, h = get_card_art(card, google)
draw_card_art(image, draw, fonts, card, art, w, h)
return image
def draw_card_art(image, draw, fonts, card, art, w, h):
image.paste(art, ((image.size[0] - w) // 2, 175 - h // 2))
def get_cost(card):
cost = {}
cost['colorless'] = 0
cost['white'] = card.cost.format().lower().count('w')
cost['blue'] = card.cost.format().lower().count('u')
cost['black'] = card.cost.format().lower().count('b')
cost['red'] = card.cost.format().lower().count('r')
cost['green'] = card.cost.format().lower().count('g')
rg = re.compile('(\\d+)', re.IGNORECASE | re.DOTALL)
m = rg.search(card.cost.format())
if m:
cost['colorless'] = int(m.group(1))
return cost
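# Hedged sketch: the breakdown get_cost() returns for a typical encoded cost.
# The stubs below only provide the ``cost.format()`` interface used above.
def _example_get_cost():
    class _CostStub(object):
        def format(self):
            return '{2}{W}{W}{U}'
    class _CardStub(object):
        cost = _CostStub()
    # Expected: 2 colorless, 2 white, 1 blue, everything else 0.
    return get_cost(_CardStub())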
def get_background_color(card):
colors = card.cost.get_colors()
if colors == "":
return 'artifact'
if len(colors) > 1:
return 'multicolor'
if colors == "W":
return 'white'
if colors == "U":
return 'blue'
if colors == "B":
return 'black'
if colors == 'R':
return 'red'
if colors == 'G':
return 'green'
return None
def get_card_art(card, google):
if google:
google_result = google_card_art(card)
if google_result is not None:
return google_result
return get_default_card_art(card)
def get_default_card_art(card):
art = img_manager.default_portrait
art = art.crop((0, 0, 311, 228))
w, h = art.size
return (art, w, h)
def google_card_art(card):
terms = magic_image.find_search_terms(card)
random.shuffle(terms)
img_url = None
for term in terms[:5]:
color = term[-1]
query = "+".join(term[:-1])
if color == 'u':
color = 'blue'
img_url = magic_image.fetch(query + '+"fantasy"+paintings+-card',
color)
if img_url:
break
if img_url:
with BytesIO(requests.get(img_url).content) as reader:
reader.seek(0)
try:
art = Image.open(reader)
art.thumbnail((311, 311))
art = art.crop((0, 0, 311, 229))
w, h = art.size
return (art, w, h)
except OSError:
print("Unable to handle this kind of image.")
return None
|
|
"""
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.datasets.base import _pkl_filepath
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = DIRECTORY_URL + "samples.zip"
COVERAGES_URL = DIRECTORY_URL + "coverages.zip"
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = int(header[b'NODATA_value'])
if nodata != -9999:
M[M == nodata] = -9999  # remap the grid's NODATA marker onto the canonical -9999
return M
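# Hedged sketch of the ASCII-grid layout _load_coverage() expects; the header
# values and tiny 2x3 grid below are made up for illustration.
def _example_load_coverage():
    fake = BytesIO(
        b"ncols 3\nnrows 2\nxllcorner 0\nyllcorner 0\ncellsize 0.05\n"
        b"NODATA_value -9999\n"
        b"1 2 3\n4 5 6\n"
    )
    return _load_coverage(fake)  # -> 2x3 int16 array [[1, 2, 3], [4, 5, 6]]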
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
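# Hedged sketch: construct_grids() only needs the grid metadata, so a plain
# Bunch carrying the documented corner/size values is enough for illustration.
def _example_construct_grids():
    batch = Bunch(x_left_lower_corner=-94.8, Nx=1212,
                  y_left_lower_corner=-56.05, Ny=1592,
                  grid_size=0.05)
    xgrid, ygrid = construct_grids(batch)
    # xgrid spans the longitudes (about Nx points), ygrid the latitudes (about Ny points).
    return xgrid, ygrid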
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
if not exists(archive_path):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, archive_path, compress=9)
else:
bunch = joblib.load(archive_path)
return bunch
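# Hedged usage sketch: fetching the dataset and deriving its lat/long grid.
# Network access (or a cached archive in the scikit-learn data home) is assumed.
def _example_fetch_and_grid():
    data = fetch_species_distributions()
    xgrid, ygrid = construct_grids(data)
    return data.coverages.shape, xgrid.shape, ygrid.shape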
|
|
from __future__ import print_function
import warnings
from six.moves import range
import numpy as np
from time import sleep
from landlab import ModelParameterDictionary, CLOSED_BOUNDARY, Component
from landlab.core.model_parameter_dictionary import MissingKeyError
from landlab.field.scalar_data_fields import FieldError
from landlab.grid.base import BAD_INDEX_VALUE
from landlab.utils.decorators import make_return_array_immutable
class SedDepEroder(Component):
"""
This module implements sediment flux dependent channel incision
following::
E = f(Qs, Qc) * ([a stream power-like term] - [an optional threshold]),
where E is the bed erosion rate, Qs is the volumetric sediment flux
into a node, and Qc is the volumetric sediment transport capacity at
that node.
This component is under active research and development; proceed with its
use at your own risk.
The details of the implementation are a function of the two key
arguments, *sed_dependency_type* and *Qc*. The former controls the
shape of the sediment dependent response function f(Qs, Qc), the
latter controls the way in which sediment transport capacities are
calculated (primarily, whether a full Meyer-Peter Muller approach is
used, or whether simpler stream-power-like equations can be assumed).
For Qc, 'power_law' broadly follows the assumptions in Gasparini et
al. 2006, 2007; 'MPM' broadly follows those in Hobley et al., 2011.
Note that a convex-up channel can result in many cases assuming MPM,
unless parameters b and c are carefully tuned.
If ``Qc == 'power_law'``::
E = K_sp * f(Qs, Qc) * A ** m_sp * S ** n_sp;
Qc = K_t * A ** m_t * S ** n_t
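(Purely illustrative numbers: with f(Qs, Qc) = 1, K_sp = 1.e-6, m_sp = 0.5 and
n_sp = 1., a node with A = 1.e6 m**2 and S = 0.01 erodes at
E = 1.e-6 * (1.e6)**0.5 * 0.01 = 1.e-5 m/yr.)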
If ``Qc == 'MPM'``::
shear_stress = fluid_density * g * depth * S
= fluid_density * g * (mannings_n/k_w) ** 0.6 * (
k_Q * A ** c_sp) ** (0.6 * (1. - b_sp)) * S ** 0.7,
for consistency with MPM
E = K_sp * f(Qs, Qc) * (shear_stress ** a_sp - [threshold_sp])
Qc = 8 * C_MPM * sqrt((sed_density-fluid_density)/fluid_density *
g * D_char**3) * (shields_stress - threshold_shields)**1.5
shields_stress = shear_stress / (g * (sed_density-fluid_density) *
D_char)
If you choose Qc='MPM', you may provide thresholds for both channel
incision and shields number, or alternatively set either or both of
these thresholds dynamically. The minimum shear stress can be made
equivalent to the Shields number using *set_threshold_from_Dchar*,
for full consistency with the MPM approach (i.e., the threshold
becomes a function of the characteristic grain size on the bed). The
Shields threshold itself can also be a weak function of slope if
*slope_sensitive_threshold*, following Lamb et al. 2008,
taustar_c = 0.15 * S ** 0.25.
The component is able to handle flooded nodes, if created by a lake
filler. It assumes the flow paths found in the fields already reflect
any lake routing operations, and then requires the optional argument
*flooded_depths* be passed to the run method. A flooded depression
acts as a perfect sediment trap, and will be filled sequentially
from the inflow points towards the outflow points.
Construction::
SedDepEroder(grid, K_sp=1.e-6, g=9.81, rock_density=2700,
sediment_density=2700, fluid_density=1000,
runoff_rate=1.,
sed_dependency_type='generalized_humped',
kappa_hump=13.683, nu_hump=1.13, phi_hump=4.24,
c_hump=0.00181, Qc='power_law', m_sp=0.5, n_sp=1.,
K_t=1.e-4, m_t=1.5, n_t=1., C_MPM=1., a_sp=1.,
b_sp=0.5, c_sp=1., k_w=2.5, k_Q=2.5e-7,
mannings_n=0.05, threshold_shear_stress=None,
Dchar=0.05, set_threshold_from_Dchar=True,
set_Dchar_from_threshold=False,
threshold_Shields=0.05,
slope_sensitive_threshold=False,
pseudoimplicit_repeats=5,
return_stream_properties=False)
Parameters
----------
grid : a ModelGrid
A grid.
K_sp : float (time unit must be *years*)
K in the stream power equation; the prefactor on the erosion
equation (units vary with other parameters).
g : float (m/s**2)
Acceleration due to gravity.
rock_density : float (Kg m**-3)
Bulk intact rock density.
sediment_density : float (Kg m**-3)
Typical density of loose sediment on the bed.
fluid_density : float (Kg m**-3)
Density of the fluid.
runoff_rate : float, array or field name (m/s)
The rate of excess overland flow production at each node (i.e.,
rainfall rate less infiltration).
pseudoimplicit_repeats : int
Number of loops to perform with the pseudoimplicit iterator,
seeking a stable solution. Convergence is typically rapid.
return_stream_properties : bool
Whether to perform a few additional calculations in order to set
the additional optional output fields, 'channel__width',
'channel__depth', and 'channel__discharge' (default False).
sed_dependency_type : {'generalized_humped', 'None', 'linear_decline',
'almost_parabolic'}
The shape of the sediment flux function. For definitions, see
Hobley et al., 2011. 'None' gives a constant value of 1.
NB: 'parabolic' is currently not supported, due to numerical
stability issues at channel heads.
Qc : {'power_law', 'MPM'}
Whether to use simple stream-power-like equations for both
sediment transport capacity and erosion rate, or more complex
forms based directly on the Meyer-Peter Muller equation and a
shear stress based erosion model consistent with MPM (per
Hobley et al., 2011).
If ``sed_dependency_type == 'generalized_humped'``...
kappa_hump : float
Shape parameter for sediment flux function. Primarily controls
function amplitude (i.e., scales the function to a maximum of 1).
Default follows Leh valley values from Hobley et al., 2011.
nu_hump : float
Shape parameter for sediment flux function. Primarily controls
rate of rise of the "tools" limb. Default follows Leh valley
values from Hobley et al., 2011.
phi_hump : float
Shape parameter for sediment flux function. Primarily controls
rate of fall of the "cover" limb. Default follows Leh valley
values from Hobley et al., 2011.
c_hump : float
Shape parameter for sediment flux function. Primarily controls
degree of function asymmetry. Default follows Leh valley values
from Hobley et al., 2011.
If ``Qc == 'power_law'``...
m_sp : float
Power on drainage area in the erosion equation.
n_sp : float
Power on slope in the erosion equation.
K_t : float (time unit must be in *years*)
Prefactor in the transport capacity equation.
m_t : float
Power on drainage area in the transport capacity equation.
n_t : float
Power on slope in the transport capacity equation.
if ``Qc == 'MPM'``...
C_MPM : float
A prefactor on the MPM relation, allowing tuning to known sediment
saturation conditions (leave as 1. in most cases).
a_sp : float
Power on shear stress to give erosion rate.
b_sp : float
Power on drainage area to give channel width.
c_sp : float
Power on drainage area to give discharge.
k_w : float (unit variable with b_sp)
Prefactor on A**b_sp to give channel width.
k_Q : float (unit variable with c_sp, but time unit in *seconds*)
Prefactor on A**c_sp to give discharge.
mannings_n : float
Manning's n for the channel.
threshold_shear_stress : None or float (Pa)
The threshold shear stress in the equation for erosion rate. If
None, implies that *set_threshold_from_Dchar* is True, and this
parameter will get set from the Dchar value and critical Shields
number.
Dchar : None, float, array, or field name (m)
The characteristic grain size on the bed, that controls the
relationship between critical Shields number and critical shear
stress. If None, implies that *set_Dchar_from_threshold* is True,
and this parameter will get set from the threshold_shear_stress
value and critical Shields number.
set_threshold_from_Dchar : bool
If True (default), threshold_shear_stress will be set based on
Dchar and threshold_Shields.
set_Dchar_from_threshold : bool
If True, Dchar will be set based on threshold_shear_stress and
threshold_Shields. Default is False.
threshold_Shields : None or float
The threshold Shields number. If None, implies that
*slope_sensitive_threshold* is True.
slope_sensitive_threshold : bool
If True, the threshold_Shields will be set according to
0.15 * S ** 0.25, per Lamb et al., 2008 & Hobley et al., 2011.
"""
_name = 'SedDepEroder'
_input_var_names = (
'topographic__elevation',
'drainage_area',
'flow__receiver_node',
'flow__upstream_node_order',
'topographic__steepest_slope',
'flow__link_to_receiver_node'
)
_output_var_names = (
'topographic__elevation',
'channel__bed_shear_stress',
'channel_sediment__volumetric_transport_capacity',
'channel_sediment__volumetric_flux',
'channel_sediment__relative_flux',
'channel__discharge',
'channel__width', # optional
'channel__depth', # optional
)
_optional_var_names = (
'channel__width',
'channel__depth'
)
_var_units = {'topographic__elevation': 'm',
'drainage_area': 'm**2',
'flow__receiver_node': '-',
'topographic__steepest_slope': '-',
'flow__upstream_node_order': '-',
'flow__link_to_receiver_node': '-',
'channel__bed_shear_stress': 'Pa',
'channel_sediment__volumetric_transport_capacity': 'm**3/s',
'channel_sediment__volumetric_flux': 'm**3/s',
'channel_sediment__relative_flux': '-',
'channel__discharge': 'm**3/s',
'channel__width': 'm',
'channel__depth': 'm'
}
_var_mapping = {'topographic__elevation': 'node',
'drainage_area': 'node',
'flow__receiver_node': 'node',
'topographic__steepest_slope': 'node',
'flow__upstream_node_order': 'node',
'flow__link_to_receiver_node': 'node',
'channel__bed_shear_stress': 'node',
'channel_sediment__volumetric_transport_capacity': 'node',
'channel_sediment__volumetric_flux': 'node',
'channel_sediment__relative_flux': 'node',
'channel__discharge': 'node',
'channel__width': 'node',
'channel__depth': 'node'
}
_var_type = {'topographic__elevation': float,
'drainage_area': float,
'flow__receiver_node': int,
'topographic__steepest_slope': float,
'flow__upstream_node_order': int,
'flow__link_to_receiver_node': int,
'channel__bed_shear_stress': float,
'channel_sediment__volumetric_transport_capacity': float,
'channel_sediment__volumetric_flux': float,
'channel_sediment__relative_flux': float,
'channel__discharge': float,
'channel__width': float,
'channel__depth': float
}
_var_doc = {
'topographic__elevation': 'Land surface topographic elevation',
'drainage_area':
("Upstream accumulated surface area contributing to the node's " +
"discharge"),
'flow__receiver_node':
('Node array of receivers (node that receives flow from current ' +
'node)'),
'topographic__steepest_slope':
'Node array of steepest *downhill* slopes',
'flow__upstream_node_order':
('Node array containing downstream-to-upstream ordered list of ' +
'node IDs'),
'flow__link_to_receiver_node':
'ID of link downstream of each node, which carries the discharge',
'channel__bed_shear_stress':
('Shear exerted on the bed of the channel, assuming all ' +
'discharge travels along a single, self-formed channel'),
'channel_sediment__volumetric_transport_capacity':
('Volumetric transport capacity of a channel carrying all runoff' +
' through the node, assuming the Meyer-Peter Muller transport ' +
'equation'),
'channel_sediment__volumetric_flux':
('Total volumetric fluvial sediment flux brought into the node ' +
'from upstream'),
'channel_sediment__relative_flux':
('The fluvial_sediment_flux_into_node divided by the fluvial_' +
'sediment_transport_capacity'),
'channel__discharge':
('Volumetric water flux of a single channel carrying all ' +
'runoff through the node'),
'channel__width':
('Width of a single channel carrying all runoff through the ' +
'node'),
'channel__depth':
('Depth of a single channel carrying all runoff through the ' +
'node')
}
def __init__(self, grid, K_sp=1.e-6, g=9.81,
rock_density=2700, sediment_density=2700, fluid_density=1000,
runoff_rate=1.,
sed_dependency_type='generalized_humped', kappa_hump=13.683,
nu_hump=1.13, phi_hump=4.24, c_hump=0.00181,
Qc='power_law', m_sp=0.5, n_sp=1., K_t=1.e-4, m_t=1.5, n_t=1.,
# these params for Qc='MPM':
C_MPM=1., a_sp=1., b_sp=0.5, c_sp=1., k_w=2.5, k_Q=2.5e-7,
mannings_n=0.05, threshold_shear_stress=None,
Dchar=0.05, set_threshold_from_Dchar=True,
set_Dchar_from_threshold=False,
threshold_Shields=0.05, slope_sensitive_threshold=False,
# params for model numeric behavior:
pseudoimplicit_repeats=5, return_stream_properties=False,
**kwds):
"""Constructor for the class."""
self._grid = grid
self.pseudoimplicit_repeats = pseudoimplicit_repeats
self.link_S_with_trailing_blank = np.zeros(grid.number_of_links+1)
# ^needs to be filled with values in execution
self.count_active_links = np.zeros_like(
self.link_S_with_trailing_blank, dtype=int)
self.count_active_links[:-1] = 1
self._K_unit_time = K_sp/31557600.
# ^...because we work with dt in seconds
# set gravity
self.g = g
self.rock_density = rock_density
self.sed_density = sediment_density
self.fluid_density = fluid_density
self.relative_weight = (
(self.sed_density-self.fluid_density)/self.fluid_density*self.g)
# ^to accelerate MPM calcs
self.rho_g = self.fluid_density*self.g
self.type = sed_dependency_type
assert self.type in ('generalized_humped', 'None', 'linear_decline',
'almost_parabolic')
self.Qc = Qc
assert self.Qc in ('MPM', 'power_law')
self.return_ch_props = return_stream_properties
if return_stream_properties:
assert self.Qc == 'MPM', ("Qc must be 'MPM' to return stream " +
"properties")
if type(runoff_rate) in (float, int):
self.runoff_rate = float(runoff_rate)
elif type(runoff_rate) is str:
self.runoff_rate = self.grid.at_node[runoff_rate]
else:
self.runoff_rate = np.array(runoff_rate)
assert self.runoff_rate.size == self.grid.number_of_nodes
if self.Qc == 'MPM':
if threshold_shear_stress is not None:
self.thresh = threshold_shear_stress
self.set_threshold = True
# ^flag for sed_flux_dep_incision to see if the threshold was
# manually set.
# print("Found a shear stress threshold to use: ", self.thresh)
else:
warnings.warn("Found no incision threshold to use.")
self.thresh = 0.
self.set_threshold = False
self._a = a_sp
self._b = b_sp
self._c = c_sp
self.k_Q = k_Q
self.k_w = k_w
self.mannings_n = mannings_n
if mannings_n < 0. or mannings_n > 0.2:
warnings.warn("Manning's n outside it's typical range")
self.diffusivity_power_on_A = 0.9*self._c*(1.-self._b)
# ^i.e., q/D**(1/6)
self.override_threshold = set_threshold_from_Dchar
self.override_Dchar = set_Dchar_from_threshold
if self.override_threshold:
assert self.set_threshold is False, (
"If set_threshold_from_Dchar, threshold_Shields must be " +
"set to None")
assert self.override_Dchar is False
if self.override_Dchar:
assert self.override_threshold is False
self.shields_crit = threshold_Shields
self.lamb_flag = slope_sensitive_threshold
if self.lamb_flag:
assert self.shields_crit is None, (
"If slope_sensitive_threshold, threshold_Shields must " +
"be set to None")
elif self.Qc == 'power_law':
self._m = m_sp
self._n = n_sp
self._Kt = K_t/31557600. # in sec
self._mt = m_t
self._nt = n_t
# now conditional inputs
if self.type == 'generalized_humped':
self.kappa = kappa_hump
self.nu = nu_hump
self.phi = phi_hump
self.c = c_hump
if self.Qc == 'MPM':
if Dchar is not None:
if type(Dchar) in (int, float):
self.Dchar_in = float(Dchar)
elif type(Dchar) is str:
self.Dchar_in = self.grid.at_node[Dchar]
else:
self.Dchar_in = np.array(Dchar)
assert self.Dchar_in.size == self.grid.number_of_nodes
assert not self.override_Dchar, (
"If set_Dchar_from_threshold, Dchar must be set to None")
else:
assert self.override_Dchar
# remember the threshold getting set is already tau**a
if not self.lamb_flag:
self.Dchar_in = self.thresh/self.g/(
self.sed_density-self.fluid_density)/self.shields_crit
else:
self.Dchar_in = None
self.C_MPM = C_MPM
if self.override_threshold:
# print("Overriding any supplied threshold...")
try:
self.thresh = self.shields_crit*self.g*(
self.sed_density-self.fluid_density)*self.Dchar_in
except AttributeError:
self.thresh = self.shields_crit*self.g*(
self.sed_density-self.fluid_density)*Dchar
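# ^this is the Shields criterion rearranged for shear stress:
# tau_c = theta_crit * g * (rho_sediment - rho_fluid) * D_char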
# new 11/12/14
self.point6onelessb = 0.6*(1.-self._b)
self.shear_stress_prefactor = self.fluid_density*self.g*(
self.mannings_n/self.k_w)**0.6
if self.set_threshold is False or self.override_threshold:
try:
self.shields_prefactor_to_shear = (
(self.sed_density-self.fluid_density)*self.g *
self.Dchar_in)
except AttributeError: # no Dchar
self.shields_prefactor_to_shear_noDchar = (
self.sed_density-self.fluid_density)*self.g
twothirds = 2./3.
self.Qs_prefactor = (
4.*self.C_MPM**twothirds*self.fluid_density**twothirds /
(self.sed_density-self.fluid_density)**twothirds*self.g **
(twothirds/2.)*mannings_n**0.6*self.k_w**(1./15.)*self.k_Q **
(0.6+self._b/15.)/self.sed_density**twothirds)
self.Qs_thresh_prefactor = (
4.*(self.C_MPM*self.k_w*self.k_Q**self._b/self.fluid_density **
0.5/(self.sed_density-self.fluid_density)/self.g /
self.sed_density)**twothirds)
# both these are divided by sed density to give a vol flux
self.Qs_power_onA = self._c*(0.6+self._b/15.)
self.Qs_power_onAthresh = twothirds*self._b*self._c
self.cell_areas = np.empty(grid.number_of_nodes)
self.cell_areas.fill(np.mean(grid.area_of_cell))
self.cell_areas[grid.node_at_cell] = grid.area_of_cell
# set up the necessary fields:
self.initialize_output_fields()
if self.return_ch_props:
self.initialize_optional_output_fields()
def get_sed_flux_function(self, rel_sed_flux):
if self.type == 'generalized_humped':
"Returns K*f(qs,qc)"
sed_flux_fn = self.kappa*(rel_sed_flux**self.nu + self.c)*np.exp(
-self.phi*rel_sed_flux)
elif self.type == 'linear_decline':
sed_flux_fn = (1.-rel_sed_flux)
elif self.type == 'parabolic':
raise MissingKeyError(
'Pure parabolic (where intersect at zero flux is exactly ' +
'zero) is currently not supported, sorry. Try ' +
'almost_parabolic instead?')
sed_flux_fn = 1. - 4.*(rel_sed_flux-0.5)**2.
elif self.type == 'almost_parabolic':
sed_flux_fn = np.where(rel_sed_flux > 0.1,
1. - 4.*(rel_sed_flux-0.5)**2.,
2.6*rel_sed_flux+0.1)
elif self.type == 'None':
sed_flux_fn = 1.
else:
raise MissingKeyError(
'Provided sed flux sensitivity type in input file was not ' +
'recognised!')
return sed_flux_fn
def get_sed_flux_function_pseudoimplicit(self, sed_in, trans_cap_vol_out,
prefactor_for_volume,
prefactor_for_dz):
rel_sed_flux_in = sed_in/trans_cap_vol_out
rel_sed_flux = rel_sed_flux_in
if self.type == 'generalized_humped':
"Returns K*f(qs,qc)"
def sed_flux_fn_gen(rel_sed_flux_in):
return self.kappa*(rel_sed_flux_in**self.nu + self.c)*np.exp(
-self.phi*rel_sed_flux_in)
elif self.type == 'linear_decline':
def sed_flux_fn_gen(rel_sed_flux_in):
return 1.-rel_sed_flux_in
elif self.type == 'parabolic':
raise MissingKeyError(
'Pure parabolic (where intersect at zero flux is exactly ' +
'zero) is currently not supported, sorry. Try ' +
'almost_parabolic instead?')
def sed_flux_fn_gen(rel_sed_flux_in):
return 1. - 4.*(rel_sed_flux_in-0.5)**2.
elif self.type == 'almost_parabolic':
def sed_flux_fn_gen(rel_sed_flux_in):
return np.where(rel_sed_flux_in > 0.1,
1. - 4.*(rel_sed_flux_in-0.5)**2.,
2.6*rel_sed_flux_in+0.1)
elif self.type == 'None':
def sed_flux_fn_gen(rel_sed_flux_in):
return 1.
else:
raise MissingKeyError(
'Provided sed flux sensitivity type in input file was not ' +
'recognised!')
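# Pseudo-implicit fixed-point iteration: starting from the incoming relative
# sediment flux, repeatedly evaluate f(qs/qc) and the relative flux it
# implies, clamping to [0, 1]. The difference between the last two f values
# is returned as an error estimate alongside dz and the outgoing flux.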
for i in range(self.pseudoimplicit_repeats):
sed_flux_fn = sed_flux_fn_gen(rel_sed_flux)
sed_vol_added = prefactor_for_volume*sed_flux_fn
rel_sed_flux = rel_sed_flux_in + sed_vol_added/trans_cap_vol_out
# print rel_sed_flux
if rel_sed_flux >= 1.:
rel_sed_flux = 1.
break
if rel_sed_flux < 0.:
rel_sed_flux = 0.
break
last_sed_flux_fn = sed_flux_fn
sed_flux_fn = sed_flux_fn_gen(rel_sed_flux)
# this error could alternatively be used to break the loop
error_in_sed_flux_fn = sed_flux_fn-last_sed_flux_fn
dz = prefactor_for_dz*sed_flux_fn
sed_flux_out = rel_sed_flux*trans_cap_vol_out
return dz, sed_flux_out, rel_sed_flux, error_in_sed_flux_fn
def erode(self, dt, flooded_depths=None, **kwds):
"""Erode and deposit on the channel bed for a duration of *dt*.
Erosion occurs according to the sediment dependent rules specified
during initialization.
Parameters
----------
dt : float (years, only!)
Timestep for which to run the component.
flooded_depths : array or field name (m)
Depths of flooding at each node, zero where no lake. Note that the
component will dynamically update this array as it fills nodes
with sediment (...but does NOT update any other related lake
fields).
"""
grid = self.grid
node_z = grid.at_node['topographic__elevation']
node_A = grid.at_node['drainage_area']
flow_receiver = grid.at_node['flow__receiver_node']
s_in = grid.at_node['flow__upstream_node_order']
node_S = grid.at_node['topographic__steepest_slope']
if type(flooded_depths) is str:
flooded_depths = grid.at_node[flooded_depths]
# also need a map of initial flooded conds:
flooded_nodes = flooded_depths > 0.
elif type(flooded_depths) is np.ndarray:
assert flooded_depths.size == self.grid.number_of_nodes
flooded_nodes = flooded_depths > 0.
# need an *updateable* record of the pit depths
else:
# if None, handle in loop
flooded_nodes = None
steepest_link = 'flow__link_to_receiver_node'
link_length = np.empty(grid.number_of_nodes, dtype=float)
link_length.fill(np.nan)
draining_nodes = np.not_equal(grid.at_node[steepest_link],
BAD_INDEX_VALUE)
core_draining_nodes = np.intersect1d(np.where(draining_nodes)[0],
grid.core_nodes,
assume_unique=True)
link_length[core_draining_nodes] = grid._length_of_link_with_diagonals[
grid.at_node[steepest_link][core_draining_nodes]]
if self.Qc == 'MPM':
if self.Dchar_in is not None:
self.Dchar = self.Dchar_in
else:
assert not self.set_threshold, (
"Something is seriously wrong with your model " +
"initialization.")
assert self.override_threshold, (
"You need to confirm to the module you intend it to " +
"internally calculate a shear stress threshold, " +
"with set_threshold_from_Dchar in the input file.")
# we need to adjust the thresholds for the Shields number
# & gs dynamically:
variable_thresh = self.shields_crit*self.g*(
self.sed_density-self.fluid_density)*self.Dchar
if self.lamb_flag:
variable_shields_crit = 0.15*node_S**0.25
try:
variable_thresh = (variable_shields_crit *
self.shields_prefactor_to_shear)
except AttributeError:
variable_thresh = (
variable_shields_crit *
self.shields_prefactor_to_shear_noDchar*self.Dchar)
node_Q = self.k_Q*self.runoff_rate*node_A**self._c
shear_stress_prefactor_timesAparts = (
self.shear_stress_prefactor*node_Q**self.point6onelessb)
try:
transport_capacities_thresh = (
self.thresh*self.Qs_thresh_prefactor*self.runoff_rate**(
0.66667*self._b)*node_A**self.Qs_power_onAthresh)
except AttributeError:
transport_capacities_thresh = (
variable_thresh*self.Qs_thresh_prefactor *
self.runoff_rate**(0.66667*self._b)*node_A **
self.Qs_power_onAthresh)
transport_capacity_prefactor_withA = (
self.Qs_prefactor*self.runoff_rate**(0.6+self._b/15.)*node_A **
self.Qs_power_onA)
internal_t = 0.
break_flag = False
dt_secs = dt*31557600.
counter = 0
rel_sed_flux = np.empty_like(node_Q)
# excess_vol_overhead = 0.
while 1:
# ^use the break flag, to improve computational efficiency for
# runs which are very stable
# we assume the drainage structure is forbidden to change
# during the whole dt
# note slopes will be *negative* at pits
# track how many loops we perform:
counter += 1
downward_slopes = node_S.clip(0.)
# this removes the tendency to transfer material against
# gradient, including in any lake depressions
# we DON'T immediately zero trp capacity in the lake.
# positive_slopes = np.greater(downward_slopes, 0.)
slopes_tothe07 = downward_slopes**0.7
transport_capacities_S = (transport_capacity_prefactor_withA *
slopes_tothe07)
trp_diff = (transport_capacities_S -
transport_capacities_thresh).clip(0.)
transport_capacities = np.sqrt(trp_diff*trp_diff*trp_diff)
shear_stress = (shear_stress_prefactor_timesAparts *
slopes_tothe07)
shear_tothe_a = shear_stress**self._a
dt_this_step = dt_secs-internal_t
# ^timestep adjustment is made AFTER the dz calc
node_vol_capacities = transport_capacities*dt_this_step
sed_into_node = np.zeros(grid.number_of_nodes, dtype=float)
dz = np.zeros(grid.number_of_nodes, dtype=float)
len_s_in = s_in.size
cell_areas = self.cell_areas
try:
raise NameError
# ^tripped out deliberately for now; doesn't appear to
# accelerate much
weave.inline(self.routing_code, [
'len_s_in', 'sed_into_node', 'transport_capacities',
'dz', 'cell_areas', 'dt_this_step', 'flow__receiver_node'])
except NameError:
for i in s_in[::-1]: # work downstream
cell_area = cell_areas[i]
if flooded_nodes is not None:
flood_depth = flooded_depths[i]
else:
flood_depth = 0.
sed_flux_into_this_node = sed_into_node[i]
node_capacity = transport_capacities[i]
# ^we work in volume flux, not volume per se here
node_vol_capacity = node_vol_capacities[i]
if flood_depth > 0.:
node_vol_capacity = 0.
# requires special case handling - as much sed as
# possible is dumped here, then the remainder
# passed on
if sed_flux_into_this_node < node_vol_capacity:
# ^note incision is forbidden at capacity
# flooded nodes never enter this branch
# #implementing the pseudoimplicit method:
try:
thresh = variable_thresh
except NameError:  # variable_thresh doesn't exist
thresh = self.thresh
dz_prefactor = self._K_unit_time*dt_this_step*(
shear_tothe_a[i]-thresh).clip(0.)
vol_prefactor = dz_prefactor*cell_area
(dz_here, sed_flux_out, rel_sed_flux_here,
error_in_sed_flux) = \
self.get_sed_flux_function_pseudoimplicit(
sed_flux_into_this_node, node_vol_capacity,
vol_prefactor, dz_prefactor)
# note now dz_here may never create more sed than
# the out can transport...
assert sed_flux_out <= node_vol_capacity, (
'failed at node '+str(s_in.size-i) +
' with rel sed flux '+str(
sed_flux_out/node_capacity))
rel_sed_flux[i] = rel_sed_flux_here
vol_pass = sed_flux_out
else:
rel_sed_flux[i] = 1.
vol_dropped = (sed_flux_into_this_node -
node_vol_capacity)
dz_here = -vol_dropped/cell_area
# with the pits, we aim to inhibit incision, but
# depo is OK. We have already zero'd any adverse
# grads, so sed can make it to the bottom of the
# pit but no further in a single step, which seems
# reasonable. Pit should fill.
if flood_depth <= 0.:
vol_pass = node_vol_capacity
else:
height_excess = -dz_here - flood_depth
# ...above water level
if height_excess <= 0.:
vol_pass = 0.
# dz_here is already correct
flooded_depths[i] += dz_here
else:
dz_here = -flood_depth
vol_pass = height_excess * cell_area
# ^bit cheeky?
flooded_depths[i] = 0.
# note we must update flooded depths
# transiently to conserve mass
# do we need to retain a small downhill slope?
# ...don't think so. Will resolve itself on next
# timestep.
dz[i] -= dz_here
sed_into_node[flow_receiver[i]] += vol_pass
break_flag = True
node_z[grid.core_nodes] += dz[grid.core_nodes]
if break_flag:
break
# do we need to reroute the flow/recalc the slopes here?
# -> NO, slope is such a minor component of Diff we'll be OK
# BUT could be important not for the stability, but for the
# actual calc. So YES.
node_S = np.zeros_like(node_S)
node_S[core_draining_nodes] = (node_z-node_z[flow_receiver])[
core_draining_nodes]/link_length[core_draining_nodes]
internal_t += dt_this_step # still in seconds, remember
elif self.Qc == 'power_law':
transport_capacity_prefactor_withA = self._Kt * node_A**self._mt
erosion_prefactor_withA = self._K_unit_time * node_A**self._m
# ^doesn't include S**n*f(Qc/Qc)
internal_t = 0.
break_flag = False
dt_secs = dt*31557600.
counter = 0
rel_sed_flux = np.empty_like(node_A)
while 1:
counter += 1
# print counter
downward_slopes = node_S.clip(0.)
# positive_slopes = np.greater(downward_slopes, 0.)
slopes_tothen = downward_slopes**self._n
slopes_tothent = downward_slopes**self._nt
transport_capacities = (transport_capacity_prefactor_withA *
slopes_tothent)
erosion_prefactor_withS = (
erosion_prefactor_withA * slopes_tothen) # no time, no fqs
# shear_tothe_a = shear_stress**self._a
dt_this_step = dt_secs-internal_t
# ^timestep adjustment is made AFTER the dz calc
node_vol_capacities = transport_capacities*dt_this_step
sed_into_node = np.zeros(grid.number_of_nodes, dtype=float)
dz = np.zeros(grid.number_of_nodes, dtype=float)
cell_areas = self.cell_areas
for i in s_in[::-1]: # work downstream
cell_area = cell_areas[i]
if flooded_nodes is not None:
flood_depth = flooded_depths[i]
else:
flood_depth = 0.
sed_flux_into_this_node = sed_into_node[i]
node_capacity = transport_capacities[i]
# ^we work in volume flux, not volume per se here
node_vol_capacity = node_vol_capacities[i]
if flood_depth > 0.:
node_vol_capacity = 0.
if sed_flux_into_this_node < node_vol_capacity:
# ^note incision is forbidden at capacity
dz_prefactor = dt_this_step*erosion_prefactor_withS[i]
vol_prefactor = dz_prefactor*cell_area
(dz_here, sed_flux_out, rel_sed_flux_here,
error_in_sed_flux) = \
self.get_sed_flux_function_pseudoimplicit(
sed_flux_into_this_node, node_vol_capacity,
vol_prefactor, dz_prefactor)
# note now dz_here may never create more sed than the
# out can transport...
assert sed_flux_out <= node_vol_capacity, (
'failed at node '+str(s_in.size-i) +
' with rel sed flux '+str(
sed_flux_out/node_capacity))
rel_sed_flux[i] = rel_sed_flux_here
vol_pass = sed_flux_out
else:
rel_sed_flux[i] = 1.
vol_dropped = (sed_flux_into_this_node -
node_vol_capacity)
dz_here = -vol_dropped/cell_area
try:
isflooded = flooded_nodes[i]
except TypeError: # was None
isflooded = False
if flood_depth <= 0. and not isflooded:
vol_pass = node_vol_capacity
# we want flooded nodes which have already been
# filled to enter the else statement
else:
height_excess = -dz_here - flood_depth
# ...above water level
if height_excess <= 0.:
vol_pass = 0.
# dz_here is already correct
flooded_depths[i] += dz_here
else:
dz_here = -flood_depth
vol_pass = height_excess * cell_area
# ^bit cheeky?
flooded_depths[i] = 0.
dz[i] -= dz_here
sed_into_node[flow_receiver[i]] += vol_pass
break_flag = True
node_z[grid.core_nodes] += dz[grid.core_nodes]
if break_flag:
break
# do we need to reroute the flow/recalc the slopes here?
# -> NO, slope is such a minor component of Diff we'll be OK
# BUT could be important not for the stability, but for the
# actual calc. So YES.
node_S = np.zeros_like(node_S)
# print link_length[core_draining_nodes]
node_S[core_draining_nodes] = (node_z-node_z[flow_receiver])[
core_draining_nodes]/link_length[core_draining_nodes]
internal_t += dt_this_step # still in seconds, remember
active_nodes = grid.core_nodes
if self.return_ch_props:
# add the channel property field entries,
# 'channel__width', 'channel__depth', and 'channel__discharge'
W = self.k_w*node_Q**self._b
H = shear_stress/self.rho_g/node_S # ...sneaky!
grid.at_node['channel__width'][:] = W
grid.at_node['channel__depth'][:] = H
grid.at_node['channel__discharge'][:] = node_Q
grid.at_node['channel__bed_shear_stress'][:] = shear_stress
grid.at_node['channel_sediment__volumetric_transport_capacity'][
:] = transport_capacities
grid.at_node['channel_sediment__volumetric_flux'][:] = sed_into_node
grid.at_node['channel_sediment__relative_flux'][:] = rel_sed_flux
# elevs set automatically to the name used in the function call.
self.iterations_in_dt = counter
return grid, grid.at_node['topographic__elevation']
def run_one_step(self, dt, flooded_depths=None, **kwds):
"""Run the component across one timestep increment, dt.
Erosion occurs according to the sediment dependent rules specified
during initialization. Method is fully equivalent to the :func:`erode`
method.
Parameters
----------
dt : float (years, only!)
Timestep for which to run the component.
flooded_depths : array or field name (m)
Depths of flooding at each node, zero where no lake. Note that the
component will dynamically update this array as it fills nodes
with sediment (...but does NOT update any other related lake
fields).
"""
self.erode(dt=dt, flooded_depths=flooded_depths, **kwds)
@property
@make_return_array_immutable
def characteristic_grainsize(self):
"""Return the characteristic grain size used by the component.
Particularly useful if the set_Dchar_from_threshold flag was True
at initialization.
Returns
-------
Dchar : float or array
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg1 = RasterModelGrid((3,4), 1.)
>>> thresh_shields = np.arange(1, mg1.number_of_nodes+1, dtype=float)
>>> thresh_shields /= 100.
>>> sde1 = SedDepEroder(mg1, threshold_shear_stress=100., Qc='MPM',
... Dchar=None, set_threshold_from_Dchar=False,
... set_Dchar_from_threshold=True,
... threshold_Shields=thresh_shields)
>>> sde1.characteristic_grainsize
array([ 0.59962823, 0.29981412, 0.19987608, 0.14990706, 0.11992565,
0.09993804, 0.08566118, 0.07495353, 0.06662536, 0.05996282,
0.05451166, 0.04996902])
>>> mg2 = RasterModelGrid((3,4), 1.)
>>> sde2 = SedDepEroder(mg2, threshold_shear_stress=100., Qc='MPM',
... Dchar=None, set_threshold_from_Dchar=False,
... set_Dchar_from_threshold=True,
... threshold_Shields=None,
... slope_sensitive_threshold=True)
>>> S = mg2.add_ones('node', 'topographic__steepest_slope')
>>> S *= 0.05 # thresh = 100 Pa @ 5pc slope
>>> sde2.characteristic_grainsize # doctest: +NORMALIZE_WHITESPACE
array([ 0.08453729, 0.08453729, 0.08453729, 0.08453729,
0.08453729, 0.08453729, 0.08453729, 0.08453729,
0.08453729, 0.08453729, 0.08453729, 0.08453729])
"""
# If Dchar_in is None, self.lamb_flag is set; Dchar is then spatially
# variable and is not calculated until the main loop.
assert self.Qc == 'MPM', ("Characteristic grainsize is only " +
"calculated if Qc == 'MPM'")
if self.Dchar_in is not None:
return self.Dchar_in
else:
taustarcrit = 0.15 * self.grid.at_node[
'topographic__steepest_slope']**0.25
Dchar = self.thresh/(self.g*(self.sed_density -
self.fluid_density)*taustarcrit)
return Dchar
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metadata builder helper for models built with Keras API.
Currently, there are 3 different model-building styles in Keras: sequential,
functional and subclassed models. This module supports all 3 different flavors
for Tensorflow 1.X. In order to make use of it, a KerasGraphMetadataBuilder
should be initialized with a Keras model object and other optional parameters
such as session (if using a session other than Keras' default), serving_inputs (signature
definition dictionary if saving the model with this module), etc.
The module creates default metadata from the given Keras model. Users have
the option to supplement input and output metadata with additional
configuration parameters using the set_* functions. In addition, auxiliary
inputs and outputs can be added using the add_* functions or deleted using
the remove_* functions. The final metadata can either be fetched as a
dictionary using the get_metadata function or written to a directory along
with the model using the save_model_with_metadata function.
Example usage is as follows:
model = keras.models.Sequential()
model.add(keras.layers.Dense(32, activation='relu', input_dim=10))
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy')
builder = KerasGraphMetadataBuilder(model)
builder.save_model_with_metadata("gs://xai/model/keras/")
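The inferred entries can also be adjusted or inspected before saving; a short
sketch (the tensor index and friendly name below are illustrative):
builder.set_numeric_metadata(model.inputs[0], name='dense_input')
metadata_dict = builder.get_metadata()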
"""
from typing import Dict, Text, Optional, List, Any, Set, Union
import tensorflow.compat.v1 as tf
from explainable_ai_sdk.metadata import parameters
from explainable_ai_sdk.metadata.tf.v1 import graph_metadata_builder
class KerasGraphMetadataBuilder(graph_metadata_builder.GraphMetadataBuilder):
"""Class for generating metadata for models built with Keras API."""
def __init__(self,
model: tf.keras.Model,
outputs_to_explain: Optional[List[tf.Tensor]] = (),
session: tf.Session = None,
serving_inputs: Optional[Dict[Text, tf.Tensor]] = None,
serving_outputs: Optional[Dict[Text, tf.Tensor]] = None,
tags: Set[Text] = (tf.saved_model.tag_constants.SERVING,),
auto_infer: bool = True,
**kwargs): # pytype: disable=annotation-type-mismatch
"""Initializes a KerasGraphMetadataBuilder object.
Args:
model: Keras model to write the metadata for.
outputs_to_explain: List of output tensors (model.outputs) to explain.
Only single output is supported for now. Hence, the list should
contain one element. This parameter is required if the model has
multiple outputs.
session: Optional TF Session, if using a session different than of Keras
backend.
serving_inputs: A dictionary mapping from serving key to corresponding
input tensors. If not provided or empty, added and/or inferred input
metadata will be used.
serving_outputs: A dictionary mapping from serving key to model outputs.
If not provided or empty, added and/or inferred output metadata will be
used.
tags: The set of tags to annotate the meta graph def with.
auto_infer: A boolean flag to indicate whether inputs and outputs should
be inferred from the model itself. If set to False, the model's inputs
and an output must be provided.
**kwargs: Any keyword arguments to be passed to saved model builder's
add_meta_graph() function.
"""
if outputs_to_explain and len(outputs_to_explain) > 1:
raise ValueError('"outputs_to_explain" can only contain 1 element.\n'
'Got: %s' % len(outputs_to_explain))
if not outputs_to_explain and len(model.outputs) > 1:
raise ValueError('Model has multiple outputs. Please specify which one to'
' explain via "outputs_to_explain" parameter.')
self._model = model
self._output_tensors = outputs_to_explain
self._inputs, self._outputs = {}, {}
if auto_infer:
self._create_metadata_entries_from_model()
self._session = session if session else tf.keras.backend.get_session()
self._serving_inputs = serving_inputs
self._serving_outputs = serving_outputs
self._saved_model_args = kwargs
self._tags = tags
def _create_metadata_entries_from_model(self):
"""Creates input and output metadata from models inputs and outputs."""
for model_input in self._model.inputs:
self.add_numeric_metadata(model_input, model_input.op.name)
for model_output in self._model.outputs:
if (not self._output_tensors or
model_output.name == self._output_tensors[0].name):
self.add_output_metadata(model_output, model_output.op.name)
break
else:
raise ValueError('Provided output is not one of model outputs.')
def set_categorical_metadata(self,
model_input: tf.Tensor,
encoded_layer: tf.keras.layers.Layer,
encoding: Text,
name: Optional[Text] = None,
input_baselines: Optional[List[Any]] = None,
encoded_baselines: Optional[List[Any]] = None):
"""Sets an existing metadata identified by input as categorical with params.
Args:
model_input: One of model inputs.
encoded_layer: Encoded model layer for input layer if it exists. Output of
this layer will be used as the encoded tensor.
encoding: Encoding type if encoded_layer is provided. Possible values are
{identity, bag_of_features, bag_of_features_sparse, indicator,
combined_embedding, concat_embedding}.
name: Unique friendly name for this feature.
input_baselines: A list of baseline values. Each baseline value can be a
single entity or of the same shape as the model_input (except for the
batch dimension).
encoded_baselines: A list of baseline values for encoded layer output.
Each baseline value can be a single entity or of the same shape as the
encoded tensor (except for the batch dimension).
"""
self.remove_input_metadata(model_input)
self.add_categorical_metadata(model_input, encoded_layer.output, encoding,
name, input_baselines, encoded_baselines)
def set_numeric_metadata(self,
model_input: tf.Tensor,
name: Optional[Text] = None,
input_baselines: Optional[List[Any]] = None,
index_feature_mapping: Optional[List[Any]] = None):
"""Sets an existing metadata identified by input as numeric with params.
Args:
model_input: One of model inputs.
name: Unique friendly name for this feature.
input_baselines: A list of baseline values. Each baseline value can be a
single entity or of the same shape as the model_input (except for the
batch dimension).
index_feature_mapping: A list of feature names for each index in the input
tensor.
"""
self.remove_input_metadata(model_input)
self.add_numeric_metadata(
model_input,
name,
input_baselines,
index_feature_mapping=index_feature_mapping)
def set_text_metadata(self,
model_input: tf.Tensor,
encoded_layer: tf.keras.layers.Layer,
encoding: Text,
name: Optional[Text] = None,
input_baselines: Optional[List[Any]] = None,
encoded_baselines: Optional[List[Any]] = None):
"""Sets an existing metadata identified by input as text with params.
Args:
model_input: One of model inputs.
encoded_layer: Encoded model layer for input layer if it exists. Output of
this layer will be used as the encoded tensor.
encoding: Encoding type if encoded_layer is provided. Possible values are
{identity, bag_of_features, bag_of_features_sparse, indicator,
combined_embedding, concat_embedding}.
name: Unique friendly name for this feature.
input_baselines: A list of baseline values. Each baseline value can be a
single entity or of the same shape as the model_input (except for the
batch dimension).
encoded_baselines: A list of baseline values for encoded layer output.
Each baseline value can be a single entity or of the same shape as the
encoded tensor (except for the batch dimension).
"""
self.remove_input_metadata(model_input)
self.add_text_metadata(model_input, encoded_layer.output, encoding, name,
input_baselines, encoded_baselines)
def set_image_metadata(
self,
model_input: tf.Tensor,
name: Optional[Text] = None,
input_baselines: Optional[List[Any]] = None,
visualization: Optional[Union[Dict[str, str],
parameters.VisualizationParameters]] = None,
domain: Optional[parameters.DomainInfo] = None
):
"""Sets an existing metadata identified by input as image with params.
Args:
model_input: One of model inputs.
name: Unique friendly name for this feature.
input_baselines: A list of baseline values. Each baseline value can be a
single entity or of the same shape as the model_input (except for the
batch dimension).
visualization: Either a dictionary of visualization parameters or
VisualizationParameters instance. Using VisualizationParameters is
recommended instead of a dictionary, which will be deprecated soon. If
None, a default visualization config will be selected based on the
explanation method (IG/XRAI).
domain: DomainInfo object specifying the range of the input feature.
"""
self.remove_input_metadata(model_input)
self.add_image_metadata(model_input, name, input_baselines, visualization,
domain)
def set_output_metadata(self,
model_output: tf.Tensor,
name: Optional[Text] = None):
"""Adds output tensor as output metadata.
Args:
model_output: Model output to get the explanations for. Needs to be a
tensor of float type, such as probabilities, logits.
name: Unique friendly name for the output.
"""
self.remove_output_metadata(model_output)
self.add_output_metadata(model_output, name)
def remove_input_metadata(self, model_input: tf.Tensor):
"""Removes a metadata entry identified by the tensor.
Args:
model_input: Model input to be removed from input metadata entries.
"""
if model_input.name not in self._inputs:
raise ValueError('Input "%s" does not exist.' % model_input.name)
del self._inputs[model_input.name]
def remove_output_metadata(self, model_output: tf.Tensor):
"""Removes a metadata entry identified by the tensor.
Args:
model_output: Model output to be removed from output metadata.
"""
if model_output.name not in self._outputs:
raise ValueError('Output "%s" does not exist.' % model_output.name)
del self._outputs[model_output.name]
|
|
"""
Support for Synology NAS Sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.synologydsm/
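Example configuration.yaml entry (illustrative; the host and credentials are
placeholders):

sensor:
  - platform: synologydsm
    host: IP_ADDRESS_OF_NAS
    port: 5000
    username: YOUR_USERNAME
    password: YOUR_PASSWORD
    monitored_conditions:
      - cpu_total_load
      - memory_real_usage
      - network_up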
"""
import logging
from datetime import timedelta
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
from homeassistant.const import (
CONF_HOST, CONF_USERNAME, CONF_PASSWORD, CONF_PORT,
CONF_MONITORED_CONDITIONS, TEMP_CELSIUS, EVENT_HOMEASSISTANT_START)
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
REQUIREMENTS = ['python-synology==0.1.0']
_LOGGER = logging.getLogger(__name__)
CONF_DISKS = 'disks'
CONF_VOLUMES = 'volumes'
DEFAULT_NAME = 'Synology DSM'
DEFAULT_PORT = 5000
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
_UTILISATION_MON_COND = {
'cpu_other_load': ['CPU Load (Other)', '%', 'mdi:chip'],
'cpu_user_load': ['CPU Load (User)', '%', 'mdi:chip'],
'cpu_system_load': ['CPU Load (System)', '%', 'mdi:chip'],
'cpu_total_load': ['CPU Load (Total)', '%', 'mdi:chip'],
'cpu_1min_load': ['CPU Load (1 min)', '%', 'mdi:chip'],
'cpu_5min_load': ['CPU Load (5 min)', '%', 'mdi:chip'],
'cpu_15min_load': ['CPU Load (15 min)', '%', 'mdi:chip'],
'memory_real_usage': ['Memory Usage (Real)', '%', 'mdi:memory'],
'memory_size': ['Memory Size', 'MB', 'mdi:memory'],
'memory_cached': ['Memory Cached', 'MB', 'mdi:memory'],
'memory_available_swap': ['Memory Available (Swap)', 'MB', 'mdi:memory'],
'memory_available_real': ['Memory Available (Real)', 'MB', 'mdi:memory'],
'memory_total_swap': ['Memory Total (Swap)', 'MB', 'mdi:memory'],
'memory_total_real': ['Memory Total (Real)', 'MB', 'mdi:memory'],
'network_up': ['Network Up', 'Kbps', 'mdi:upload'],
'network_down': ['Network Down', 'Kbps', 'mdi:download'],
}
_STORAGE_VOL_MON_COND = {
'volume_status': ['Status', None, 'mdi:checkbox-marked-circle-outline'],
'volume_device_type': ['Type', None, 'mdi:harddisk'],
'volume_size_total': ['Total Size', None, 'mdi:chart-pie'],
'volume_size_used': ['Used Space', None, 'mdi:chart-pie'],
'volume_percentage_used': ['Volume Used', '%', 'mdi:chart-pie'],
'volume_disk_temp_avg': ['Average Disk Temp', None, 'mdi:thermometer'],
'volume_disk_temp_max': ['Maximum Disk Temp', None, 'mdi:thermometer'],
}
_STORAGE_DSK_MON_COND = {
'disk_name': ['Name', None, 'mdi:harddisk'],
'disk_device': ['Device', None, 'mdi:dots-horizontal'],
'disk_smart_status': ['Status (Smart)', None,
'mdi:checkbox-marked-circle-outline'],
'disk_status': ['Status', None, 'mdi:checkbox-marked-circle-outline'],
'disk_exceed_bad_sector_thr': ['Exceeded Max Bad Sectors', None,
'mdi:test-tube'],
'disk_below_remain_life_thr': ['Below Min Remaining Life', None,
'mdi:test-tube'],
'disk_temp': ['Temperature', None, 'mdi:thermometer'],
}
_MONITORED_CONDITIONS = list(_UTILISATION_MON_COND.keys()) + \
list(_STORAGE_VOL_MON_COND.keys()) + \
list(_STORAGE_DSK_MON_COND.keys())
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS):
vol.All(cv.ensure_list, [vol.In(_MONITORED_CONDITIONS)]),
vol.Optional(CONF_DISKS, default=None): cv.ensure_list,
vol.Optional(CONF_VOLUMES, default=None): cv.ensure_list,
})
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Setup the Synology NAS Sensor."""
# pylint: disable=too-many-locals
def run_setup(event):
"""Wait until HASS is fully initialized before creating.
Delay the setup until Home Assistant is fully initialized.
This allows any entities to be created already
"""
# Setup API
api = SynoApi(config.get(CONF_HOST), config.get(CONF_PORT),
config.get(CONF_USERNAME), config.get(CONF_PASSWORD),
hass.config.units.temperature_unit)
sensors = [SynoNasUtilSensor(api, variable,
_UTILISATION_MON_COND[variable])
for variable in config[CONF_MONITORED_CONDITIONS]
if variable in _UTILISATION_MON_COND]
# Handle all Volumes
volumes = config[CONF_VOLUMES]
if volumes is None:
volumes = api.storage.volumes
for volume in volumes:
sensors += [SynoNasStorageSensor(api, variable,
_STORAGE_VOL_MON_COND[variable],
volume)
for variable in config[CONF_MONITORED_CONDITIONS]
if variable in _STORAGE_VOL_MON_COND]
# Handle all Disks
disks = config[CONF_DISKS]
if disks is None:
disks = api.storage.disks
for disk in disks:
sensors += [SynoNasStorageSensor(api, variable,
_STORAGE_DSK_MON_COND[variable],
disk)
for variable in config[CONF_MONITORED_CONDITIONS]
if variable in _STORAGE_DSK_MON_COND]
add_devices_callback(sensors)
# Wait until start event is sent to load this component.
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, run_setup)
class SynoApi():
"""Class to interface with API."""
# pylint: disable=too-many-arguments, bare-except
def __init__(self, host, port, username, password, temp_unit):
"""Constructor of the API wrapper class."""
from SynologyDSM import SynologyDSM
self.temp_unit = temp_unit
try:
self._api = SynologyDSM(host,
port,
username,
password)
except:
_LOGGER.error("Error setting up Synology DSM")
# Will be updated when `update` gets called.
self.utilisation = self._api.utilisation
self.storage = self._api.storage
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update function for updating api information."""
self._api.update()
class SynoNasSensor(Entity):
"""Representation of a Synology Nas Sensor."""
def __init__(self, api, variable, variableInfo, monitor_device=None):
"""Initialize the sensor."""
self.var_id = variable
self.var_name = variableInfo[0]
self.var_units = variableInfo[1]
self.var_icon = variableInfo[2]
self.monitor_device = monitor_device
self._api = api
@property
def name(self):
"""Return the name of the sensor, if any."""
if self.monitor_device is not None:
return "{} ({})".format(self.var_name, self.monitor_device)
else:
return self.var_name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self.var_icon
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
if self.var_id in ['volume_disk_temp_avg', 'volume_disk_temp_max',
'disk_temp']:
return self._api.temp_unit
else:
return self.var_units
def update(self):
"""Get the latest data for the states."""
if self._api is not None:
self._api.update()
class SynoNasUtilSensor(SynoNasSensor):
"""Representation a Synology Utilisation Sensor."""
@property
def state(self):
"""Return the state of the sensor."""
network_sensors = ['network_up', 'network_down']
memory_sensors = ['memory_size', 'memory_cached',
'memory_available_swap', 'memory_available_real',
'memory_total_swap', 'memory_total_real']
if self.var_id in network_sensors or self.var_id in memory_sensors:
attr = getattr(self._api.utilisation, self.var_id)(False)
if self.var_id in network_sensors:
return round(attr / 1024.0, 1)
elif self.var_id in memory_sensors:
return round(attr / 1024.0 / 1024.0, 1)
else:
return getattr(self._api.utilisation, self.var_id)
class SynoNasStorageSensor(SynoNasSensor):
"""Representation a Synology Utilisation Sensor."""
@property
def state(self):
"""Return the state of the sensor."""
temp_sensors = ['volume_disk_temp_avg', 'volume_disk_temp_max',
'disk_temp']
if self.monitor_device is not None:
if self.var_id in temp_sensors:
attr = getattr(self._api.storage,
self.var_id)(self.monitor_device)
if self._api.temp_unit == TEMP_CELSIUS:
return attr
else:
return round(attr * 1.8 + 32.0, 1)
else:
return getattr(self._api.storage,
self.var_id)(self.monitor_device)
|
|
import random
import math
import benchmarkFunctions
import copy
NUM_DIMENSIONS = 20
NUM_ITERATIONS = 1000
POPULATION_SIZE = 50
START_EXPLOITATION = 600
random_range_value = 1
INERTIA_NEIGHBORS = 0.9
INERTIA_FOOD = 0.9
CT = 0.5
N_MAX = 0.02
FORAGING_SPEED = 0.02
DIFUSION_SPEED = 0.005
EPSILON = 10**-5
CONVERGENCE_PRECISION = 10**-3
X_MAX = 32
X_MIN = -32
Y_MAX = 32
Y_MIN = -32
fitness = benchmarkFunctions.ackley
kbest = 10**9
kworst = 0
SOLUTION_FOUND_ITERATIONS = list()
CONVERGENT_EXECS = 0
CONVERGENT_INDIVIDUALS = list()
ALL_SOLVED_ITERATIONS = list()
INDIVIDUALS_FITNESS = list()
KBEST_FITNESS = list()
# individual (krill) representation: (position, best_position_so_far, old_N, old_F)
def generate_population():
population = list()
for i in range(POPULATION_SIZE):
genome = list()
for s in range(NUM_DIMENSIONS):
individual = random.uniform(X_MIN, X_MAX);
genome.append(individual)
population.append((genome, genome, zero_vector(NUM_DIMENSIONS), zero_vector(NUM_DIMENSIONS)))
return population
def generate_population_branin():
population = list()
for i in range(POPULATION_SIZE):
genome = list()
individual1 = random.uniform(X_MIN, X_MAX)
individual2 = random.uniform(Y_MIN, Y_MAX)
genome.extend([individual1, individual2])
population.append((genome, genome, zero_vector(NUM_DIMENSIONS), zero_vector(NUM_DIMENSIONS)))
return population
#####
# Auxiliary Functions
###
def make_rand_vector(dims):
vec = [random.uniform(-random_range_value, random_range_value) for i in range(dims)]
return [x for x in vec]
def zero_vector(dims):
return [0 for i in range(dims)]
def norm(vector):
return math.sqrt(sum(map(lambda x : x**2, vector)))
def vector_diff(vector1, vector2):
return [x_i - x_j for x_i, x_j in zip(vector1, vector2)]
def vector_sum(vector1, vector2):
return [x_i + x_j for x_i, x_j in zip(vector1, vector2)]
def vector_constant_product(vector1, constant):
return [x_i * constant for x_i in vector1]
def distance(v1, v2):
return norm(vector_diff(v1,v2))
#####
# Random Difusion
###
def random_difusion(iteration):
return vector_constant_product(make_rand_vector(NUM_DIMENSIONS), DIFUSION_SPEED * (1 - 3*iteration/NUM_ITERATIONS))
#####
# Ni
###
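# Neighbour-induced motion N_i: alfa = alfa_local (attraction/repulsion from
# krill within the sensing distance, weighted by normalised fitness and
# position differences) + alfa_target (a pull towards the current best krill,
# scaled by C_best). The new N_i is alfa scaled by N_MAX plus the previous
# N_i scaled by INERTIA_NEIGHBORS.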
def k_hat(ki, kj):
global kworst
global kbest
return (ki - kj) / (kworst - kbest)
def x_hat(xi, xj):
diff = vector_diff(xj,xi)
norm_diff = norm(diff)
return [float(x)/(norm_diff + EPSILON) for x in diff]
def k_x_hat_product(krill_i,krill_j,fitness_i, fitness_j):
return vector_constant_product(x_hat(krill_i, krill_j), k_hat(fitness_i, fitness_j))
def alfa_local(krill, krill_fit, population, population_fitness):
(neighbors, neighbors_fit) = find_neighbors(krill, population, population_fitness)
# print "num neighbors:" +str(len(neighbors))
# print "neighbors:" +str(neighbors)
sum_vec = zero_vector(NUM_DIMENSIONS)
for idx, value in enumerate(neighbors):
sum_vec = vector_sum(sum_vec, k_x_hat_product(krill, value, krill_fit, neighbors_fit[idx]))
return sum_vec
def find_neighbors(krill, population, population_fitness):
ds = sensing_distance(krill,population)
# print "sensing_distance: " + str(ds)
neighbors = list()
neighbors_fit = list()
for idx, x in enumerate(population):
individual_i = x[0]
distance_i = distance(krill,individual_i)
# print distance_i
if(individual_i != krill and distance_i <= ds):
neighbors.append(x[0])
neighbors_fit.append(population_fitness[idx])
return (neighbors, neighbors_fit)
def sensing_distance(krill, population):
val1 = sum(map(lambda x : distance(x[0], krill), population))
# print val1
return float(val1)/(POPULATION_SIZE*5)
def alfa_target(krill, krill_fit, best, best_fit, iteration):
cbest = C_best(iteration)
return vector_constant_product(k_x_hat_product(krill, best, krill_fit, best_fit), cbest)
def alfa(krill, krill_fit, best, population, population_fitness, iteration):
best_fit = fitness(best)
local = alfa_local(krill, krill_fit, population, population_fitness)
target = alfa_target(krill, krill_fit, best, best_fit, iteration)
# print "local: "+ str(local)
# print "target: "+ str(target)
return vector_sum(local,target)
def C_best(iteration):
return 2 * (random.uniform(0,1) + float(iteration)/NUM_ITERATIONS)
def neighbors_induced_mov(krill, krill_fit, best, population, population_fitness, old_N, iteration):
return vector_sum(vector_constant_product(alfa(krill, krill_fit, best, population, population_fitness, iteration), N_MAX), vector_constant_product(old_N, INERTIA_NEIGHBORS))
#####
# Fi
###
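# Food-induced motion F_i: beta = beta_food (attraction towards a virtual
# "food" position, the inverse-fitness-weighted centroid of the population,
# scaled by C_food, which decays with iteration) + attraction towards the
# krill's own best position so far. The new F_i is beta scaled by
# FORAGING_SPEED plus the previous F_i scaled by INERTIA_FOOD.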
def food_position(population, population_fitness):
sum_denominator = 0
sum_numerator = zero_vector(len(population[0][0]))
for idx, krill in enumerate(population):
fit_weight = 1.0/population_fitness[idx]
sum_numerator = vector_sum(sum_numerator, vector_constant_product(krill[0],fit_weight))
sum_denominator += fit_weight
return vector_constant_product(sum_numerator, 1/sum_denominator)
def beta_food(krill, krill_fit, food_pos, iteration):
# print (food_pos)
food_fit = fitness(food_pos)
return vector_constant_product(k_x_hat_product(krill, food_pos, krill_fit, food_fit), C_food(iteration))
def C_food(iteration):
return 2*(1 - float(iteration)/NUM_ITERATIONS)
def beta(krill, krill_fit, krill_best, x_food, population, population_fitness, iteration):
return vector_sum( beta_food(krill, krill_fit, x_food, iteration), k_x_hat_product(krill, krill_best, krill_fit, fitness(krill_best)))
def food_induced_mov(krill, krill_fit, krill_best, x_food, population, population_fitness, old_F, iteration):
return vector_sum(vector_constant_product(beta(krill, krill_fit, krill_best, x_food, population, population_fitness, iteration), FORAGING_SPEED), vector_constant_product(old_F, INERTIA_FOOD))
#####
# Movement
###
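# Position update: dX/dt = N_i + F_i + D_i, where D_i is a random diffusion
# vector scaled by DIFUSION_SPEED and an iteration-dependent factor. Each
# krill then takes an Euler step of delta_t * dX/dt, with delta_t derived
# from the full search range (exploration) or from the population's current
# bounding box (exploitation).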
def dX_dt(krill, krill_fit, krill_best, best, x_food, population, population_fitness, old_N, old_F, iteration):
Ni = neighbors_induced_mov(krill, krill_fit, best, population, population_fitness, old_N, iteration)
# print Ni
Fi = food_induced_mov(krill, krill_fit, krill_best, x_food, population, population_fitness, old_F, iteration)
Di = random_difusion(iteration)
return (vector_sum(vector_sum(Ni,Fi),Di), Ni, Fi)
def move(krill, delta_t, delta_move):
return vector_sum( krill,vector_constant_product(delta_move, delta_t))
def select_best_krill(population):
min_krill = population[0]
min_fitness = 10**9
population_fitness = list()
for x in population:
curr_fit = fitness(x[0])
population_fitness.append(curr_fit)
if min_fitness > curr_fit:
min_krill = x
min_fitness = curr_fit
return (min_krill,population_fitness)
def delta_t(population, explore):
sumi = 0
lower_bound = copy.copy(population[0][0])
upper_bound = copy.copy(population[0][0])
c_t = CT
if not explore:
c_t /= 2
for x in population:
for xi in range(NUM_DIMENSIONS):
if lower_bound[xi] > x[0][xi]:
lower_bound[xi] = x[0][xi]
if upper_bound[xi] < x[0][xi]:
upper_bound[xi] = x[0][xi]
meanU = list()
for x in range(NUM_DIMENSIONS):
if not explore:
meanU.append(2*(upper_bound[x] - lower_bound[x]))
else:
meanU.append(X_MAX - X_MIN)
# list.sort(meanU)
# print(meanU)
return c_t * sum(meanU)
def delta_t_branin(population, explore):
sumi = 0
lower_bound = copy.copy(population[0][0])
upper_bound = copy.copy(population[0][0])
c_t = CT
if not explore:
c_t /= 2
for x in population:
for xi in range(NUM_DIMENSIONS):
if lower_bound[xi] > x[0][xi]:
lower_bound[xi] = x[0][xi]
if upper_bound[xi] < x[0][xi]:
upper_bound[xi] = x[0][xi]
meanU = list()
if not explore:
meanU.append(2*(upper_bound[0] - lower_bound[0]))
meanU.append(2*(upper_bound[1] - lower_bound[1]))
else:
meanU.append(X_MAX - X_MIN)
meanU.append(Y_MAX - Y_MIN)
return c_t * sum(meanU)
def check_for_solution(population):
solutions = 0
for x in population:
if abs(fitness(x[1])) < CONVERGENCE_PRECISION:
solutions += 1
return solutions
def evolve():
global CONVERGENT_EXECS
global kworst
global kbest
global INERTIA_NEIGHBORS
global FORAGING_SPEED
global INERTIA_FOOD
global DIFUSION_SPEED
movement_vector = list()
population = generate_population()
krill = population[0]
solved = False
i = 0
best_change_iterations = 0
INERTIA_NEIGHBORS = 0.9
INERTIA_FOOD = 0.9
kworst = 0
kbest = 10**9
benchmarkFunctions.FUNCTION_EVALUATION = 0
while i < NUM_ITERATIONS:
i += 1
(best_krill, population_fitness) = select_best_krill(population)
x_food = food_position(population, population_fitness)
new_population = list()
iteration_min_fit = min(population_fitness)
iteration_max_fit = max(population_fitness)
if kworst < iteration_max_fit:
kworst = iteration_max_fit
if kbest > iteration_min_fit:
kbest = iteration_min_fit
best_change_iterations = 0
DIFUSION_SPEED = 0.005
else:
best_change_iterations += 1
INERTIA_NEIGHBORS = 0.1 + 0.8 * (1 - i/NUM_ITERATIONS)
INERTIA_FOOD = 0.1 + 0.8 * (1 - i/NUM_ITERATIONS)
print "iteration "+ str(i)+ ": kworst = "+ str(kworst)+ " | kbest = "+ str(kbest)
isExploration = int(i/50)%2 == 0
print isExploration
dt = delta_t(population, isExploration)
flag_test = False
if best_change_iterations > 20:
best_change_iterations = 0
DIFUSION_SPEED *= 10
flag_test = True;
if i > 500:
dt /= 3
# if i > 750:
# dt /= 3
print dt
for idx, krill in enumerate(population):
krill_best = krill[1]
(movement_vector, new_N, new_F) = dX_dt(krill[0], population_fitness[idx], krill_best, best_krill[0], x_food ,population, population_fitness, krill[2], krill[3],i)
new_krill_position = vector_sum(krill[0] ,vector_constant_product(movement_vector, dt))
if fitness(new_krill_position) < fitness(krill_best):
krill_best = new_krill_position
new_population.append((new_krill_position, krill_best, new_N, new_F));
population = new_population
print "########################"
if flag_test:
flag_test = False
DIFUSION_SPEED = 0.005
# print population
solutions = check_for_solution(new_population)
CONVERGENT_INDIVIDUALS.append(solutions)
SOLUTION_FOUND_ITERATIONS.append(i)
print SOLUTION_FOUND_ITERATIONS
kbest_fit = map(lambda x: fitness(x[1]), population)
mean_pop_fitness = mean(kbest_fit)
KBEST_FITNESS.append(min(kbest_fit))
INDIVIDUALS_FITNESS.append(mean_pop_fitness)
print "best "+ str(population[kbest_fit.index(min(kbest_fit))][1])
print "Population fitness: " + str(mean_pop_fitness)
print "Convergent individuals: " + str(solutions)
if solutions > 0:
solved = True
CONVERGENT_EXECS+=1
print "Solution found after " + str(i) + " iterations"
else:
print "Solution not found!"
def mean(list_items):
return sum(list_items)/float(len(list_items))
def std_dev(list_items, mean_items):
variance_list = map(lambda x : pow(x-mean_items, 2), list_items)
return math.sqrt(sum(variance_list)/float(len(list_items)))
def initialize_function(benchmark_params, dims):
global fitness
global X_MIN
global X_MAX
global CONVERGENCE_PRECISION
global NUM_DIMENSIONS
fitness = benchmark_params[0]
if dims==None:
NUM_DIMENSIONS = benchmark_params[1]
else:
NUM_DIMENSIONS = dims
CONVERGENCE_PRECISION = benchmark_params[2]
X_MIN = benchmark_params[3]
X_MAX = benchmark_params[4]
if fitness == benchmarkFunctions.branin:
global Y_MIN
global Y_MAX
global generate_population
global delta_t
Y_MIN = benchmark_params[5]
Y_MAX = benchmark_params[6]
generate_population = generate_population_branin
delta_t = delta_t_branin
def main(num_of_trials, function_params, dims = None):
initialize_function(function_params, dims)
print CONVERGENCE_PRECISION
print NUM_DIMENSIONS
print X_MAX
print X_MIN
print Y_MAX
print Y_MIN
for i in range(num_of_trials):
print "Execution " + str(i+1)
evolve()
print ""
mean_iterations = mean(SOLUTION_FOUND_ITERATIONS)
mean_fitness = mean(INDIVIDUALS_FITNESS)
mean_individuals = mean(CONVERGENT_INDIVIDUALS)
print "Convergent executions: " + str(CONVERGENT_EXECS)
print "Mean of iterations: " + str(mean_iterations)
# print "Std of iterations: " + str(std_dev(SOLUTION_FOUND_ITERATIONS, mean_iterations))
print "Mean of fitness: " + str(mean_fitness)
print "Std of fitness: " + str(std_dev(INDIVIDUALS_FITNESS, mean_fitness))
print "Mean of convergent indivs: " + str(mean_individuals)
print "Std of convergent indivs: " + str(std_dev(CONVERGENT_INDIVIDUALS, mean_individuals))
print "Best solution found " + str(min(KBEST_FITNESS))
print "Mean solution found " + str(mean(KBEST_FITNESS))
# print "Mean of total convergence iterations: " + str(mean_iter_total)
#print "ACKLEY"
#main(5, benchmarkFunctions.ACKLEY())
#print "GRIEWANK"
#main(5, benchmarkFunctions.GRIEWANK())
#print "RASTRIGIN"
#main(5, benchmarkFunctions.RASTRIGIN())
#print "ROSENBROCK"
#main(5, benchmarkFunctions.ROSENBROCK())
#print "SCHEWEFEL 226"
#main(5, benchmarkFunctions.SCHWEFEL226())
#print "SCHEWEFEL 222"
#main(5, benchmarkFunctions.SCHWEFEL222())
#print "SPHERE"
#main(5, benchmarkFunctions.SPHERE())
print "BRANIN"
main(5, benchmarkFunctions.BRANIN())
print "ALPINE"
main(5, benchmarkFunctions.ALPINE())
|
|
# encoding: utf-8
"""
supervisor.py
Created by Thomas Mangin on 2011-11-29.
Copyright (c) 2011-2013 Exa Networks. All rights reserved.
"""
import os
import sys
import signal
import traceback
from socket import has_ipv6
from .util.pid import PID
from .util.daemon import Daemon
from .util.alarm import alarm_thread
from .reactor.content.manager import ContentManager
from .reactor.client.manager import ClientManager
from .reactor.resolver.manager import ResolverManager
from .network.async import Poller
from .network.server import Server
from .html.page import Page
from .monitor import Monitor
from .reactor import Reactor
from .reactor.redirector import fork_redirector
from .reactor.redirector import redirector_message_thread
from .configuration import load
from exaproxy.util.log.logger import Logger
from exaproxy.util.log.writer import SysLogWriter
from exaproxy.util.log.writer import UsageWriter
from exaproxy.util.interfaces import getifaddrs,AF_INET,AF_INET6
class Supervisor (object):
alarm_time = 0.1 # regular backend work
second_frequency = int(1/alarm_time) # when we record history
minute_frequency = int(60/alarm_time) # when we want to average history
increase_frequency = int(5/alarm_time) # when we add workers
decrease_frequency = int(60/alarm_time) # when we remove workers
saturation_frequency = int(20/alarm_time) # when we report connection saturation
interface_frequency = int(300/alarm_time) # when we check for new interfaces
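# A quick worked example of the tick arithmetic above (values follow directly
# from the default alarm_time of 0.1s): the reactor is interrupted every 0.1s,
# so second_frequency = int(1/0.1) = 10, minute_frequency = int(60/0.1) = 600,
# increase_frequency = int(5/0.1) = 50, saturation_frequency = int(20/0.1) = 200
# and interface_frequency = int(300/0.1) = 3000 ticks. The counters in run()
# wrap modulo these values, so each task fires when its counter returns to zero.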
# import os
# clear = [hex(ord(c)) for c in os.popen('clear').read()]
# clear = ''.join([chr(int(c,16)) for c in ['0x1b', '0x5b', '0x48', '0x1b', '0x5b', '0x32', '0x4a']])
def __init__ (self,configuration):
self.configuration = configuration
# Only here so the introspection code can find them
self.log = Logger('supervisor', configuration.log.supervisor)
self.log.error('Starting exaproxy version %s' % configuration.proxy.version)
self.signal_log = Logger('signal', configuration.log.signal)
self.log_writer = SysLogWriter('log', configuration.log.destination, configuration.log.enable, level=configuration.log.level)
self.usage_writer = UsageWriter('usage', configuration.usage.destination, configuration.usage.enable)
sys.exitfunc = self.log_writer.writeMessages
self.log_writer.setIdentifier(configuration.daemon.identifier)
#self.usage_writer.setIdentifier(configuration.daemon.identifier)
if configuration.debug.log:
self.log_writer.toggleDebug()
self.usage_writer.toggleDebug()
self.log.error('python version %s' % sys.version.replace(os.linesep,' '))
self.log.debug('starting %s' % sys.argv[0])
self.pid = PID(self.configuration)
self.daemon = Daemon(self.configuration)
self.poller = Poller(self.configuration.daemon)
self.poller.setupRead('read_proxy') # Listening proxy sockets
self.poller.setupRead('read_web') # Listening webserver sockets
self.poller.setupRead('read_icap') # Listening icap sockets
self.poller.setupRead('read_redirector') # Pipes carrying responses from the redirector process
self.poller.setupRead('read_resolver') # Sockets currently listening for DNS responses
self.poller.setupRead('read_client') # Active clients
self.poller.setupRead('opening_client') # Clients we have not yet read a request from
self.poller.setupWrite('write_client') # Active clients with buffered data to send
self.poller.setupWrite('write_resolver') # Active DNS requests with buffered data to send
self.poller.setupRead('read_download') # Established connections
self.poller.setupWrite('write_download') # Established connections we have buffered data to send to
self.poller.setupWrite('opening_download') # Opening connections
self.poller.setupRead('read_interrupt') # Scheduled events
self.poller.setupRead('read_control') # Responses from commands sent to the redirector process
self.monitor = Monitor(self)
self.page = Page(self)
self.content = ContentManager(self,configuration)
self.client = ClientManager(self.poller, configuration)
self.resolver = ResolverManager(self.poller, self.configuration, configuration.dns.retries*10)
self.proxy = Server('http proxy',self.poller,'read_proxy', configuration.http.connections)
self.web = Server('web server',self.poller,'read_web', configuration.web.connections)
self.icap = Server('icap server',self.poller,'read_icap', configuration.icap.connections)
self._shutdown = True if self.daemon.filemax == 0 else False # stop the program
self._softstop = False # stop once all current connection have been dealt with
self._reload = False # unimplemented
self._toggle_debug = False # start logging a lot
self._decrease_spawn_limit = 0
self._increase_spawn_limit = 0
self._refork = False # unimplemented
self._pdb = False # turn on pdb debugging
self._listen = None # listening change ? None: no, True: listen, False: stop listening
self.wait_time = 5.0 # how long do we wait at maximum once we have been soft-killed
self.local = set() # what addresses are on our local interfaces
if not self.initialise():
self._shutdown = True
elif self.daemon.drop_privileges():
self.log.critical('Could not drop privileges to \'%s\'. Refusing to run as root' % self.daemon.user)
self.log.critical('Set the environment value USER to change the unprivileged user')
self._shutdown = True
# fork the redirector process before performing any further setup
redirector = fork_redirector(self.poller, self.configuration)
# use simple blocking IO for communication with the redirector process
self.redirector = redirector_message_thread(redirector)
# NOTE: create threads _after_ all forking is done
# regularly interrupt the reactor for maintenance
self.interrupt_scheduler = alarm_thread(self.poller, self.alarm_time)
self.reactor = Reactor(self.configuration, self.web, self.proxy, self.icap, self.redirector, self.content, self.client, self.resolver, self.log_writer, self.usage_writer, self.poller)
self.interfaces()
signal.signal(signal.SIGQUIT, self.sigquit)
signal.signal(signal.SIGINT, self.sigterm)
signal.signal(signal.SIGTERM, self.sigterm)
# signal.signal(signal.SIGABRT, self.sigabrt)
# signal.signal(signal.SIGHUP, self.sighup)
signal.signal(signal.SIGTRAP, self.sigtrap)
signal.signal(signal.SIGUSR1, self.sigusr1)
signal.signal(signal.SIGUSR2, self.sigusr2)
signal.signal(signal.SIGTTOU, self.sigttou)
signal.signal(signal.SIGTTIN, self.sigttin)
# make sure we always have data in history
# (done in zero() for dependency reasons)
if self._shutdown is False:
self.redirector.requestStats()
command, control_data = self.redirector.readResponse()
stats_data = control_data if command == 'STATS' else None
stats = self.monitor.statistics(stats_data)
ok = self.monitor.zero(stats)
if ok:
self.redirector.requestStats()
else:
self._shutdown = True
def exit (self):
sys.exit()
def sigquit (self,signum, frame):
if self._softstop:
self.signal_log.critical('multiple SIGQUIT received, shutdown')
self._shutdown = True
else:
self.signal_log.critical('SIGQUIT received, soft-stop')
self._softstop = True
self._listen = False
def sigterm (self,signum, frame):
self.signal_log.critical('SIG TERM received, shutdown request')
if os.environ.get('PDB',False):
self._pdb = True
else:
self._shutdown = True
# def sigabrt (self,signum, frame):
# self.signal_log.info('SIG INFO received, refork request')
# self._refork = True
# def sighup (self,signum, frame):
# self.signal_log.info('SIG HUP received, reload request')
# self._reload = True
def sigtrap (self,signum, frame):
self.signal_log.critical('SIG TRAP received, toggle debug')
self._toggle_debug = True
def sigusr1 (self,signum, frame):
self.signal_log.critical('SIG USR1 received, decrease worker number')
self._decrease_spawn_limit += 1
def sigusr2 (self,signum, frame):
self.signal_log.critical('SIG USR2 received, increase worker number')
self._increase_spawn_limit += 1
def sigttou (self,signum, frame):
self.signal_log.critical('SIG TTOU received, stop listening')
self._listen = False
def sigttin (self,signum, frame):
self.signal_log.critical('SIG TTIN received, start listening')
self._listen = True
def interfaces (self):
local = { '127.0.0.1', '::1' }
for interface in getifaddrs():
if interface.family not in (AF_INET,AF_INET6):
continue
if interface.address not in self.local:
self.log.info('found new local ip %s (%s)' % (interface.address,interface.name))
local.add(interface.address)
for ip in self.local:
if ip not in local:
self.log.info('removed local ip %s' % ip)
if local == self.local:
self.log.info('no ip change')
else:
self.local = local
def run (self):
count_second = 0
count_minute = 0
count_saturation = 0
count_interface = 0
events = {'read_interrupt'}
while True:
count_second = (count_second + 1) % self.second_frequency
count_minute = (count_minute + 1) % self.minute_frequency
count_saturation = (count_saturation + 1) % self.saturation_frequency
count_interface = (count_interface + 1) % self.interface_frequency
try:
if self._pdb:
self._pdb = False
import pdb
pdb.set_trace()
# prime the alarm
if 'read_interrupt' in events:
self.interrupt_scheduler.setAlarm()
# check for IO change with select
status, events = self.reactor.run()
# shut down the server if a child process disappears
if status is False:
self._shutdown = True
# respond to control responses immediately
if 'read_control' in events:
command, control_data = self.redirector.readResponse()
if command == 'STATS':
ok = self.doStats(count_second, count_minute, control_data)
if ok is False:
self._shutdown = True
# jump straight back into the reactor if we haven't yet received an
# interrupt event
if 'read_interrupt' not in events:
continue
# clear the alarm condition
self.interrupt_scheduler.acknowledgeAlarm()
# must follow the reactor so we are sure to go through the reactor at least once
# and flush any logs
if self._shutdown:
self._shutdown = False
self.shutdown()
break
elif self._reload:
self._reload = False
self.reload()
elif self._refork:
self._refork = False
self.signal_log.warning('refork not implemented')
# stop listening to new connections
# refork the program (as we have been updated)
# just handle current open connection
# ask the redirector process for stats
self.redirector.requestStats()
if self._softstop:
if self._listen is False:
self.proxy.rejecting()
self._listen = None
if self.client.softstop():
self._shutdown = True
# only change listening if we are not shutting down
elif self._listen is not None:
if self._listen:
self._shutdown = not self.proxy.accepting()
self._listen = None
else:
self.proxy.rejecting()
self._listen = None
if self._toggle_debug:
self._toggle_debug = False
self.log_writer.toggleDebug()
if self._decrease_spawn_limit:
count = self._decrease_spawn_limit
self.redirector.decreaseSpawnLimit(count)
self._decrease_spawn_limit = 0
if self._increase_spawn_limit:
count = self._increase_spawn_limit
self.redirector.increaseSpawnLimit(count)
self._increase_spawn_limit = 0
# cleanup idle connections
# TODO: track all idle connections, not just the ones that have never sent data
expired = self.reactor.client.expire()
if expired:
self.proxy.notifyClose(None, count=expired)
# report if we saw too many connections
if count_saturation == 0:
self.proxy.saturation()
self.web.saturation()
if self.configuration.daemon.poll_interfaces and count_interface == 0:
self.interfaces()
except KeyboardInterrupt:
self.log.critical('^C received')
self._shutdown = True
except OSError,e:
# This should never happen as we are limiting how many connections we accept
if e.errno == 24: # Too many open files
self.log.critical('Too many opened files, shutting down')
for line in traceback.format_exc().split('\n'):
self.log.critical(line)
self._shutdown = True
else:
self.log.critical('unrecoverable io error')
for line in traceback.format_exc().split('\n'):
self.log.critical(line)
self._shutdown = True
finally:
pass
# try:
# from exaproxy.leak import objgraph
# if objgraph:
# count += 1
# if count >= 30:
# print "*"*10, time.strftime('%d-%m-%Y %H:%M:%S')
# print objgraph.show_most_common_types(limit=20)
# print "*"*10
# print
# except KeyboardInterrupt:
# self.log.info('^C received')
# self._shutdown = True
def doStats (self, count_second, count_minute, stats_data):
# parse the data we were sent
stats = self.monitor.statistics(stats_data)
# save our monitoring stats
if count_second == 0:
ok = self.monitor.second(stats)
else:
ok = True
expired = 0
if ok is True and count_minute == 0:
ok = self.monitor.minute(stats)
return ok
def initialise (self):
self.daemon.daemonise()
self.pid.save()
# only start listening once we know we were able to fork our worker processes
tcp4 = self.configuration.tcp4
tcp6 = self.configuration.tcp6
icap = self.configuration.icap
if not has_ipv6 and (tcp6.listen or tcp6.out or icap.ipv6):
tcp6.listen = False
tcp6.out = False
self.log.critical('your python interpreter does not have ipv6 support !')
out = bool(tcp4.out or tcp6.out)
if not out:
self.log.critical('we need to use IPv4 or IPv6 for outgoing connection - both can not be disabled !')
listen = bool(tcp4.listen or tcp6.listen) or bool(icap.host or icap.ipv6)
if not listen:
self.log.critical('Not listening on either IPv4 or IPv6.')
ok = out and listen
if ok and tcp4.listen:
s = self.proxy.listen(tcp4.host,tcp4.port, tcp4.timeout, tcp4.backlog)
ok = bool(s)
if not ok:
self.log.critical('IPv4 proxy, unable to listen on %s:%s' % (tcp4.host,tcp4.port))
if ok and tcp6.listen:
s = self.proxy.listen(tcp6.host,tcp6.port, tcp6.timeout, tcp6.backlog)
ok = bool(s)
if not ok:
self.log.critical('IPv6 proxy, unable to listen on %s:%s' % (tcp6.host,tcp6.port))
if ok and icap.enable:
s = self.icap.listen(icap.host, icap.port, tcp4.timeout, tcp4.backlog)
ok = bool(s)
if not ok:
self.log.critical('ICAP server, unable to listen on %s:%s' % (icap.host, icap.port))
if ok and icap.enable and tcp6.listen:
s = self.icap.listen(icap.ipv6, icap.port, tcp4.timeout, tcp4.backlog)
ok = bool(s)
if not ok:
self.log.critical('ICAP server, unable to listen on %s:%s' % (icap.ipv6, icap.port))
if ok and self.configuration.web.enable:
s = self.web.listen(self.configuration.web.host,self.configuration.web.port, 10, 10)
ok = bool(s)
if not ok:
self.log.critical('internal web server, unable to listen on %s:%s' % (self.configuration.web.host, self.configuration.web.port))
return ok
def shutdown (self):
"""terminate all the current BGP connections"""
self.log.info('Performing shutdown')
try:
self.web.stop() # accept no new web connection
self.proxy.stop() # accept no new proxy connections
self.redirector.stop() # shut down redirector children
self.content.stop() # stop downloading data
self.client.stop() # close client connections
self.pid.remove()
self.interrupt_scheduler.stop()
except KeyboardInterrupt:
self.log.info('^C received while shutting down. Exiting immediately because you insisted.')
sys.exit()
def reload (self):
self.log.info('Performing reload of exaproxy %s' % self.configuration.proxy.version)
self.redirector.respawn()
|
|
import json
import itertools
import rtree
import requests
from parse import search
from geomet import wkt
from datetime import datetime, timedelta
from functools import partial, lru_cache
import pyproj
import numpy as np
from time import time
import logging
from shapely.geometry import shape, mapping, box
from shapely.geometry.polygon import Polygon
from shapely.geometry.multipolygon import MultiPolygon
from shapely.geometry.collection import GeometryCollection
from shapely.geometry.point import Point
from shapely.ops import unary_union, transform
__all__ = ['json2ogr', 'ogr2json', 'dissolve', 'intersect', 'project_local',
'project_global', 'buffer_to_dist', 'get_area', 'get_area_percent',
'esri_server2ogr', 'get_species_count', 'esri_server2histo',
'esri_count_groupby', 'cartodb2ogr', 'esri_count_30days',
'esri_last_instance', 'erase', 'get_date_from_timestamp',
'get_feature_count', 'test_ip', 'esri_attributes', 'get_presence',
'get_histo_loss_area', 'get_histo_pre2001_area', 'get_histo_total_area',
'get_area_by_attributes', 'get_geom_by_attributes', 'pad_counts',
'vals_by_year', 'split', 'split_features', 'get_counts_by_year',
'get_count_by_year', 'combine_counts_by_year', 'get_ok']
HA_CONVERSION = 10000
FEATURE_GEOG_THRESHOLD = 1.2
REQUEST_GEOG_THRESHOLD = 20
REQUEST_COORD_THRESHOLD = 20000
FUNCTION_COUNT = 0
def get_ok():
return 'ok'
def test_ip():
return requests.get('http://checkip.amazonaws.com').text.replace('\n', '')
def json2ogr(in_json):
'''
Convert geojson object to GDAL geometry
'''
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION json2ogr STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
if isinstance(in_json, str):
in_json = json.loads(in_json)
if not isinstance(in_json, dict):
raise ValueError('input json must be dictionary')
if 'features' not in in_json.keys():
raise ValueError('input json must contain features property')
for f in in_json['features']:
f['geometry'] = shape(f['geometry'])
if not f['geometry'].is_valid:
f['geometry'] = f['geometry'].buffer(0)
for i in range(len(in_json['features'])):
in_json['features'][i]['properties']['id'] = i
# logging.info('FUNCTION json2ogr STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return in_json
def ogr2json(featureset):
'''
Convert GDAL geometry to geojson object
'''
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION ogr2json STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
new_features = []
for f in featureset['features']:
new_features.append(dict(geometry=mapping(f['geometry']),
properties=f['properties'],
type=f['type']))
# f['geometry'] = mapping(f['geometry'])
new_featureset = dict(type=featureset['type'],
features=new_features)
if 'crs' in featureset.keys():
new_featureset['crs'] = featureset['crs']
# logging.info('FUNCTION ogr2json STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return json.dumps(new_featureset)
def explode(coords):
"""Explode a GeoJSON geometry's coordinates object and yield coordinate
tuples. As long as the input is conforming, the type of the geometry
doesn't matter.
https://gis.stackexchange.com/questions/90553/fiona-get-each-feature-
extent-bounds"""
for e in coords:
if isinstance(e, (float, int)):
yield coords
break
else:
for f in explode(e):
yield f
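# A tiny illustration of explode() on hand-made polygon coordinates
# (hypothetical data): nested rings are flattened into coordinate tuples.
#
#   list(explode([[(0, 0), (2, 0), (2, 1), (0, 1), (0, 0)]]))
#   # -> [(0, 0), (2, 0), (2, 1), (0, 1), (0, 0)]
#   # bounds()/bbox() below build on this to get (minx, miny, maxx, maxy).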
def bounds(f):
if isinstance(f['geometry'], dict):
geom = f['geometry']['coordinates']
else:
try:
geom = mapping(f['geometry'])['coordinates']
except Exception as e:
raise ValueError((str(e),f['geometry'],mapping(f['geometry'])))
x, y = zip(*list(explode(geom)))
return min(x), min(y), max(x), max(y)
def bbox(f):
tups = mapping(box(*bounds(f)))['coordinates']
return [[list(tup) for tup in tups[0]]]
def ogr2rings(f):
return [[list(tup) for tup in mapping(f['geometry'])['coordinates'][0]]]
# @lru_cache(5)
def esri_server2ogr(layer_endpoint, aoi, out_fields, where='1=1', token=''):
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION esri_server2ogr STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
url = layer_endpoint.replace('?f=pjson', '') + '/query'
params = {}
params['where'] = where
if 'objectid' not in out_fields:
out_fields = 'objectid,' + out_fields if out_fields else 'objectid'
params['outFields'] = out_fields
params['returnGeometry'] = True
params['returnM'] = False
params['returnZ'] = False
params['f'] = 'geojson'
params['geometryType'] = 'esriGeometryPolygon'
params['spatialRel'] = 'esriSpatialRelIntersects'
# if protected service, retrieve token
if token:
params['token'] = token
# iterate through aoi features (Esri does not accept multipart polygons
# as a spatial filter, and the aoi features may be too far apart to combine
# into one bounding box)
featureset = {'type': 'FeatureCollection', 'features': []}
features = []
objectids = []
if isinstance(aoi, str):
aoi = json.loads(aoi)
for f in aoi['features']:
params['geometry'] = str({'rings': bbox(f),
'spatialReference': {'wkid': 4326}})
req = requests.post(url, data=params)
req.raise_for_status()
try:
# response = json2ogr(req.text)
response = req.json()
assert 'features' in response
except:
raise ValueError((req.text, url, params))
# append response to full dataset, except features already included
for h in response['features']:
feat_id = ','.join([str(prop) for prop in h['properties'].values()])
if feat_id not in objectids:
features.append(h)
objectids.append(feat_id)
featureset = json2ogr(dict(type='FeatureCollection',
features=features))
# logging.info('FUNCTION esri_server2ogr STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return featureset
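# Hedged usage sketch (the endpoint URL and field names below are made up,
# purely to show the call shape expected by this module):
#
#   aoi = {'type': 'FeatureCollection',
#          'features': [{'type': 'Feature', 'properties': {},
#                        'geometry': {'type': 'Polygon',
#                                     'coordinates': [[(0, 0), (1, 0), (1, 1),
#                                                      (0, 1), (0, 0)]]}}]}
#   fc = esri_server2ogr('https://example.com/arcgis/rest/services/x/MapServer/0',
#                        aoi, 'name,iso')
#   # fc is a FeatureCollection with shapely geometries (converted by json2ogr)
#   # and de-duplicated features gathered per AOI bounding box.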
def esri_server2histo(layer_endpoint, aoi):
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION esri_server2histo STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
url = layer_endpoint.replace('?f=pjson', '') + '/computeHistograms'
params = {}
params['f'] = 'json'
params['geometryType'] = 'esriGeometryPolygon'
params['spatialRel'] = 'esriSpatialRelIntersects'
params['returnGeometry'] = True
params['where'] = '1=1'
if isinstance(aoi, str):
aoi = json.loads(aoi)
# if featureset['features']:
# f = featureset['features'][0]
histogram = [0] * 256
for f in aoi['features']:
params['geometry'] = str({'rings': ogr2rings(f),
'spatialReference': {'wkid': 4326}})
req = requests.post(url, data=params)
req.raise_for_status()
try:
response = req.json()['histograms']
if response:
for i, count in enumerate(response[0]['counts']):
histogram[i] += count
except Exception as e:
raise ValueError('{} --- {}'.format(e, req.text))
# logging.info('FUNCTION esri_server2histo STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return histogram
def esri_attributes(layer_endpoint, aoi, out_fields):
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION esri_attributes STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
url = layer_endpoint.replace('?f=pjson', '') + '/query'
params = {}
params['f'] = 'json'
params['geometryType'] = 'esriGeometryPolygon'
params['where'] = '1=1'
params['spatialRel'] = 'esriSpatialRelIntersects'
params['returnGeometry'] = False
params['outFields'] = out_fields
if isinstance(aoi, str):
aoi = json.loads(aoi)
objectids = []
attributes = []
for f in aoi['features']:
params['geometry'] = str({'rings': ogr2rings(f),
'spatialReference': {'wkid': 4326}})
req = requests.post(url, data=params)
req.raise_for_status()
# return [feat['attributes'] for feat in req.json()['features']]
for h in req.json()['features']:
feat_id = ','.join([str(prop) for prop in h['attributes'].values()])
if feat_id not in objectids:
attributes.append(h['attributes'])
objectids.append(feat_id)
# logging.info('FUNCTION esri_attributes STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return attributes
def esri_count_groupby(layer_endpoint, aoi, count_fields):
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION esri_count_groupby STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
url = layer_endpoint.replace('?f=pjson', '') + '/query'
params = {}
params['f'] = 'json'
params['geometryType'] = 'esriGeometryPolygon'
params['where'] = '1=1'
params['spatialRel'] = 'esriSpatialRelIntersects'
params['returnGeometry'] = False
params['groupByFieldsForStatistics'] = count_fields
count_fields = count_fields.split(',')
params['outStatistics'] = json.dumps([{
'statisticType': 'count',
'onStatisticField': count_fields[0],
'outStatisticFieldName': 'count'
}])
if isinstance(aoi, str):
aoi = json.loads(aoi)
counts = {}
for f in aoi['features']:
params['geometry'] = str({'rings': ogr2rings(f),
'spatialReference': {'wkid': 4326}})
req = requests.post(url, data=params)
req.raise_for_status()
try:
f_counts = {'-'.join([str(item['attributes'][field]) for field in
count_fields]): item['attributes']['count']
for item in req.json()['features']}
for key, val in f_counts.items():
if key not in counts:
counts[key] = val
else:
counts[key] += val
except Exception as e:
raise ValueError((str(e), url, params, req.text))
# logging.info('FUNCTION esri_count_groupby STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return counts
def esri_count_30days(layer_endpoint, aoi, date_field):
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION esri_count_30days STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
url = layer_endpoint.replace('?f=pjson', '') + '/query'
date = (datetime.utcnow() - timedelta(days=30)).strftime('%Y-%m-%d')
params = {}
params['f'] = 'json'
params['geometryType'] = 'esriGeometryPolygon'
params['where'] = "{} >= date '{}'".format(date_field, date)
params['spatialRel'] = 'esriSpatialRelIntersects'
params['returnGeometry'] = False
params['returnCountOnly'] = True
if isinstance(aoi, str):
aoi = json.loads(aoi)
count = 0
for f in aoi['features']:
params['geometry'] = str({'rings': ogr2rings(f),
'spatialReference': {'wkid': 4326}})
req = requests.post(url, data=params)
req.raise_for_status()
count += req.json()['count']
# logging.info('FUNCTION esri_count_30days STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return count
def esri_last_instance(layer_endpoint, aoi, field):
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION esri_last_instance STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
url = layer_endpoint.replace('?f=pjson', '') + '/query'
params = {}
params['f'] = 'json'
params['geometryType'] = 'esriGeometryPolygon'
params['where'] = '1=1'
params['spatialRel'] = 'esriSpatialRelIntersects'
params['returnGeometry'] = False
params['outFields'] = field
params['orderByFields'] = field
params['returnDistinctValues'] = True
if isinstance(aoi, str):
aoi = json.loads(aoi)
last_instance = None
for f in aoi['features']:
params['geometry'] = str({'rings': ogr2rings(f),
'spatialReference': {'wkid': 4326}})
req = requests.post(url, data=params)
req.raise_for_status()
try:
instances = [item['attributes'][field] for item in
req.json()['features']]
if instances:
if not last_instance:
last_instance = instances[-1]
elif instances[-1] > last_instance:
last_instance = instances[-1]
except Exception as e:
raise ValueError((str(e), url, params, req.text))
# logging.info('FUNCTION esri_last_instance STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return last_instance
# @lru_cache(5)
def cartodb2ogr(service_endpoint, aoi, out_fields, where='', _=''):
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION cartodb2ogr STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
endpoint_template = 'https://{}.carto.com/tables/{}/'
username, table = search(endpoint_template, service_endpoint + '/')
url = 'https://{username}.carto.com/api/v2/sql'.format(username=username)
if isinstance(aoi, str):
aoi = json.loads(aoi)
params = {}
fields = ['ST_AsGeoJSON(the_geom) as geometry']
out_fields = out_fields.split(',')
for field in out_fields:
if field:
fields.append('{field} as {field}'.format(field=field))
temp = "ST_Intersects(ST_Buffer(ST_GeomFromText('{}',4326),0),the_geom)"
features = []
objectids = []
for f in aoi['features']:
where_clause = temp.format(wkt.dumps({'type': 'Polygon',
'coordinates': bbox(f)}))
if where and not where == '1=1':
where_clause += ' AND {}'.format(where)
q = 'SELECT {fields} FROM {table} WHERE {where}'
params = {'q': q.format(fields=','.join(fields), table=table,
where=where_clause)}
try:
req = requests.get(url, params=params)
req.raise_for_status()
except Exception as e:
raise ValueError((e, url, bbox(f)))
response = json.loads(req.text)['rows']
features += [{
'type': 'Feature',
'geometry': json.loads(h['geometry']),
'properties': {field: h[field] for field in out_fields if field}
} for h in response]
featureset = json2ogr({
'type': 'FeatureCollection',
'features': features
})
# logging.info('FUNCTION cartodb2ogr STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return featureset
def get_split_boxes(f):
'''
Check whether the width or height of the feature's bounding box exceeds
FEATURE_GEOG_THRESHOLD. If so, return two halves of the bounding box
(left/right or bottom/top) for intersecting with the geometry
'''
x1, y1, x2, y2 = bounds(f)
if (x2 - x1 > FEATURE_GEOG_THRESHOLD or y2 - y1 > FEATURE_GEOG_THRESHOLD):
if x2 - x1 > y2 - y1:
x_split = x1 + (x2 - x1) / 2
return [box(x1, y1, x_split, y2), box(x_split, y1, x2, y2)]
else:
y_split = y1 + (y2 - y1) / 2
return [box(x1, y1, x2, y_split), box(x1, y_split, x2, y2)]
return None
def split_multipolygon(f):
'''
Split a multipolygon into its constituent polygons
'''
new_features = [{'type': 'Feature',
'properties': f['properties'],
'geometry': poly} for poly in f['geometry']]
return new_features
def split_polygon(f):
'''
Split a complex geometry in half repeatedly until every piece fits within
the bounding box size constraint (FEATURE_GEOG_THRESHOLD)
'''
bbs = get_split_boxes(f)
new_features = []
if bbs:
for bb in bbs:
geom = f['geometry']
if not geom.is_valid:
geom = geom.buffer(0)
split_feat = {'type': 'Feature',
'properties': f['properties'],
'geometry': geom.intersection(bb)}
if split_feat['geometry'].type == 'MultiPolygon':
poly_feats = split_multipolygon(split_feat)
for h in poly_feats:
new_features.extend(split_polygon(h))
else:
new_features.extend(split_polygon(split_feat))
else:
new_features.append(f)
return new_features
def split(featureset):
'''
First split all multipolygons into their constituent polygons. Then check each
against vertex and bounding box size constraints, and split into multiple
polygons using recursive halving if necessary
'''
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION split STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
new_features = []
split_id = 0
for f in featureset['features']:
f['properties']['split_id'] = split_id
split_id += 1
if f['geometry'].type == 'MultiPolygon':
poly_feats = split_multipolygon(f)
for h in poly_feats:
new_features.extend(split_polygon(h))
elif f['geometry'].type == 'Polygon':
new_features.extend(split_polygon(f))
new_featureset = dict(type=featureset['type'],
features=new_features)
if 'crs' in featureset.keys():
new_featureset['crs'] = featureset['crs']
# logging.info('FUNCTION split STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return new_featureset
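# Worked example of the splitting logic (hypothetical geometry): a 3-degree-wide
# box exceeds FEATURE_GEOG_THRESHOLD (1.2), so split_polygon() halves it along
# its longer axis and recurses until every piece fits under the threshold.
#
#   wide = {'type': 'Feature', 'properties': {}, 'geometry': box(0, 0, 3, 1)}
#   pieces = split_polygon(wide)
#   # -> 4 features, each 0.75 degrees wide (0.75 <= FEATURE_GEOG_THRESHOLD)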
def get_centroid_bbox(features):
x, y = zip(*[f['properties']['centroid'] for f in features])
return min(x), min(y), max(x), max(y)
def split_features(featureset):
'''
Sort features into groups where all features have centroids clustered
within the geographic size limit and the total number of coordinates is
beneath the coordinate length limit
'''
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION split_features STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
feature_groups = [featureset['features']]
# NOTE: clustering by centroid proximity and coordinate count is not
# implemented yet; all features are returned as a single group.
# logging.info('FUNCTION split_features STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return feature_groups
def condense_properties(properties):
'''
Combine common properties with duplicate values from all features
being dissolved
'''
return {key: val for key, val in properties[0].items()
if all(key in p.keys() and val == p[key] for p in properties)}
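# Tiny worked example (made-up property dicts): only keys whose values agree
# across every feature survive the dissolve.
#
#   condense_properties([{'iso': 'BRA', 'year': 2000},
#                        {'iso': 'BRA', 'year': 2001}])
#   # -> {'iso': 'BRA'}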
def dissolve(featureset, fields=None):
'''
Dissolve a set of geometries on a field, or dissolve fully to a single
feature if no field is provided
'''
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION dissolve STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
if fields:
def sort_func(k):
return ','.join([str(k['properties'][field])
for field in fields.split(',')])
else:
sort_func = None
new_features = []
dissolve_id = 0
try:
assert isinstance(featureset, dict)
assert 'features' in featureset.keys()
assert isinstance(featureset['features'], list)
except Exception as e:
raise ValueError((str(e),featureset))
if len(featureset['features']) > 0:
if sort_func:
features = sorted(featureset['features'], key=sort_func)
for key, group in itertools.groupby(features, key=sort_func):
properties, geoms = zip(*[(f['properties'],
f['geometry']) for f in group])
if geoms and not any(geom is None for geom in geoms):
try:
new_geom = unary_union(geoms)
except Exception as e:
new_geom = unary_union([geom if geom.is_valid
else geom.buffer(0)
for geom in geoms])
new_properties = condense_properties(properties)
new_properties['dissolve_id'] = dissolve_id
dissolve_id += 1
new_features.append(dict(type='Feature',
geometry=new_geom,
properties=new_properties))
else:
geoms = [f['geometry'] if f['geometry'].is_valid else
f['geometry'].buffer(0) for f in featureset['features']]
new_properties = condense_properties([f['properties'] for f in
featureset['features']])
new_properties['dissolve_id'] = dissolve_id
dissolve_id += 1
new_features.append(dict(type='Feature',
geometry=unary_union(geoms),
properties=new_properties))
new_featureset = dict(type=featureset['type'],
features=new_features)
if 'crs' in featureset.keys():
new_featureset['crs'] = featureset['crs']
# logging.info('FUNCTION dissolve STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return new_featureset
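# Hedged usage note ('iso' is a hypothetical property name):
#
#   merged = dissolve(featureset)          # one feature: unary_union of everything
#   by_iso = dissolve(featureset, 'iso')   # one feature per distinct 'iso' value
#
# Every output feature gets a sequential 'dissolve_id' plus the properties the
# whole group agrees on (see condense_properties above).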
def index_featureset(featureset):
'''
Build an rtree spatial index over the bounding boxes of a featureset's
features
'''
index = rtree.index.Index()
for i, f in enumerate(featureset['features']):
geom = f['geometry']
if isinstance(geom, GeometryCollection):
minx = np.min([item.bounds[0] for item in geom])
miny = np.min([item.bounds[1] for item in geom])
maxx = np.max([item.bounds[2] for item in geom])
maxy = np.max([item.bounds[3] for item in geom])
index.insert(i, (minx, miny, maxx, maxy))
else:
index.insert(i, geom.bounds)
return index
def intersect(featureset1, featureset2):
'''
Intersect the features of featureset1 with those of featureset2, using an
rtree index on featureset2 to limit the candidate pairs tested
'''
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION intersect STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
index = index_featureset(featureset2)
new_features = []
for f in featureset1['features']:
feat1 = f
geom1 = f['geometry']
for fid in list(index.intersection(geom1.bounds)):
feat2 = featureset2['features'][fid]
geom2 = feat2['geometry']
if not geom1.is_valid:
# raise ValueError('Geometry from featureset1 is not valid')
geom1 = geom1.buffer(0)
if not geom2.is_valid:
# raise ValueError('Geometry from featureset2 is not valid')
geom2 = geom2.buffer(0)
if geom1.intersects(geom2): # TODO: optimize to on intersect call?
new_geom = geom1.intersection(geom2)
new_feat = dict(properties={**feat2['properties'],
**feat1['properties']},
geometry=new_geom,
type='Feature')
new_features.append(new_feat)
new_featureset = dict(type=featureset2['type'],
features=new_features)
if 'crs' in featureset2.keys():
new_featureset['crs'] = featureset2['crs']
# logging.info('FUNCTION intersect STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return new_featureset
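# Hedged usage sketch (inputs are hypothetical FeatureCollections that have
# already been through json2ogr, so their geometries are shapely objects):
#
#   overlap = intersect(aoi_fc, admin_fc)
#   # each output feature has geometry = geom1.intersection(geom2) and
#   # properties = {**admin_properties, **aoi_properties} for every pair of
#   # features whose rtree bounding boxes and geometries actually intersect.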
def erase(featureset, erase_featureset):
'''
Remove the areas covered by erase_featureset from the features of
featureset wherever the two intersect
'''
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION erase STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
index = index_featureset(erase_featureset)
new_features = []
for f in featureset['features']:
feat = f
geom = f['geometry']
for fid in list(index.intersection(geom.bounds)):
erase_feat = erase_featureset['features'][fid]
erase_geom = erase_feat['geometry']
if geom.intersects(erase_geom):
new_geom = geom.difference(erase_geom)
new_feat = dict(properties={**feat['properties']},
geometry=new_geom,
type='Feature')
new_features.append(new_feat)
new_featureset = dict(type=featureset['type'],
features=new_features)
if 'crs' in featureset.keys():
new_featureset['crs'] = featureset['crs']
# logging.info('FUNCTION erase STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return new_featureset
def project_feature(f, project):
if isinstance(f['geometry'], Polygon):
geom = Polygon(f['geometry'])
elif isinstance(f['geometry'], MultiPolygon):
geom = MultiPolygon(f['geometry'])
elif isinstance(f['geometry'], GeometryCollection):
geom = GeometryCollection(f['geometry'])
elif isinstance(f['geometry'], Point):
geom = Point(f['geometry'])
projected_geom = transform(project, geom)
new_feat = dict(properties=f['properties'],
geometry=projected_geom,
type='Feature')
return new_feat
def project_local(featureset):
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION project_local STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
if ('crs' in featureset.keys() and
featureset['crs']['properties']['name'] ==
'urn:ogc:def:uom:EPSG::9102'):
return featureset
name = 'urn:ogc:def:uom:EPSG::9102'
# get cumulative centroid of all features
new_features = []
for f in featureset['features']:
if isinstance(f['geometry'], GeometryCollection):
x = np.mean([geom_item.centroid.x for geom_item in f['geometry']])
y = np.mean([geom_item.centroid.y for geom_item in f['geometry']])
else:
x = f['geometry'].centroid.x
y = f['geometry'].centroid.y
# define local projection
proj4 = '+proj=aeqd +lat_0={} +lon_0={} +x_0=0 +y_0=0 +datum=WGS84 \
+units=m +no_defs +R=6371000 '.format(y, x)
# define projection transformation
project = partial(pyproj.transform,
pyproj.Proj(init='epsg:4326'),
pyproj.Proj(proj4))
# project features and add projection info
new_feat = project_feature(f, project)
new_feat['properties']['centroid'] = (x,y)
new_features.append(new_feat)
new_featureset = dict(type=featureset['type'],
features=new_features,
crs=dict(type="name",
properties=dict(name=name)))
# logging.info('FUNCTION project_local STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return new_featureset
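# Sketch of the round trip (relies on the pyproj 1.x partial-transform style
# used above): each feature is reprojected into its own azimuthal equidistant
# CRS centred on the feature's centroid, so lengths and areas are in metres;
# project_global() below inverts the transform using the stored centroid.
#
#   local = project_local(featureset)    # metres, crs 'urn:ogc:def:uom:EPSG::9102'
#   back  = project_global(local)        # back to degrees, crs 'EPSG:4326'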
def project_global(featureset):
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION project_global STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
if ('crs' in featureset.keys() and
featureset['crs']['properties']['name'] == 'EPSG:4326'):
return featureset
elif 'crs' not in featureset.keys():
raise ValueError('Local projection must have crs info to reproject')
name = 'EPSG:4326'
# [x, y] = featureset['crs']['properties']['centroid']
new_features = []
for f in featureset['features']:
(x, y) = f['properties']['centroid']
proj4 = '+proj=aeqd +lat_0={} +lon_0={} +x_0=0 +y_0=0 +datum=WGS84 \
+units=m +no_defs +R=6371000 '.format(y, x)
project = partial(pyproj.transform,
pyproj.Proj(proj4),
pyproj.Proj(init='epsg:4326'))
new_feat = project_feature(f, project)
new_feat['properties'] = {key: val for key, val in
new_feat['properties'].items()
if not key == 'centroid'}
new_features.append(new_feat)
new_featureset = dict(type=featureset['type'],
features=new_features,
crs=dict(type="name",
properties=dict(name=name)))
# logging.info('FUNCTION project_global STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return new_featureset
def buffer_to_dist(featureset, distance):
'''
Buffer a geometry with a given distance (assumed to be kilometers)
'''
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION buffer_to_dist STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
if not (featureset['crs']['properties']['name'] ==
'urn:ogc:def:uom:EPSG::9102'):
raise ValueError('geometries must be projected with the World ' +
'Azimuthal Equidistant coordinate system')
new_features = []
for f in featureset['features']:
geom = f['geometry']
buffered_geom = geom.buffer(int(distance) * 1000.0)
new_feat = dict(properties=f['properties'],
geometry=buffered_geom,
type='Feature')
new_features.append(new_feat)
new_featureset = dict(type=featureset['type'],
features=new_features)
if 'crs' in featureset.keys():
new_featureset['crs'] = featureset['crs']
# logging.info('FUNCTION buffer_to_dist STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return new_featureset
def get_presence(attributes, field):
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION get_presence STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
result = any(item[field] > 0 for item in attributes)
# logging.info('FUNCTION get_presence STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return result
def get_area_by_attributes(featureset, posfields, negfields):
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION get_area_by_attributes STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
posfields = posfields.split(',') if posfields else []
negfields = negfields.split(',') if negfields else []
try:
area_m = sum([f['geometry'].area for f in featureset['features']
if all(f['properties'][fld] and
f['properties'][fld] > 0 for fld in posfields)
and all(f['properties'][fld] and
f['properties'][fld] < 0 for fld in negfields)])
except:
raise ValueError([f['properties'] for field in posfields for f in featureset['features'] if f['properties'][field] is None])
# logging.info('FUNCTION get_area_by_attributes STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return area_m / HA_CONVERSION
def get_geom_by_attributes(featureset, posfields, negfields):
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION get_geom_by_attributes STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
posfields = posfields.split(',') if posfields else []
negfields = negfields.split(',') if negfields else []
features = [f for f in featureset['features']
if all(f['properties'][fld] and f['properties'][fld] > 0
for fld in posfields)
and all(f['properties'][fld] and f['properties'][fld] < 0
for fld in negfields)]
new_featureset = dict(type=featureset['type'],
features=features)
if 'crs' in featureset.keys():
new_featureset['crs'] = featureset['crs']
# logging.info('FUNCTION get_geom_by_attributes STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return new_featureset
# def get_counts_by_year(layer_endpoints, featureset):
# global FUNCTION_COUNT
# FUNCTION_COUNT += 1
# logging.info('FUNCTION get_counts_by_year STEP {} START'.format(FUNCTION_COUNT))
# t0 = time()
# counts = {}
# for layer_endpoint in layer_endpoints.split(','):
# yr = layer_endpoint.replace('/ImageServer','')[-4:]
# frequencies = esri_server2histo(layer_endpoint, featureset)
# counts[yr] = sum([i * freq for i, freq in enumerate(frequencies)])
# logging.info('FUNCTION get_counts_by_year STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
# return counts
def get_count_by_year(layer_endpoint, featureset, yr):
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION get_counts_by_year STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
frequencies = esri_server2histo(layer_endpoint.replace('2000', yr), featureset)
# logging.info('FUNCTION get_counts_by_year STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return (yr, sum([i * freq for i, freq in enumerate(frequencies)]))
def combine_counts_by_year(*counts):
# logging.info(counts)
return {yr: count for yr, count in counts}
# ------------------------- Calculation Functions --------------------------
def validate_featureset(featureset, fields=[None]):
'''
Check that every feature carries the given category fields; a featureset
with multiple features must be dissolved or provide a category field
'''
valid_fields = [f for f in fields if f]
for field in valid_fields:
for f in featureset['features']:
if field not in f['properties'].keys():
raise ValueError('Featureset with category field must ' +
'have category field as a property of ' +
'every feature')
if len(valid_fields) == 0:
if len(featureset['features']) > 1:
raise ValueError('Featureset with multiple features must ' +
'be dissolved or have a category field in ' +
'order to calculate statistics')
def get_area(featureset, field=None):
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION get_area STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
# validate_featureset(featureset, [field])
if field:
area = {}
categories = set([f['properties'][field]
for f in featureset['features']])
for cat in categories:
area[cat] = sum([f['geometry'].area / HA_CONVERSION
for f in featureset['features']
if f['properties'][field] == cat])
else:
if featureset['features']:
area = sum([f['geometry'].area / HA_CONVERSION
for f in featureset['features']])
else:
area = 0
# logging.info('FUNCTION get_area STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return area
def get_histo_loss_area(histograms):
'''
Returns the tree cover loss area for each year from 2001 through 2013
'''
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION get_histo_loss_area STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
year_indices = {(i+2001): i+1 for i in range(13)}
histo_area_loss = {year: 0.09 * histograms[year_index] for year, year_index in year_indices.items()}
# logging.info('FUNCTION get_histo_loss_area STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return histo_area_loss
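# The 0.09 factor converts a pixel count to hectares, assuming 30 m x 30 m
# pixels (30 * 30 = 900 m2 = 0.09 ha). Worked example with a made-up histogram:
# if histograms[1] == 1000 then get_histo_loss_area(histograms)[2001] == 90.0.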
def get_histo_pre2001_area(histograms):
'''
Returns the tree cover loss area aggregated over the years prior to 2001
'''
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION get_histo_pre2001_area STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
histo_area_loss = 0.09 * histograms[14]
# logging.info('FUNCTION get_histo_pre2001_area STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return histo_area_loss
def get_histo_total_area(histograms):
'''
Returns the area for each histogram year within the AOI
'''
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION get_histo_total_area STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
year_indices = {(i+2000): i for i in range(15)}
histo_area_total = {year: 0.09 * histograms[year_index] for year, year_index in year_indices.items()}
# logging.info('FUNCTION get_histo_total_area STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return histo_area_total
def get_date_from_timestamp(timestamp):
'''
Convert a timestamp (which may be in milliseconds, and is assumed to be
UTC) to a date string of the form YYYY-MM-DD
'''
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION get_date_from_timestamp STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
if not timestamp:
# logging.info('FUNCTION get_date_from_timestamp STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return None
if timestamp > 100000000000:
timestamp = timestamp/1000
# logging.info('FUNCTION get_date_from_timestamp STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d')
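# Quick examples (arbitrary instants): second- and millisecond-precision
# timestamps give the same date because values above 1e11 are divided by 1000.
#
#   get_date_from_timestamp(1500000000)      # -> '2017-07-14'
#   get_date_from_timestamp(1500000000000)   # -> '2017-07-14'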
def get_species_count(intersection, field):
'''
Count number of unique species found within the features of an
intersection with the user AOI
'''
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION get_species_count STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
species_list = []
for f in intersection['features']:
species_string = f['properties'][field][1:-1].replace('"', '')
species_list += species_string.split(',')
species_set = set(species_list)
# logging.info('FUNCTION get_species_count STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return len(species_set)
def get_feature_count(intersection, field):
'''
Count the number of features, or the number of features for each
value in the intersection's field property
'''
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION get_feature_count STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
if field:
counts = {}
for f in intersection['features']:
if f['properties'][field] in counts.keys():
counts[f['properties'][field]] += 1
else:
counts[f['properties'][field]] = 1
# logging.info('FUNCTION get_feature_count STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return counts
else:
# logging.info('FUNCTION get_feature_count STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return len(intersection['features'])
def pad_counts(counts, start_yr, end_yr):
'''
Pad a result object of fire counts by month or year with zeros
for all missing months or years
'''
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION pad_counts STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
if counts:
if '-' in counts.keys():
new_counts = {'{}-{}'.format(yr, mn): 0 for mn in range(1, 13)
for yr in range(int(start_yr), int(end_yr)+1)}
else:
new_counts = {str(yr): 0 for yr in range(int(start_yr),
int(end_yr)+1)}
else:
new_counts = {}
for key in new_counts.keys():
if key in counts.keys():
new_counts[key] = counts[key]
# logging.info('FUNCTION pad_counts STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return new_counts
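# Worked example (hypothetical yearly counts): missing years are padded with 0.
#
#   pad_counts({'2012': 3, '2014': 1}, 2011, 2014)
#   # -> {'2011': 0, '2012': 3, '2013': 0, '2014': 1}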
def vals_by_year(val, start_yr, end_yr):
'''
Store value in a by-year object (for consistency with the rest
of the Palm Risk tool)
'''
global FUNCTION_COUNT
FUNCTION_COUNT += 1
# logging.info('FUNCTION vals_by_year STEP {} START'.format(FUNCTION_COUNT))
t0 = time()
result = {str(yr): val for yr in range(int(start_yr), int(end_yr)+1)}
# logging.info('FUNCTION vals_by_year STEP {} DONE - {} SECONDS'.format(FUNCTION_COUNT, time()-t0))
return result
def is_valid(analysis_method):
'''
Validate that method exists
'''
return analysis_method in __all__
|
|
import os, sys
from pip.req import InstallRequirement, RequirementSet
from pip.req import parse_requirements
from pip.log import logger
from pip.locations import build_prefix, src_prefix
from pip.basecommand import Command
from pip.index import PackageFinder
from pip.exceptions import InstallationError
class InstallCommand(Command):
name = 'install'
usage = '%prog [OPTIONS] PACKAGE_NAMES...'
summary = 'Install packages'
bundle = False
def __init__(self):
super(InstallCommand, self).__init__()
self.parser.add_option(
'-e', '--editable',
dest='editables',
action='append',
default=[],
metavar='VCS+REPOS_URL[@REV]#egg=PACKAGE',
help='Install a package directly from a checkout. Source will be checked '
'out into src/PACKAGE (lower-case) and installed in-place (using '
'setup.py develop). You can run this on an existing directory/checkout (like '
'pip install -e src/mycheckout). This option may be provided multiple times. '
'Possible values for VCS are: svn, git, hg and bzr.')
self.parser.add_option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='FILENAME',
help='Install all the packages listed in the given requirements file. '
'This option can be used multiple times.')
self.parser.add_option(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='URL',
help='URL to look for packages at')
self.parser.add_option(
'-i', '--index-url', '--pypi-url',
dest='index_url',
metavar='URL',
default='http://pypi.python.org/simple/',
help='Base URL of Python Package Index (default %default)')
self.parser.add_option(
'--extra-index-url',
dest='extra_index_urls',
metavar='URL',
action='append',
default=[],
help='Extra URLs of package indexes to use in addition to --index-url')
self.parser.add_option(
'--no-index',
dest='no_index',
action='store_true',
default=False,
help='Ignore package index (only looking at --find-links URLs instead)')
self.parser.add_option(
'-M', '--use-mirrors',
dest='use_mirrors',
action='store_true',
default=False,
help='Use the PyPI mirrors as a fallback in case the main index is down.')
self.parser.add_option(
'--mirrors',
dest='mirrors',
metavar='URL',
action='append',
default=[],
help='Specific mirror URLs to query when --use-mirrors is used')
self.parser.add_option(
'-b', '--build', '--build-dir', '--build-directory',
dest='build_dir',
metavar='DIR',
default=None,
help='Unpack packages into DIR (default %s) and build from there' % build_prefix)
self.parser.add_option(
'-d', '--download', '--download-dir', '--download-directory',
dest='download_dir',
metavar='DIR',
default=None,
help='Download packages into DIR instead of installing them')
self.parser.add_option(
'--download-cache',
dest='download_cache',
metavar='DIR',
default=None,
help='Cache downloaded packages in DIR')
self.parser.add_option(
'--src', '--source', '--source-dir', '--source-directory',
dest='src_dir',
metavar='DIR',
default=None,
help='Check out --editable packages into DIR (default %s)' % src_prefix)
self.parser.add_option(
'-U', '--upgrade',
dest='upgrade',
action='store_true',
help='Upgrade all packages to the newest available version')
self.parser.add_option(
'-I', '--ignore-installed',
dest='ignore_installed',
action='store_true',
help='Ignore the installed packages (reinstalling instead)')
self.parser.add_option(
'--no-deps', '--no-dependencies',
dest='ignore_dependencies',
action='store_true',
default=False,
help='Ignore package dependencies')
self.parser.add_option(
'--no-install',
dest='no_install',
action='store_true',
help="Download and unpack all packages, but don't actually install them")
self.parser.add_option(
'--no-download',
dest='no_download',
action="store_true",
help="Don't download any packages, just install the ones already downloaded "
"(completes an install run with --no-install)")
self.parser.add_option(
'--install-option',
dest='install_options',
action='append',
help="Extra arguments to be supplied to the setup.py install "
"command (use like --install-option=\"--install-scripts=/usr/local/bin\"). "
"Use multiple --install-option options to pass multiple options to setup.py install. "
"If you are using an option with a directory path, be sure to use absolute path.")
self.parser.add_option(
'--global-option',
dest='global_options',
action='append',
help="Extra global options to be supplied to the setup.py"
"call before the install command")
self.parser.add_option(
'--user',
dest='use_user_site',
action='store_true',
help='Install to user-site')
def _build_package_finder(self, options, index_urls):
"""
Create a package finder appropriate to this install command.
This method is meant to be overridden by subclasses, not
called directly.
"""
return PackageFinder(find_links=options.find_links,
index_urls=index_urls,
use_mirrors=options.use_mirrors,
mirrors=options.mirrors)
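# A minimal sketch of the kind of subclass the docstring above anticipates
# (hypothetical command, shown only to illustrate the override point):
#
#   class MirrorOnlyInstallCommand(InstallCommand):
#       def _build_package_finder(self, options, index_urls):
#           # force mirror usage regardless of the command-line flags
#           return PackageFinder(find_links=options.find_links,
#                                index_urls=index_urls,
#                                use_mirrors=True,
#                                mirrors=options.mirrors)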
def run(self, options, args):
if not options.build_dir:
options.build_dir = build_prefix
if not options.src_dir:
options.src_dir = src_prefix
if options.download_dir:
options.no_install = True
options.ignore_installed = True
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
if options.use_user_site:
install_options.append('--user')
global_options = options.global_options or []
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.notify('Ignoring indexes: %s' % ','.join(index_urls))
index_urls = []
finder = self._build_package_finder(options, index_urls)
requirement_set = RequirementSet(
build_dir=options.build_dir,
src_dir=options.src_dir,
download_dir=options.download_dir,
download_cache=options.download_cache,
upgrade=options.upgrade,
ignore_installed=options.ignore_installed,
ignore_dependencies=options.ignore_dependencies)
for name in args:
requirement_set.add_requirement(
InstallRequirement.from_line(name, None))
for name in options.editables:
requirement_set.add_requirement(
InstallRequirement.from_editable(name, default_vcs=options.default_vcs))
for filename in options.requirements:
for req in parse_requirements(filename, finder=finder, options=options):
requirement_set.add_requirement(req)
if not requirement_set.has_requirements:
if options.find_links:
raise InstallationError('You must give at least one '
'requirement to %s (maybe you meant "pip install %s"?)'
% (self.name, " ".join(options.find_links)))
raise InstallationError('You must give at least one requirement '
'to %(name)s (see "pip help %(name)s")' % dict(name=self.name))
if (options.use_user_site and
sys.version_info < (2, 6)):
raise InstallationError('--user is only supported in Python version 2.6 and newer')
import setuptools
if (options.use_user_site and
requirement_set.has_editables and
not getattr(setuptools, '_distribute', False)):
raise InstallationError('--user --editable not supported with setuptools, use distribute')
if not options.no_download:
requirement_set.prepare_files(finder, force_root_egg_info=self.bundle, bundle=self.bundle)
else:
requirement_set.locate_files()
if not options.no_install and not self.bundle:
requirement_set.install(install_options, global_options)
installed = ' '.join([req.name for req in
requirement_set.successfully_installed])
if installed:
logger.notify('Successfully installed %s' % installed)
elif not self.bundle:
downloaded = ' '.join([req.name for req in
requirement_set.successfully_downloaded])
if downloaded:
logger.notify('Successfully downloaded %s' % downloaded)
elif self.bundle:
requirement_set.create_bundle(self.bundle_filename)
logger.notify('Created bundle in %s' % self.bundle_filename)
# Clean up
if not options.no_install:
requirement_set.cleanup_files(bundle=self.bundle)
return requirement_set
InstallCommand()
|
|
# -*- coding: utf-8 -*-
""" Survey Module -- a tool to create surveys.
@author: Robert O'Connor
"""
prefix = request.controller
resourcename = request.function
if prefix not in deployment_settings.modules:
session.error = T("Module disabled!")
redirect(URL(r=request, c="default", f="index"))
from gluon.html import *
from gluon.sqlhtml import SQLFORM
response.menu_options = [
[T("Templates"), False, URL(r=request, f="template"),[
[T("List"), False, URL(r=request, f="template")],
[T("Add"), False, URL(r=request, f="template", args="create")]
]],
[T("Series"), False, URL(r=request, f="series"),[
[T("List"), False, URL(r=request, f="series")],
[T("Add"), False, URL(r=request, f="series", args="create")]
]]]
def index():
""" Module's Home Page """
module_name = deployment_settings.modules[prefix].name_nice
response.title = module_name
return dict(module_name=module_name)
def template():
""" RESTful CRUD controller """
tablename = "%s_%s" % (prefix, resourcename)
table = db[tablename]
def prep(r):
s3xrc.model.configure(r.table,
create_next = URL(r=request, c="survey", f="questions"),
update_next = URL(r=request, c="survey", f="questions"))
return True
response.s3.prep = prep
table.uuid.requires = IS_NOT_ONE_OF(db,"%s.uuid" % tablename)
table.name.requires = IS_NOT_EMPTY()
table.name.label = T("Survey Name")
table.description.label = T("Description")
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_create = T("Add Survey Template"),
title_display = T("Survey Template Details"),
title_list = T("List Survey Templates"),
title_update = T("Edit Survey Template"),
subtitle_create = T("Add New Survey Template"),
subtitle_list = T("Survey Templates"),
label_list_button = T("List Survey Templates"),
label_create_button = T("Add Survey Template"),
label_delete_button = T("Delete Survey Template"),
msg_record_created = T("Survey Template added"),
msg_record_modified = T("Survey Template updated"),
msg_record_deleted = T("Survey Template deleted"),
msg_list_empty = T("No Survey Template currently registered"))
s3xrc.model.configure(table, listadd=False)
output = s3_rest_controller(prefix, resourcename)
#return transform_buttons(output, next=True, cancel=True)
return output
def template_link():
""" @todo: fix docstring, PEP8 """
response.s3.prep = lambda r: r.representation in ("xml", "json")
return s3_rest_controller("survey", "template_link")
def questions():
""" At this stage, the user the following workflow will be implemented:
- User adds questions via the drop down or clicks "Add Question" to add a new one.
"""
table = db["survey_questions"]
record = request.args(0)
template = db(db.survey_template.id == session.rcvars.survey_template).select(limitby=(0, 1)).first()
if not template:
session.error = T("No template found!")
redirect(URL(r=request, f="template"))
if not record:
questions_query = (db.survey_template_link.survey_questions_id == db.survey_questions.id) & (template.id == db.survey_template_link.survey_template_id)
record = db(questions_query).select(db.survey_questions.id, limitby=(0, 1)).first()
if record:
redirect(URL(r=request, f="questions", args=[record.id]))
questions_form = SQLFORM(table, record, deletable=True, keepvalues=True)
all_questions = db().select(db.survey_question.ALL)
output = dict(all_questions=all_questions)
# Let's avoid blowing up -- this loads questions
try:
query = (template.id == db.survey_template_link.survey_template_id)
contained_questions = db(query).select(db.survey_question.id)
output.update(contained_questions=contained_questions)
except Exception:
# no id was passed, e.g., making a new section
output.update(contained_questions=[])
if questions_form.accepts(request.vars, session, keepvalues=True):
questions = request.post_vars.questions
if questions:
for question in questions:
if not has_dupe_questions(template.id, question):
db.survey_template_link.insert(survey_template_id=session.rcvars.survey_template,
survey_questions_id=questions_form.vars.id,
survey_question_id=question)
elif questions_form.errors:
response.error = T("Please correct all errors.")
output.update(form=questions_form)
return output
def table():
""" @todo: fix docstring, PEP8 """
if not "series_id" in request.vars:
response.error = T("You must provide a series id to proceed.")
return dict() # empty dict!
# store the necessary information -- we're safe at this point.
series_id = request.vars.series_id
# first check the series exists.
series = db(db.survey_series.id == series_id).select(limitby=(0, 1)).first()
if not series:
response.error = T("A survey series with id %s does not exist. Please go back and create one.") % (series_id)
return dict()
# query for the template to get the table name
template = db(db.survey_template.id == series.survey_template_id).select(limitby=(0, 1)).first()
# everything is good at this point!
table = get_table_for_template(template.id)
resourcename = "template_%s" % (template.id)
table.uuid.requires = IS_NOT_ONE_OF(db, "%s.uuid" % template.table_name)
table.id.represent = lambda id: A(id, _href=URL(r=request, f="table", args=[id, "update"], vars={"series_id":request.vars.series_id}))
# CRUD Strings
s3.crud_strings[template.table_name] = Storage(
title_create = T("Add Survey Answer"),
title_display = T("Survey Answer Details"),
title_list = T("List Survey Answers"),
title_update = T("Edit Survey Answer"),
subtitle_create = T("Add New Survey Answer"),
subtitle_list = T("Survey Answer"),
label_list_button = T("List Survey Answers"),
label_create_button = T("Add Survey Answer"),
label_delete_button = T("Delete Survey Answer"),
msg_record_created = T("Survey Answer added"),
msg_record_modified = T("Survey Answer updated"),
msg_record_deleted = T("Survey Answer deleted"),
msg_list_empty = T("No Survey Answers currently registered"))
response.s3.filter = (table.series_id == series_id)
s3xrc.model.configure(table, listadd=False)
output = s3_rest_controller("survey", resourcename)
authorised = s3_has_permission("create", table)
if authorised:
output.update(add_btn=A(T("Add Survey Answer"),
_href=URL(r=request, f="table", args=["create"], vars={"series_id":request.vars.series_id}),
_class="action-btn"))
else:
output.update(add_btn="")
return output
def series():
""" RESTful CRUD controller """
tablename = "%s_%s" % (prefix, resourcename)
table = db[tablename]
table.uuid.requires = IS_NOT_ONE_OF(db,"%s.uuid" % tablename)
table.name.requires = IS_NOT_EMPTY()
table.name.label = T("Survey Series Name")
table.description.label = T("Description")
table.survey_template_id.label = T("Survey Template")
table.survey_template_id.requires = IS_ONE_OF(db, "survey_template.id", "%(name)s")
table.survey_template_id.represent = lambda id: (id and [db(db.survey_template.id == id).select(db.survey_template.name, limitby=(0, 1)).first().name] or [""])[0]
table.start_date.label = T("Start of Period")
table.start_date.requires = IS_NOT_EMPTY()
table.end_date.label = T("End of Period")
table.end_date.requires = IS_NOT_EMPTY()
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_create = T("Add Survey Series"),
title_display = T("Survey Series Details"),
title_list = T("List Survey Series"),
title_update = T("Edit Survey Series"),
subtitle_create = T("Add New Survey Series"),
subtitle_list = T("Survey Series"),
label_list_button = T("List Survey Series"),
label_create_button = T("Add Survey Series"),
label_delete_button = T("Delete Survey Series"),
msg_record_created = T("Survey Series added"),
msg_record_modified = T("Survey Series updated"),
msg_record_deleted = T("Survey Series deleted"),
msg_list_empty = T("No Survey Series currently registered"))
# Post-processor
def postp(r, output):
shn_survey_action_buttons(r, deletable=False)
return output
response.s3.postp = postp
s3xrc.model.configure(table, listadd=False)
output = s3_rest_controller(prefix, resourcename)
return output
def question():
""" Question data, e.g., name, description, etc. """
tablename = "%s_%s" % (prefix, resourcename)
table = db[tablename]
table.uuid.requires = IS_NOT_ONE_OF(db, "%s.uuid" % tablename)
table.name.requires = IS_NOT_EMPTY()
table.name.label = T("Survey Question Display Name")
table.description.label = T("Description")
# table.tf_ta_columns.label = T("Number of Columns")
# table.ta_rows.label = T("Number of Rows")
# table.aggregation_type.writable = False
# table.aggregation_type.readable = False
question_types = {
# 1:T("Multiple Choice (Only One Answer)"),
# 2:T("Multiple Choice (Multiple Answers)"),
# 3:T("Matrix of Choices (Only one answer)"),
# 4:T("Matrix of Choices (Multiple Answers)"),
# 5:T("Rating Scale"),
6:T("Text"),
# 7:T("Multiple Text Fields"),
# 8:T("Matrix of Text Fields"),
9:T("Long Text"),
10:T("Number"),
11:T("Date"),
# 12:T("Image"),
# 13:T("Descriptive Text (e.g., Prose, etc)"),
# 14:T("Location"),
# 15:T("Organization"),
# 16:T("Person"),
## 16:T("Custom Database Resource (e.g., anything defined as a resource in Sahana)")
}
table.question_type.requires = IS_IN_SET(question_types)
# CRUD Strings
s3.crud_strings[tablename] = Storage(
title_create = T("Add Survey Question"),
title_display = T("Survey Question Details"),
title_list = T("List Survey Questions"),
title_update = T("Edit Survey Question"),
subtitle_create = T("Add New Survey Question"),
subtitle_list = T("Survey Question"),
label_list_button = T("List Survey Questions"),
label_create_button = T("Add Survey Question"),
label_delete_button = T("Delete Survey Question"),
msg_record_created = T("Survey Question added"),
msg_record_modified = T("Survey Question updated"),
msg_record_deleted = T("Survey Question deleted"),
msg_list_empty = T("No Survey Questions currently registered"))
s3xrc.model.configure(table, listadd=False)
output = s3_rest_controller(prefix, resourcename)
#return transform_buttons(output, cancel=True, save=True)
return output
def add_buttons(form, save=None, prev=None, next=None, finish=None, cancel=None):
""" Utility Function to reduce code duplication as this deals with:
1) removing the save button
2) adding the following: Cancel, Next, Previous and Finish (shown on the last step *ONLY*)
"""
form[0][-1][1][0] = "" # remove the original Save Button
if save:
form[-1][-1][1].append(INPUT(_type="submit", _name = "save", _value=T("Save"), _id="save"))
if cancel:
form[-1][-1][1].append(INPUT(_type="button", _name = "cancel", _value=T("Cancel"), _id="cancel"))
if prev:
form[-1][-1][1].append(INPUT(_type="submit",_name = "prev", _value=T("Previous"), _id="prev"))
if next:
form[-1][-1][1].append(INPUT(_type="submit", _value=T("Next"), _name="next", _id="next"))
if finish:
form[-1][-1][1].append(INPUT(_type="submit", _value=T("Finish"), _name="finish", _id="finish"))
return form
def transform_buttons(output, save=None, prev=None, next=None, finish=None, cancel=None):
""" @todo: fix docstring, PEP8 """
# fails when output is not HTML (e.g., JSON)
if isinstance(output, dict):
form = output.get("form", None)
if form:
add_buttons(form, save, prev, next, finish, cancel)
return output
def has_dupe_questions(template_id, question_id):
""" @todo: fix docstring, PEP8 """
question_query = (db.survey_template_link.survey_template_id == template_id) \
& (question_id == db.survey_template_link.survey_question_id)
questions = db(question_query).select(db.survey_question.ALL)
return len(questions) > 1
def prune_questions(questions_id, questions, all_questions):
""" @todo: fix docstring, PEP8 """
if not questions_id:
return # do nothing
if not questions:
return # nothing to act on.
for question in all_questions:
if question not in questions:
question_query = (db.survey_template_link.survey_questions_id == questions_id) \
& (question.id == db.survey_template_link.survey_question_id)
db(question_query).delete()
db.commit()
return questions
def get_contained_questions(questions_id):
""" @todo: fix docstring, PEP8 """
question_query = (db.survey_template_link.survey_questions_id == questions_id) & \
(db.survey_question.id == db.survey_template_link.survey_question_id) & \
(db.survey_template.id == db.survey_template_link.survey_template_id)
contained_questions = db(question_query).select(db.survey_question.ALL)
return contained_questions
def get_table_for_template(template_id):
"""
Return the table for the given template, creating it first if it does not already exist.
"""
# get the template first -- we need to get the table name
template = db(db.survey_template.id == template_id).select().first()
tbl = None
if template: # avoid blow ups!
fields = [Field("series_id", db.survey_series, writable=False, readable=False)
] # A list of Fields representing the questions
questions = db((db.survey_template_link.survey_template_id == template_id) & \
(db.survey_question.id == db.survey_template_link.survey_question_id)).select(db.survey_question.ALL)
# for each question, depending on its type create a Field
for question in questions:
question_type = question.question_type
if question_type == 6: # Single TF -- simple for now -- will introduce more types later.
fields.append(Field("question_%s" % (question.id), label=question.name))
elif question_type == 9:
fields.append(Field("question_%s" % (question.id), "text", label=question.name))
elif question_type == 10:
fields.append(Field("question_%s" % (question.id), "integer", label=question.name))
elif question_type == 11:
fields.append(Field("question_%s" % (question.id), "date", label=question.name))
fields.append(s3_meta_fields())
tbl = db.define_table("survey_template_%s" % (template_id),
#uuidstamp,
#deletion_status,
#authorstamp,
migrate=True,
*fields)
# now add the table name to the template record so we can reference it later.
db(db.survey_template.id == template_id).update(table_name="survey_template_%s" % (template.id))
db.commit()
# set up onaccept for this table.
def _onaccept(form):
db(tbl.id == form.vars.id).update(series_id=request.vars.series_id)
db.commit()
s3xrc.model.configure(tbl, onaccept=_onaccept)
# finally we return the newly created or existing table.
return tbl
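# Illustration only (not used above): the per-type branching in
# get_table_for_template can be read as a lookup from question_type codes to
# web2py field types, with "string" as the implicit default for type 6. This is
# a hedged sketch of that mapping; the controller itself keeps the explicit
# if/elif chain.
#
#     _FIELD_TYPE_BY_QUESTION_TYPE = {
#         6: "string",    # single text field
#         9: "text",      # long text
#         10: "integer",  # number
#         11: "date",     # date
#     }
#     field_type = _FIELD_TYPE_BY_QUESTION_TYPE.get(question.question_type, "string")
#     fields.append(Field("question_%s" % question.id, field_type, label=question.name))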
def shn_survey_action_buttons(r, deletable=True):
"""
Override the usual Action Buttons for Column views.
Designed to be called from a postp.
"""
if r.component:
args = [r.component_name, "[id]"]
else:
args = ["[id]"]
if auth.is_logged_in():
# Provide the ability to delete records in bulk
if deletable:
response.s3.actions = [
dict(label=str(UPDATE), _class="action-btn", url=str(URL(r=request, args = args + ["update"]))),
dict(label=str(DELETE), _class="action-btn", url=str(URL(r=request, args = args + ["delete"])))
]
else:
url = URL(r=request, f="table", vars={"series_id":args})
response.s3.actions = [
dict(label=str(UPDATE), _class="action-btn", url=str(URL(r=request, args = args + ["update"]))),
dict(label="Answer", _class="action-btn", url=str(URL(r=request, f="table",args="create", vars={"series_id":"[id]"}))),
dict(label="Results", _class="action-btn", url=str(URL(r=request, f="table", vars={"series_id":"[id]"}))) ]
else:
response.s3.actions = [
dict(label=str(READ), _class="action-btn", url=str(URL(r=request, args = args)))
]
return
# Unused code
#def section():
# """ RESTful CRUD controller """
# tablename = "%s_%s" % (prefix, resourcename)
# table = db[tablename]
# table.uuid.requires = IS_NOT_ONE_OF(db, "%s.uuid" % tablename)
# table.name.requires = IS_NOT_EMPTY()
# table.name.label = T("Survey Section Display Name")
# table.description.label = T("Description")
#
# # CRUD Strings
# s3.crud_strings[tablename] = Storage(
# title_create = T("Add Survey Section"),
# title_display = T("Survey Section Details"),
# title_list = T("List Survey Sections"),
# title_update = T("Edit Survey Section"),
# subtitle_create = T("Add New Survey Section"),
# subtitle_list = T("Survey Section"),
# label_list_button = T("List Survey Sections"),
# label_create_button = T("Add Survey Section"),
# label_delete_button = T("Delete Survey Section"),
# msg_record_created = T("Survey Section added"),
# msg_record_modified = T("Survey Section updated"),
# msg_record_deleted = T("Survey Section deleted"),
# msg_list_empty = T("No Survey Sections currently registered"))
# output = s3_rest_controller(prefix, resourcename)
#
# return transform_buttons(output, save=True, cancel=True)
#def question_options():
# resourcename = "question"
# tablename = "%s_%s" % (prefix, resourcename)
# table = db[tablename]
# table.uuid.requires = IS_NOT_ONE_OF(db, "%s.uuid" % tablename)
# table.tf_ta_columns.label = T("Number of Columns")
# table.ta_rows.label = T("Number of Rows")
## table.answer_choices.label = T("Answer Choices (One Per Line)")
# table.aggregation_type.writable = False
# table.aggregation_type.readable = False
## table.row_choices.label = T("Row Choices (One Per Line)")
## table.column_choices.label = T("Column Choices (One Per Line")
## table.tf_choices.label = T("Text before each Text Field (One per line)")
# output = s3_rest_controller(prefix, resourcename)
# output.update(question_type=question_type)
# return transform_buttons(output, prev=True, finish=True, cancel=True)
#def get_options_for_questions(template_id):
# questions = db((db.survey_template_link.survey_template_id == template_id) & \
# (db.survey_question.id == db.survey_template_link.survey_question_id)).select(db.survey_question.ALL)
# opt_map = {}
# for question in questions:
# question_type = question.question_type
# if question_type == 6: # Single TF -- simple for now -- will introduce more types later.
# opt_map[question.id] = {"allow_comments":question.allow_comments,\
# "comment_display_label":question.comment_display_label,\
# "required":question.required}
#
# elif question_type == 9:
# opt_map[question.id] = { "allow_comments":question.allow_comments,\
# "comment_display_label":question.comment_display_label,\
# "required":question.required}
# elif question_type == 10:
# opt_map[question.id] = {"allow_comments":question.allow_comments,\
# "comment_display_label":question.comment_display_label,\
# "required":question.required}
#
# elif question_type == 11:
# opt_map[question.id] = {"allow_comments":question.allow_comments,\
# "comment_display_label":question.comment_display_label,\
# "required":question.required}
# return opt_map
|
|
# -*- coding: utf-8 -*-
from copy import deepcopy
import numpy as np
from scipy import linalg
from ..bem import _check_origin
from ..io.constants import FIFF
from ..io.pick import pick_types, pick_info
from ..surface import get_head_surf, get_meg_helmet_surf
from ..io.proj import _has_eeg_average_ref_proj, make_projector
from ..transforms import (transform_surface_to, read_trans, _find_trans,
_ensure_trans)
from ._make_forward import _create_meg_coils, _create_eeg_els, _read_coil_defs
from ._lead_dots import (_do_self_dots, _do_surface_dots, _get_legen_table,
_do_cross_dots)
from ..parallel import check_n_jobs
from ..utils import logger, verbose
from ..externals.six import string_types
def _is_axial_coil(coil):
"""Determine if the coil is axial."""
is_ax = coil['coil_class'] in (FIFF.FWD_COILC_MAG,
FIFF.FWD_COILC_AXIAL_GRAD,
FIFF.FWD_COILC_AXIAL_GRAD2)
return is_ax
def _ad_hoc_noise(coils, ch_type='meg'):
"""Create ad-hoc noise covariance."""
# XXX should de-duplicate with make_ad_hoc_cov
v = np.empty(len(coils))
if ch_type == 'meg':
axs = np.array([_is_axial_coil(coil) for coil in coils], dtype=bool)
v[axs] = 4e-28 # 20e-15 ** 2
v[np.logical_not(axs)] = 2.5e-25 # 5e-13 ** 2
else:
v.fill(1e-12) # 1e-6 ** 2
cov = dict(diag=True, data=v, eig=None, eigvec=None)
return cov
def _setup_dots(mode, coils, ch_type):
"""Set up dot products."""
from scipy.interpolate import interp1d
int_rad = 0.06
noise = _ad_hoc_noise(coils, ch_type)
n_coeff, interp = (50, 'nearest') if mode == 'fast' else (100, 'linear')
lut, n_fact = _get_legen_table(ch_type, False, n_coeff, verbose=False)
lut_fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, interp, axis=0)
return int_rad, noise, lut_fun, n_fact
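# The Legendre table returned by _get_legen_table is evaluated by plain 1-D
# interpolation over cos(angle) in [-1, 1] (axis 0 is the angular grid). A toy
# sketch of the same interp1d pattern, with made-up table values unrelated to
# the real coefficients:
#
#     import numpy as np
#     from scipy.interpolate import interp1d
#     table = np.random.rand(100, 3)  # rows index a grid of cos(angle) values
#     fun = interp1d(np.linspace(-1, 1, table.shape[0]), table, 'linear', axis=0)
#     fun(0.25)  # -> one interpolated row, shape (3,)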
def _compute_mapping_matrix(fmd, info):
"""Do the hairy computations."""
logger.info(' Preparing the mapping matrix...')
# assemble a projector and apply it to the data
ch_names = fmd['ch_names']
projs = info.get('projs', list())
proj_op = make_projector(projs, ch_names)[0]
proj_dots = np.dot(proj_op.T, np.dot(fmd['self_dots'], proj_op))
noise_cov = fmd['noise']
# Whiten
if not noise_cov['diag']:
raise NotImplementedError # this shouldn't happen
whitener = np.diag(1.0 / np.sqrt(noise_cov['data'].ravel()))
whitened_dots = np.dot(whitener.T, np.dot(proj_dots, whitener))
# SVD is numerically better than the eigenvalue decomposition even if
# mat is supposed to be symmetric and positive definite
uu, sing, vv = linalg.svd(whitened_dots, full_matrices=False,
overwrite_a=True)
# Eigenvalue truncation
sumk = np.cumsum(sing)
sumk /= sumk[-1]
fmd['nest'] = np.where(sumk > (1.0 - fmd['miss']))[0][0]
logger.info(' [Truncate at %d missing %g]' % (fmd['nest'], fmd['miss']))
sing = 1.0 / sing[:fmd['nest']]
# Put the inverse together
inv = np.dot(uu[:, :fmd['nest']] * sing, vv[:fmd['nest']]).T
# Sandwich with the whitener
inv_whitened = np.dot(whitener.T, np.dot(inv, whitener))
# Take into account that the lead fields used to compute
# fmd['surface_dots'] were unprojected
inv_whitened_proj = (np.dot(inv_whitened.T, proj_op)).T
# Finally sandwich in the selection matrix
# This one picks up the correct lead field projection
mapping_mat = np.dot(fmd['surface_dots'], inv_whitened_proj)
# Optionally apply the average electrode reference to the final field map
if fmd['kind'] == 'eeg':
if _has_eeg_average_ref_proj(projs):
logger.info(' The map will have average electrode reference')
mapping_mat -= np.mean(mapping_mat, axis=0)[np.newaxis, :]
return mapping_mat
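# The truncation above amounts to a regularized (truncated-SVD) pseudo-inverse:
# keep the leading singular components until their cumulative sum reaches
# (1 - miss) of the total, invert only those, and discard the rest. A standalone
# toy sketch of the same idea (random data, not part of this pipeline):
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     a = rng.randn(5, 5)
#     mat = np.dot(a, a.T)  # symmetric positive semi-definite
#     u, s, vh = np.linalg.svd(mat)
#     nest = np.where(np.cumsum(s) / s.sum() > 1.0 - 1e-4)[0][0]
#     inv = np.dot(u[:, :nest] * (1.0 / s[:nest]), vh[:nest])
#     # `inv` approximates np.linalg.pinv(mat) with the smallest components dropped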
def _map_meg_channels(info_from, info_to, mode='fast', origin=(0., 0., 0.04)):
"""Find mapping from one set of channels to another.
Parameters
----------
info_from : instance of Info
The measurement data to interpolate from.
info_to : instance of Info
The measurement info to interpolate to.
mode : str
Either `'accurate'` or `'fast'`, determines the quality of the
Legendre polynomial expansion used. `'fast'` should be sufficient
for most applications.
Returns
-------
mapping : array
A mapping matrix of shape len(pick_to) x len(pick_from).
"""
# no need to apply trans because both from and to coils are in device
# coordinates
templates = _read_coil_defs(verbose=False)
coils_from = _create_meg_coils(info_from['chs'], 'normal',
info_from['dev_head_t'], templates)
coils_to = _create_meg_coils(info_to['chs'], 'normal',
info_to['dev_head_t'], templates)
miss = 1e-4 # Smoothing criterion for MEG
origin = _check_origin(origin, info_from)
#
# Step 2. Calculate the dot products
#
int_rad, noise, lut_fun, n_fact = _setup_dots(mode, coils_from, 'meg')
logger.info(' Computing dot products for %i coils...'
% (len(coils_from)))
self_dots = _do_self_dots(int_rad, False, coils_from, origin, 'meg',
lut_fun, n_fact, n_jobs=1)
logger.info(' Computing cross products for %i x %i coils...'
% (len(coils_from), len(coils_to)))
cross_dots = _do_cross_dots(int_rad, False, coils_from, coils_to,
origin, 'meg', lut_fun, n_fact).T
ch_names = [c['ch_name'] for c in info_from['chs']]
fmd = dict(kind='meg', ch_names=ch_names,
origin=origin, noise=noise, self_dots=self_dots,
surface_dots=cross_dots, int_rad=int_rad, miss=miss)
#
# Step 3. Compute the mapping matrix
#
mapping = _compute_mapping_matrix(fmd, info_from)
return mapping
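# Hypothetical usage sketch (assumes `evoked` is an mne.Evoked instance that
# contains both gradiometers and magnetometers; variable names below are
# illustrative only):
#
#     from_picks = pick_types(evoked.info, meg='grad', eeg=False, ref_meg=False)
#     to_picks = pick_types(evoked.info, meg='mag', eeg=False, ref_meg=False)
#     mapping = _map_meg_channels(pick_info(evoked.info, from_picks),
#                                 pick_info(evoked.info, to_picks), mode='fast')
#     virtual_mag = np.dot(mapping, evoked.data[from_picks])  # (n_mag, n_times)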
def _as_meg_type_evoked(evoked, ch_type='grad', mode='fast'):
"""Compute virtual evoked using interpolated fields in mag/grad channels.
Parameters
----------
evoked : instance of mne.Evoked
The evoked object.
ch_type : str
The destination channel type. It can be 'mag' or 'grad'.
mode : str
Either `'accurate'` or `'fast'`, determines the quality of the
Legendre polynomial expansion used. `'fast'` should be sufficient
for most applications.
Returns
-------
evoked : instance of mne.Evoked
The transformed evoked object containing only virtual channels.
"""
evoked = evoked.copy()
if ch_type not in ['mag', 'grad']:
raise ValueError('ch_type must be "mag" or "grad", not "%s"'
% ch_type)
# pick the original and destination channels
pick_from = pick_types(evoked.info, meg=True, eeg=False,
ref_meg=False)
pick_to = pick_types(evoked.info, meg=ch_type, eeg=False,
ref_meg=False)
if len(pick_to) == 0:
raise ValueError('No channels matching the destination channel type'
' found in info. Please pass an evoked containing'
' both the original and destination channels. Only the'
' locations of the destination channels will be used'
' for interpolation.')
info_from = pick_info(evoked.info, pick_from)
info_to = pick_info(evoked.info, pick_to)
mapping = _map_meg_channels(info_from, info_to, mode=mode)
# compute evoked data by multiplying by the 'gain matrix' from
# original sensors to virtual sensors
data = np.dot(mapping, evoked.data[pick_from])
# keep only the destination channel types
evoked.pick_types(meg=ch_type, eeg=False, ref_meg=False)
evoked.data = data
# change channel names to emphasize they contain interpolated data
for ch in evoked.info['chs']:
ch['ch_name'] += '_virtual'
evoked.info._update_redundant()
evoked.info._check_consistency()
return evoked
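# Hedged example (assumes `evoked` holds both mag and grad channels):
#
#     virtual = _as_meg_type_evoked(evoked, ch_type='mag', mode='fast')
#     # `virtual` now contains only magnetometer channels renamed with '_virtual'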
@verbose
def _make_surface_mapping(info, surf, ch_type='meg', trans=None, mode='fast',
n_jobs=1, origin=(0., 0., 0.04), verbose=None):
"""Re-map M/EEG data to a surface.
Parameters
----------
info : instance of Info
Measurement info.
surf : dict
The surface to map the data to. The required fields are `'rr'`,
`'nn'`, and `'coord_frame'`. Must be in head coordinates.
ch_type : str
Must be either `'meg'` or `'eeg'`, determines the type of field.
trans : None | dict
If None, no transformation applied. Should be a Head<->MRI
transformation.
mode : str
Either `'accurate'` or `'fast'`, determines the quality of the
Legendre polynomial expansion used. `'fast'` should be sufficient
for most applications.
n_jobs : int
Number of permutations to run in parallel (requires joblib package).
origin : array-like, shape (3,) | str
Origin of internal and external multipolar moment space in head
coords and in meters. Can be ``'auto'``, which means
a head-digitization-based origin fit. Default is ``(0., 0., 0.04)``.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
mapping : array
An n_vertices x n_sensors array that remaps the MEG or EEG data,
as `new_data = np.dot(mapping, data)`.
"""
if not all(key in surf for key in ['rr', 'nn']):
raise KeyError('surf must have both "rr" and "nn"')
if 'coord_frame' not in surf:
raise KeyError('The surface coordinate frame must be specified '
'in surf["coord_frame"]')
if mode not in ['accurate', 'fast']:
raise ValueError('mode must be "accurate" or "fast", not "%s"' % mode)
# deal with coordinate frames here -- always go to "head" (easiest)
orig_surf = surf
surf = transform_surface_to(deepcopy(surf), 'head', trans)
n_jobs = check_n_jobs(n_jobs)
origin = _check_origin(origin, info)
#
# Step 1. Prepare the coil definitions
# Do the dot products, assume surf in head coords
#
if ch_type not in ('meg', 'eeg'):
raise ValueError('unknown coil type "%s"' % ch_type)
if ch_type == 'meg':
picks = pick_types(info, meg=True, eeg=False, ref_meg=False)
logger.info('Prepare MEG mapping...')
else:
picks = pick_types(info, meg=False, eeg=True, ref_meg=False)
logger.info('Prepare EEG mapping...')
if len(picks) == 0:
raise RuntimeError('cannot map, no channels found')
chs = pick_info(info, picks)['chs']
# create coil defs in head coordinates
if ch_type == 'meg':
# Put them in head coordinates
coils = _create_meg_coils(chs, 'normal', info['dev_head_t'])
type_str = 'coils'
miss = 1e-4 # Smoothing criterion for MEG
else: # EEG
coils = _create_eeg_els(chs)
type_str = 'electrodes'
miss = 1e-3 # Smoothing criterion for EEG
#
# Step 2. Calculate the dot products
#
int_rad, noise, lut_fun, n_fact = _setup_dots(mode, coils, ch_type)
logger.info('Computing dot products for %i %s...' % (len(coils), type_str))
self_dots = _do_self_dots(int_rad, False, coils, origin, ch_type,
lut_fun, n_fact, n_jobs)
sel = np.arange(len(surf['rr'])) # eventually we should do sub-selection
logger.info('Computing dot products for %i surface locations...'
% len(sel))
surface_dots = _do_surface_dots(int_rad, False, coils, surf, sel,
origin, ch_type, lut_fun, n_fact,
n_jobs)
#
# Steps 3-4. Compute the mapping matrix and return the result
#
ch_names = [c['ch_name'] for c in chs]
fmd = dict(kind=ch_type, surf=surf, ch_names=ch_names, coils=coils,
origin=origin, noise=noise, self_dots=self_dots,
surface_dots=surface_dots, int_rad=int_rad, miss=miss)
logger.info('Field mapping data ready')
fmd['data'] = _compute_mapping_matrix(fmd, info)
# bring the original back, whatever coord frame it was in
fmd['surf'] = orig_surf
# Remove some unnecessary fields
del fmd['self_dots']
del fmd['surface_dots']
del fmd['int_rad']
del fmd['miss']
return fmd
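# Hypothetical usage sketch (assumes `evoked`, a valid `trans`, and a FreeSurfer
# subject named 'sample' are available; paths and names are placeholders):
#
#     surf = get_head_surf('sample', subjects_dir='subjects')
#     fmd = _make_surface_mapping(evoked.info, surf, ch_type='eeg', trans=trans)
#     eeg_picks = pick_types(evoked.info, meg=False, eeg=True, ref_meg=False)
#     surface_data = np.dot(fmd['data'], evoked.data[eeg_picks])  # values on surf['rr']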
@verbose
def make_field_map(evoked, trans='auto', subject=None, subjects_dir=None,
ch_type=None, mode='fast', meg_surf='helmet',
origin=(0., 0., 0.04), n_jobs=1, verbose=None):
"""Compute surface maps used for field display in 3D.
Parameters
----------
evoked : Evoked | Epochs | Raw
The measurement file. Must have an ``info`` attribute.
trans : str | 'auto' | None
The full path to the `*-trans.fif` file produced during
coregistration. If present or found using 'auto'
the maps will be in MRI coordinates.
If None, map for EEG data will not be available.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. If None, map for EEG data will not be available.
subjects_dir : str
The path to the FreeSurfer subjects reconstructions.
It corresponds to the FreeSurfer environment variable SUBJECTS_DIR.
ch_type : None | 'eeg' | 'meg'
If None, a map for each available channel type will be returned.
Else only the specified type will be used.
mode : str
Either `'accurate'` or `'fast'`, determines the quality of the
Legendre polynomial expansion used. `'fast'` should be sufficient
for most applications.
meg_surf : str
Should be ``'helmet'`` or ``'head'`` to specify the surface on which
to compute the MEG field map. The default value is ``'helmet'``.
origin : array-like, shape (3,) | str
Origin of internal and external multipolar moment space in head
coords and in meters. Can be ``'auto'``, which means
a head-digitization-based origin fit. Default is ``(0., 0., 0.04)``.
.. versionadded:: 0.11
n_jobs : int
The number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
.. versionadded:: 0.11
Returns
-------
surf_maps : list
The surface maps to be used for field plots. The list contains
separate ones for MEG and EEG (if both MEG and EEG are present).
"""
info = evoked.info
if ch_type is None:
types = [t for t in ['eeg', 'meg'] if t in evoked]
else:
if ch_type not in ['eeg', 'meg']:
raise ValueError("ch_type should be 'eeg' or 'meg' (got %s)"
% ch_type)
types = [ch_type]
if trans == 'auto':
# let's try to do this in MRI coordinates so they're easy to plot
trans = _find_trans(subject, subjects_dir)
if 'eeg' in types and trans is None:
logger.info('No trans file available. EEG data ignored.')
types.remove('eeg')
if len(types) == 0:
raise RuntimeError('No data available for mapping.')
if trans is not None:
if isinstance(trans, string_types):
trans = read_trans(trans)
trans = _ensure_trans(trans, 'head', 'mri')
if meg_surf not in ['helmet', 'head']:
raise ValueError('Surface to plot MEG fields must be '
'"helmet" or "head"')
surfs = []
for this_type in types:
if this_type == 'meg' and meg_surf == 'helmet':
surf = get_meg_helmet_surf(info, trans)
else:
surf = get_head_surf(subject, subjects_dir=subjects_dir)
surfs.append(surf)
surf_maps = list()
for this_type, this_surf in zip(types, surfs):
this_map = _make_surface_mapping(evoked.info, this_surf, this_type,
trans, n_jobs=n_jobs, origin=origin,
mode=mode)
surf_maps.append(this_map)
return surf_maps
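# Hypothetical end-to-end sketch (file names below are placeholders, not data
# shipped with this module):
#
#     import mne
#     evoked = mne.read_evokeds('sample-ave.fif', condition=0)
#     maps = make_field_map(evoked, trans='sample-trans.fif',
#                           subject='sample', subjects_dir='subjects')
#     evoked.plot_field(maps, time=0.1)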
|
|
from __future__ import unicode_literals
import datetime
import re
from datetime import date
from decimal import Decimal
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.forms.models import (_get_foreign_key, inlineformset_factory,
modelformset_factory, BaseModelFormSet)
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from .models import (Author, BetterAuthor, Book, BookWithCustomPK,
BookWithOptionalAltEditor, AlternateBook, AuthorMeeting, CustomPrimaryKey,
Place, Owner, Location, OwnerProfile, Restaurant, Product, Price,
MexicanRestaurant, ClassyMexicanRestaurant, Repository, Revision,
Person, Membership, Team, Player, Poet, Poem, Post)
class DeletionTests(TestCase):
def test_deletion(self):
PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True)
poet = Poet.objects.create(name='test')
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '1',
'form-MAX_NUM_FORMS': '0',
'form-0-id': str(poet.pk),
'form-0-name': 'test',
'form-0-DELETE': 'on',
}
formset = PoetFormSet(data, queryset=Poet.objects.all())
formset.save(commit=False)
self.assertEqual(Poet.objects.count(), 1)
formset.save()
self.assertTrue(formset.is_valid())
self.assertEqual(Poet.objects.count(), 0)
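# The 'form-TOTAL_FORMS' / 'form-INITIAL_FORMS' / 'form-MAX_NUM_FORMS' keys in
# the POST data above mimic the formset's ManagementForm. A hedged helper
# sketch for building such data in tests (illustrative only, not used by this
# suite):
#
#     def management_form_data(prefix, total, initial, max_num=''):
#         return {
#             '%s-TOTAL_FORMS' % prefix: str(total),
#             '%s-INITIAL_FORMS' % prefix: str(initial),
#             '%s-MAX_NUM_FORMS' % prefix: str(max_num),
#         }
#
#     data = dict(management_form_data('form', total=1, initial=1),
#                 **{'form-0-id': str(poet.pk), 'form-0-name': 'test'})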
def test_add_form_deletion_when_invalid(self):
"""
Make sure that an add form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True)
poet = Poet.objects.create(name='test')
# One existing untouched and two new invalid forms
data = {
'form-TOTAL_FORMS': '3',
'form-INITIAL_FORMS': '1',
'form-MAX_NUM_FORMS': '0',
'form-0-id': six.text_type(poet.id),
'form-0-name': 'test',
'form-1-id': '',
'form-1-name': 'x' * 1000, # Too long
'form-2-id': six.text_type(poet.id), # Violate unique constraint
'form-2-name': 'test2',
}
formset = PoetFormSet(data, queryset=Poet.objects.all())
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poet.objects.count(), 1)
# Then make sure that it *does* pass validation and delete the object,
# even though the data in the new forms isn't actually valid.
data['form-0-DELETE'] = 'on'
data['form-1-DELETE'] = 'on'
data['form-2-DELETE'] = 'on'
formset = PoetFormSet(data, queryset=Poet.objects.all())
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poet.objects.count(), 0)
def test_change_form_deletion_when_invalid(self):
"""
Make sure that a change form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True)
poet = Poet.objects.create(name='test')
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '1',
'form-MAX_NUM_FORMS': '0',
'form-0-id': six.text_type(poet.id),
'form-0-name': 'x' * 1000,
}
formset = PoetFormSet(data, queryset=Poet.objects.all())
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poet.objects.count(), 1)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['form-0-DELETE'] = 'on'
formset = PoetFormSet(data, queryset=Poet.objects.all())
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poet.objects.count(), 0)
def test_outdated_deletion(self):
poet = Poet.objects.create(name='test')
poem = Poem.objects.create(name='Brevity is the soul of wit', poet=poet)
PoemFormSet = inlineformset_factory(Poet, Poem, fields="__all__", can_delete=True)
# Simulate deletion of an object that doesn't exist in the database
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '2',
'form-0-id': str(poem.pk),
'form-0-name': 'foo',
'form-1-id': str(poem.pk + 1), # doesn't exist
'form-1-name': 'bar',
'form-1-DELETE': 'on',
}
formset = PoemFormSet(data, instance=poet, prefix="form")
# The formset is valid even though poem.pk + 1 doesn't exist,
# because it's marked for deletion anyway
self.assertTrue(formset.is_valid())
formset.save()
# Make sure the save went through correctly
self.assertEqual(Poem.objects.get(pk=poem.pk).name, "foo")
self.assertEqual(poet.poem_set.count(), 1)
self.assertFalse(Poem.objects.filter(pk=poem.pk + 1).exists())
class ModelFormsetTest(TestCase):
def test_modelformset_factory_without_fields(self):
""" Regression for #19733 """
message = (
"Calling modelformset_factory without defining 'fields' or 'exclude' "
"explicitly is prohibited."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
modelformset_factory(Author)
def test_simple_save(self):
qs = Author.objects.all()
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" maxlength="100" /><input type="hidden" name="form-0-id" id="id_form-0-id" /></p>')
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" maxlength="100" /><input type="hidden" name="form-1-id" id="id_form-1-id" /></p>')
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_form-2-name">Name:</label> <input id="id_form-2-name" type="text" name="form-2-name" maxlength="100" /><input type="hidden" name="form-2-id" id="id_form-2-id" /></p>')
data = {
'form-TOTAL_FORMS': '3', # the number of forms rendered
'form-INITIAL_FORMS': '0', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-name': 'Charles Baudelaire',
'form-1-name': 'Arthur Rimbaud',
'form-2-name': '',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 2)
author1, author2 = saved
self.assertEqual(author1, Author.objects.get(name='Charles Baudelaire'))
self.assertEqual(author2, Author.objects.get(name='Arthur Rimbaud'))
authors = list(Author.objects.order_by('name'))
self.assertEqual(authors, [author2, author1])
# Gah! We forgot Paul Verlaine. Let's create a formset to edit the
# existing authors with an extra form to add him. We *could* pass in a
# queryset to restrict the Author objects we edit, but in this case
# we'll use it to display them in alphabetical order by name.
qs = Author.objects.order_by('name')
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=1, can_delete=False)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" value="Arthur Rimbaud" maxlength="100" /><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></p>' % author2.id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" value="Charles Baudelaire" maxlength="100" /><input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" /></p>' % author1.id)
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_form-2-name">Name:</label> <input id="id_form-2-name" type="text" name="form-2-name" maxlength="100" /><input type="hidden" name="form-2-id" id="id_form-2-id" /></p>')
data = {
'form-TOTAL_FORMS': '3', # the number of forms rendered
'form-INITIAL_FORMS': '2', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(author2.id),
'form-0-name': 'Arthur Rimbaud',
'form-1-id': str(author1.id),
'form-1-name': 'Charles Baudelaire',
'form-2-name': 'Paul Verlaine',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
# Only changed or new objects are returned from formset.save()
saved = formset.save()
self.assertEqual(len(saved), 1)
author3 = saved[0]
self.assertEqual(author3, Author.objects.get(name='Paul Verlaine'))
authors = list(Author.objects.order_by('name'))
self.assertEqual(authors, [author2, author1, author3])
# This probably shouldn't happen, but it will. If an add form was
# marked for deletion, make sure we don't save that form.
qs = Author.objects.order_by('name')
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=1, can_delete=True)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 4)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" value="Arthur Rimbaud" maxlength="100" /></p>\n'
'<p><label for="id_form-0-DELETE">Delete:</label> <input type="checkbox" name="form-0-DELETE" id="id_form-0-DELETE" /><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></p>' % author2.id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" value="Charles Baudelaire" maxlength="100" /></p>\n'
'<p><label for="id_form-1-DELETE">Delete:</label> <input type="checkbox" name="form-1-DELETE" id="id_form-1-DELETE" /><input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" /></p>' % author1.id)
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_form-2-name">Name:</label> <input id="id_form-2-name" type="text" name="form-2-name" value="Paul Verlaine" maxlength="100" /></p>\n'
'<p><label for="id_form-2-DELETE">Delete:</label> <input type="checkbox" name="form-2-DELETE" id="id_form-2-DELETE" /><input type="hidden" name="form-2-id" value="%d" id="id_form-2-id" /></p>' % author3.id)
self.assertHTMLEqual(formset.forms[3].as_p(),
'<p><label for="id_form-3-name">Name:</label> <input id="id_form-3-name" type="text" name="form-3-name" maxlength="100" /></p>\n'
'<p><label for="id_form-3-DELETE">Delete:</label> <input type="checkbox" name="form-3-DELETE" id="id_form-3-DELETE" /><input type="hidden" name="form-3-id" id="id_form-3-id" /></p>')
data = {
'form-TOTAL_FORMS': '4', # the number of forms rendered
'form-INITIAL_FORMS': '3', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(author2.id),
'form-0-name': 'Arthur Rimbaud',
'form-1-id': str(author1.id),
'form-1-name': 'Charles Baudelaire',
'form-2-id': str(author3.id),
'form-2-name': 'Paul Verlaine',
'form-3-name': 'Walt Whitman',
'form-3-DELETE': 'on',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
# No objects were changed or saved so nothing will come back.
self.assertEqual(formset.save(), [])
authors = list(Author.objects.order_by('name'))
self.assertEqual(authors, [author2, author1, author3])
# Let's edit a record to ensure save only returns that one record.
data = {
'form-TOTAL_FORMS': '4', # the number of forms rendered
'form-INITIAL_FORMS': '3', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(author2.id),
'form-0-name': 'Walt Whitman',
'form-1-id': str(author1.id),
'form-1-name': 'Charles Baudelaire',
'form-2-id': str(author3.id),
'form-2-name': 'Paul Verlaine',
'form-3-name': '',
'form-3-DELETE': '',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
# One record has changed.
saved = formset.save()
self.assertEqual(len(saved), 1)
self.assertEqual(saved[0], Author.objects.get(name='Walt Whitman'))
def test_commit_false(self):
# Test the behavior of commit=False and save_m2m
author1 = Author.objects.create(name='Charles Baudelaire')
author2 = Author.objects.create(name='Paul Verlaine')
author3 = Author.objects.create(name='Walt Whitman')
meeting = AuthorMeeting.objects.create(created=date.today())
meeting.authors = Author.objects.all()
# create an Author instance to add to the meeting.
author4 = Author.objects.create(name='John Steinbeck')
AuthorMeetingFormSet = modelformset_factory(AuthorMeeting, fields="__all__", extra=1, can_delete=True)
data = {
'form-TOTAL_FORMS': '2', # the number of forms rendered
'form-INITIAL_FORMS': '1', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(meeting.id),
'form-0-name': '2nd Tuesday of the Week Meeting',
'form-0-authors': [author2.id, author1.id, author3.id, author4.id],
'form-1-name': '',
'form-1-authors': '',
'form-1-DELETE': '',
}
formset = AuthorMeetingFormSet(data=data, queryset=AuthorMeeting.objects.all())
self.assertTrue(formset.is_valid())
instances = formset.save(commit=False)
for instance in instances:
instance.created = date.today()
instance.save()
formset.save_m2m()
self.assertQuerysetEqual(instances[0].authors.all(), [
'<Author: Charles Baudelaire>',
'<Author: John Steinbeck>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
def test_max_num(self):
# Test the behavior of max_num with model formsets. It should allow
# all existing related objects/inlines for a given object to be
# displayed, but not allow the creation of new inlines beyond max_num.
Author.objects.create(name='Charles Baudelaire')
Author.objects.create(name='Paul Verlaine')
Author.objects.create(name='Walt Whitman')
qs = Author.objects.order_by('name')
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=None, extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 6)
self.assertEqual(len(formset.extra_forms), 3)
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=4, extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 4)
self.assertEqual(len(formset.extra_forms), 1)
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=0, extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 3)
self.assertEqual(len(formset.extra_forms), 0)
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=None)
formset = AuthorFormSet(queryset=qs)
self.assertQuerysetEqual(formset.get_queryset(), [
'<Author: Charles Baudelaire>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=0)
formset = AuthorFormSet(queryset=qs)
self.assertQuerysetEqual(formset.get_queryset(), [
'<Author: Charles Baudelaire>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=4)
formset = AuthorFormSet(queryset=qs)
self.assertQuerysetEqual(formset.get_queryset(), [
'<Author: Charles Baudelaire>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
def test_min_num(self):
# Test the behavior of min_num with model formsets. It should be
# added to extra.
qs = Author.objects.none()
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=0)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 0)
AuthorFormSet = modelformset_factory(Author, fields="__all__", min_num=1, extra=0)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 1)
AuthorFormSet = modelformset_factory(Author, fields="__all__", min_num=1, extra=1)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 2)
def test_min_num_with_existing(self):
# Test the behavior of min_num with existing objects.
Author.objects.create(name='Charles Baudelaire')
qs = Author.objects.all()
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=0, min_num=1)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 1)
def test_custom_save_method(self):
class PoetForm(forms.ModelForm):
def save(self, commit=True):
# change the name to "Vladimir Mayakovsky" just to be a jerk.
author = super(PoetForm, self).save(commit=False)
author.name = "Vladimir Mayakovsky"
if commit:
author.save()
return author
PoetFormSet = modelformset_factory(Poet, fields="__all__", form=PoetForm)
data = {
'form-TOTAL_FORMS': '3', # the number of forms rendered
'form-INITIAL_FORMS': '0', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-name': 'Walt Whitman',
'form-1-name': 'Charles Baudelaire',
'form-2-name': '',
}
qs = Poet.objects.all()
formset = PoetFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
poets = formset.save()
self.assertEqual(len(poets), 2)
poet1, poet2 = poets
self.assertEqual(poet1.name, 'Vladimir Mayakovsky')
self.assertEqual(poet2.name, 'Vladimir Mayakovsky')
def test_custom_form(self):
""" Test that model_formset respects fields and exclude parameters of
custom form
"""
class PostForm1(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'posted')
class PostForm2(forms.ModelForm):
class Meta:
model = Post
exclude = ('subtitle',)
PostFormSet = modelformset_factory(Post, form=PostForm1)
formset = PostFormSet()
self.assertFalse("subtitle" in formset.forms[0].fields)
PostFormSet = modelformset_factory(Post, form=PostForm2)
formset = PostFormSet()
self.assertFalse("subtitle" in formset.forms[0].fields)
def test_custom_queryset_init(self):
"""
Test that a queryset can be overridden in the __init__ method.
https://docs.djangoproject.com/en/dev/topics/forms/modelforms/#changing-the-queryset
"""
Author.objects.create(name='Charles Baudelaire')
Author.objects.create(name='Paul Verlaine')
class BaseAuthorFormSet(BaseModelFormSet):
def __init__(self, *args, **kwargs):
super(BaseAuthorFormSet, self).__init__(*args, **kwargs)
self.queryset = Author.objects.filter(name__startswith='Charles')
AuthorFormSet = modelformset_factory(Author, fields='__all__', formset=BaseAuthorFormSet)
formset = AuthorFormSet()
self.assertEqual(len(formset.get_queryset()), 1)
def test_model_inheritance(self):
BetterAuthorFormSet = modelformset_factory(BetterAuthor, fields="__all__")
formset = BetterAuthorFormSet()
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" maxlength="100" /></p>\n'
'<p><label for="id_form-0-write_speed">Write speed:</label> <input type="number" name="form-0-write_speed" id="id_form-0-write_speed" /><input type="hidden" name="form-0-author_ptr" id="id_form-0-author_ptr" /></p>')
data = {
'form-TOTAL_FORMS': '1', # the number of forms rendered
'form-INITIAL_FORMS': '0', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-author_ptr': '',
'form-0-name': 'Ernest Hemingway',
'form-0-write_speed': '10',
}
formset = BetterAuthorFormSet(data)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
author1, = saved
self.assertEqual(author1, BetterAuthor.objects.get(name='Ernest Hemingway'))
hemingway_id = BetterAuthor.objects.get(name="Ernest Hemingway").pk
formset = BetterAuthorFormSet()
self.assertEqual(len(formset.forms), 2)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label> <input id="id_form-0-name" type="text" name="form-0-name" value="Ernest Hemingway" maxlength="100" /></p>\n'
'<p><label for="id_form-0-write_speed">Write speed:</label> <input type="number" name="form-0-write_speed" value="10" id="id_form-0-write_speed" /><input type="hidden" name="form-0-author_ptr" value="%d" id="id_form-0-author_ptr" /></p>' % hemingway_id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label> <input id="id_form-1-name" type="text" name="form-1-name" maxlength="100" /></p>\n'
'<p><label for="id_form-1-write_speed">Write speed:</label> <input type="number" name="form-1-write_speed" id="id_form-1-write_speed" /><input type="hidden" name="form-1-author_ptr" id="id_form-1-author_ptr" /></p>')
data = {
'form-TOTAL_FORMS': '2', # the number of forms rendered
'form-INITIAL_FORMS': '1', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-author_ptr': hemingway_id,
'form-0-name': 'Ernest Hemingway',
'form-0-write_speed': '10',
'form-1-author_ptr': '',
'form-1-name': '',
'form-1-write_speed': '',
}
formset = BetterAuthorFormSet(data)
self.assertTrue(formset.is_valid())
self.assertEqual(formset.save(), [])
def test_inline_formsets(self):
# We can also create a formset that is tied to a parent model. This is
# how the admin system's edit inline functionality works.
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=3, fields="__all__")
author = Author.objects.create(name='Charles Baudelaire')
formset = AuthorBooksFormSet(instance=author)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" maxlength="100" /><input type="hidden" name="book_set-0-author" value="%d" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" id="id_book_set-0-id" /></p>' % author.id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" /><input type="hidden" name="book_set-1-author" value="%d" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>' % author.id)
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" /><input type="hidden" name="book_set-2-author" value="%d" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>' % author.id)
data = {
'book_set-TOTAL_FORMS': '3', # the number of forms rendered
'book_set-INITIAL_FORMS': '0', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-title': 'Les Fleurs du Mal',
'book_set-1-title': '',
'book_set-2-title': '',
}
formset = AuthorBooksFormSet(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
book1, = saved
self.assertEqual(book1, Book.objects.get(title='Les Fleurs du Mal'))
self.assertQuerysetEqual(author.book_set.all(), ['<Book: Les Fleurs du Mal>'])
# Now that we've added a book to Charles Baudelaire, let's try adding
# another one. This time though, an edit form will be available for
# every existing book.
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2, fields="__all__")
author = Author.objects.get(name='Charles Baudelaire')
formset = AuthorBooksFormSet(instance=author)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" value="Les Fleurs du Mal" maxlength="100" /><input type="hidden" name="book_set-0-author" value="%d" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" value="%d" id="id_book_set-0-id" /></p>' % (author.id, book1.id))
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" /><input type="hidden" name="book_set-1-author" value="%d" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>' % author.id)
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" /><input type="hidden" name="book_set-2-author" value="%d" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>' % author.id)
data = {
'book_set-TOTAL_FORMS': '3', # the number of forms rendered
'book_set-INITIAL_FORMS': '1', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-id': str(book1.id),
'book_set-0-title': 'Les Fleurs du Mal',
'book_set-1-title': 'Les Paradis Artificiels',
'book_set-2-title': '',
}
formset = AuthorBooksFormSet(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
book2, = saved
self.assertEqual(book2, Book.objects.get(title='Les Paradis Artificiels'))
# As you can see, 'Les Paradis Artificiels' is now a book belonging to
# Charles Baudelaire.
self.assertQuerysetEqual(author.book_set.order_by('title'), [
'<Book: Les Fleurs du Mal>',
'<Book: Les Paradis Artificiels>',
])
def test_inline_formsets_save_as_new(self):
# The save_as_new parameter lets you re-associate the data to a new
# instance. This is used in the admin for save_as functionality.
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2, fields="__all__")
Author.objects.create(name='Charles Baudelaire')
data = {
'book_set-TOTAL_FORMS': '3', # the number of forms rendered
'book_set-INITIAL_FORMS': '2', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-id': '1',
'book_set-0-title': 'Les Fleurs du Mal',
'book_set-1-id': '2',
'book_set-1-title': 'Les Paradis Artificiels',
'book_set-2-title': '',
}
formset = AuthorBooksFormSet(data, instance=Author(), save_as_new=True)
self.assertTrue(formset.is_valid())
new_author = Author.objects.create(name='Charles Baudelaire')
formset = AuthorBooksFormSet(data, instance=new_author, save_as_new=True)
saved = formset.save()
self.assertEqual(len(saved), 2)
book1, book2 = saved
self.assertEqual(book1.title, 'Les Fleurs du Mal')
self.assertEqual(book2.title, 'Les Paradis Artificiels')
# Test using a custom prefix on an inline formset.
formset = AuthorBooksFormSet(prefix="test")
self.assertEqual(len(formset.forms), 2)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_test-0-title">Title:</label> <input id="id_test-0-title" type="text" name="test-0-title" maxlength="100" /><input type="hidden" name="test-0-author" id="id_test-0-author" /><input type="hidden" name="test-0-id" id="id_test-0-id" /></p>')
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_test-1-title">Title:</label> <input id="id_test-1-title" type="text" name="test-1-title" maxlength="100" /><input type="hidden" name="test-1-author" id="id_test-1-author" /><input type="hidden" name="test-1-id" id="id_test-1-id" /></p>')
def test_inline_formsets_with_custom_pk(self):
# Test inline formsets where the inline-edited object has a custom
# primary key that is not the fk to the parent object.
self.maxDiff = 1024
AuthorBooksFormSet2 = inlineformset_factory(Author, BookWithCustomPK, can_delete=False, extra=1, fields="__all__")
author = Author.objects.create(pk=1, name='Charles Baudelaire')
formset = AuthorBooksFormSet2(instance=author)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_bookwithcustompk_set-0-my_pk">My pk:</label> <input id="id_bookwithcustompk_set-0-my_pk" type="number" name="bookwithcustompk_set-0-my_pk" step="1" /></p>\n'
'<p><label for="id_bookwithcustompk_set-0-title">Title:</label> <input id="id_bookwithcustompk_set-0-title" type="text" name="bookwithcustompk_set-0-title" maxlength="100" /><input type="hidden" name="bookwithcustompk_set-0-author" value="1" id="id_bookwithcustompk_set-0-author" /></p>')
data = {
'bookwithcustompk_set-TOTAL_FORMS': '1', # the number of forms rendered
'bookwithcustompk_set-INITIAL_FORMS': '0', # the number of forms with initial data
'bookwithcustompk_set-MAX_NUM_FORMS': '', # the max number of forms
'bookwithcustompk_set-0-my_pk': '77777',
'bookwithcustompk_set-0-title': 'Les Fleurs du Mal',
}
formset = AuthorBooksFormSet2(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
book1, = saved
self.assertEqual(book1.pk, 77777)
book1 = author.bookwithcustompk_set.get()
self.assertEqual(book1.title, 'Les Fleurs du Mal')
def test_inline_formsets_with_multi_table_inheritance(self):
# Test inline formsets where the inline-edited object uses multi-table
# inheritance, thus has a non AutoField yet auto-created primary key.
AuthorBooksFormSet3 = inlineformset_factory(Author, AlternateBook, can_delete=False, extra=1, fields="__all__")
author = Author.objects.create(pk=1, name='Charles Baudelaire')
formset = AuthorBooksFormSet3(instance=author)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_alternatebook_set-0-title">Title:</label> <input id="id_alternatebook_set-0-title" type="text" name="alternatebook_set-0-title" maxlength="100" /></p>\n'
'<p><label for="id_alternatebook_set-0-notes">Notes:</label> <input id="id_alternatebook_set-0-notes" type="text" name="alternatebook_set-0-notes" maxlength="100" /><input type="hidden" name="alternatebook_set-0-author" value="1" id="id_alternatebook_set-0-author" /><input type="hidden" name="alternatebook_set-0-book_ptr" id="id_alternatebook_set-0-book_ptr" /></p>')
data = {
'alternatebook_set-TOTAL_FORMS': '1', # the number of forms rendered
'alternatebook_set-INITIAL_FORMS': '0', # the number of forms with initial data
'alternatebook_set-MAX_NUM_FORMS': '', # the max number of forms
'alternatebook_set-0-title': 'Flowers of Evil',
'alternatebook_set-0-notes': 'English translation of Les Fleurs du Mal'
}
formset = AuthorBooksFormSet3(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
book1, = saved
self.assertEqual(book1.title, 'Flowers of Evil')
self.assertEqual(book1.notes, 'English translation of Les Fleurs du Mal')
@skipUnlessDBFeature('supports_partially_nullable_unique_constraints')
def test_inline_formsets_with_nullable_unique_together(self):
# Test inline formsets where the inline-edited object has a
# unique_together constraint with a nullable member
AuthorBooksFormSet4 = inlineformset_factory(Author, BookWithOptionalAltEditor, can_delete=False, extra=2, fields="__all__")
author = Author.objects.create(pk=1, name='Charles Baudelaire')
data = {
'bookwithoptionalalteditor_set-TOTAL_FORMS': '2', # the number of forms rendered
'bookwithoptionalalteditor_set-INITIAL_FORMS': '0', # the number of forms with initial data
'bookwithoptionalalteditor_set-MAX_NUM_FORMS': '', # the max number of forms
'bookwithoptionalalteditor_set-0-author': '1',
'bookwithoptionalalteditor_set-0-title': 'Les Fleurs du Mal',
'bookwithoptionalalteditor_set-1-author': '1',
'bookwithoptionalalteditor_set-1-title': 'Les Fleurs du Mal',
}
formset = AuthorBooksFormSet4(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 2)
book1, book2 = saved
self.assertEqual(book1.author_id, 1)
self.assertEqual(book1.title, 'Les Fleurs du Mal')
self.assertEqual(book2.author_id, 1)
self.assertEqual(book2.title, 'Les Fleurs du Mal')
def test_inline_formsets_with_custom_save_method(self):
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2, fields="__all__")
author = Author.objects.create(pk=1, name='Charles Baudelaire')
book1 = Book.objects.create(pk=1, author=author, title='Les Paradis Artificiels')
book2 = Book.objects.create(pk=2, author=author, title='Les Fleurs du Mal')
book3 = Book.objects.create(pk=3, author=author, title='Flowers of Evil')
class PoemForm(forms.ModelForm):
def save(self, commit=True):
# change the name to "Brooklyn Bridge" just to be a jerk.
poem = super(PoemForm, self).save(commit=False)
poem.name = "Brooklyn Bridge"
if commit:
poem.save()
return poem
PoemFormSet = inlineformset_factory(Poet, Poem, form=PoemForm, fields="__all__")
data = {
'poem_set-TOTAL_FORMS': '3', # the number of forms rendered
'poem_set-INITIAL_FORMS': '0', # the number of forms with initial data
'poem_set-MAX_NUM_FORMS': '', # the max number of forms
'poem_set-0-name': 'The Cloud in Trousers',
'poem_set-1-name': 'I',
'poem_set-2-name': '',
}
poet = Poet.objects.create(name='Vladimir Mayakovsky')
formset = PoemFormSet(data=data, instance=poet)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 2)
poem1, poem2 = saved
self.assertEqual(poem1.name, 'Brooklyn Bridge')
self.assertEqual(poem2.name, 'Brooklyn Bridge')
# We can provide a custom queryset to our InlineFormSet:
custom_qs = Book.objects.order_by('-title')
formset = AuthorBooksFormSet(instance=author, queryset=custom_qs)
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" value="Les Paradis Artificiels" maxlength="100" /><input type="hidden" name="book_set-0-author" value="1" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" value="1" id="id_book_set-0-id" /></p>')
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" value="Les Fleurs du Mal" maxlength="100" /><input type="hidden" name="book_set-1-author" value="1" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" value="2" id="id_book_set-1-id" /></p>')
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" value="Flowers of Evil" maxlength="100" /><input type="hidden" name="book_set-2-author" value="1" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" value="3" id="id_book_set-2-id" /></p>')
self.assertHTMLEqual(formset.forms[3].as_p(),
'<p><label for="id_book_set-3-title">Title:</label> <input id="id_book_set-3-title" type="text" name="book_set-3-title" maxlength="100" /><input type="hidden" name="book_set-3-author" value="1" id="id_book_set-3-author" /><input type="hidden" name="book_set-3-id" id="id_book_set-3-id" /></p>')
self.assertHTMLEqual(formset.forms[4].as_p(),
'<p><label for="id_book_set-4-title">Title:</label> <input id="id_book_set-4-title" type="text" name="book_set-4-title" maxlength="100" /><input type="hidden" name="book_set-4-author" value="1" id="id_book_set-4-author" /><input type="hidden" name="book_set-4-id" id="id_book_set-4-id" /></p>')
data = {
'book_set-TOTAL_FORMS': '5', # the number of forms rendered
'book_set-INITIAL_FORMS': '3', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-id': str(book1.id),
'book_set-0-title': 'Les Paradis Artificiels',
'book_set-1-id': str(book2.id),
'book_set-1-title': 'Les Fleurs du Mal',
'book_set-2-id': str(book3.id),
'book_set-2-title': 'Flowers of Evil',
'book_set-3-title': 'Revue des deux mondes',
'book_set-4-title': '',
}
formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs)
self.assertTrue(formset.is_valid())
custom_qs = Book.objects.filter(title__startswith='F')
formset = AuthorBooksFormSet(instance=author, queryset=custom_qs)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" name="book_set-0-title" value="Flowers of Evil" maxlength="100" /><input type="hidden" name="book_set-0-author" value="1" id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" value="3" id="id_book_set-0-id" /></p>')
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label> <input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" /><input type="hidden" name="book_set-1-author" value="1" id="id_book_set-1-author" /><input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>')
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label> <input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" /><input type="hidden" name="book_set-2-author" value="1" id="id_book_set-2-author" /><input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>')
data = {
'book_set-TOTAL_FORMS': '3', # the number of forms rendered
'book_set-INITIAL_FORMS': '1', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-id': str(book3.id),
'book_set-0-title': 'Flowers of Evil',
'book_set-1-title': 'Revue des deux mondes',
'book_set-2-title': '',
}
formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs)
self.assertTrue(formset.is_valid())
def test_custom_pk(self):
        # We need to ensure that the custom primary key field is displayed in the form.
CustomPrimaryKeyFormSet = modelformset_factory(CustomPrimaryKey, fields="__all__")
formset = CustomPrimaryKeyFormSet()
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-my_pk">My pk:</label> <input id="id_form-0-my_pk" type="text" name="form-0-my_pk" maxlength="10" /></p>\n'
'<p><label for="id_form-0-some_field">Some field:</label> <input id="id_form-0-some_field" type="text" name="form-0-some_field" maxlength="100" /></p>')
# Custom primary keys with ForeignKey, OneToOneField and AutoField ############
place = Place.objects.create(pk=1, name='Giordanos', city='Chicago')
FormSet = inlineformset_factory(Place, Owner, extra=2, can_delete=False, fields="__all__")
formset = FormSet(instance=place)
self.assertEqual(len(formset.forms), 2)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_owner_set-0-name">Name:</label> <input id="id_owner_set-0-name" type="text" name="owner_set-0-name" maxlength="100" /><input type="hidden" name="owner_set-0-place" value="1" id="id_owner_set-0-place" /><input type="hidden" name="owner_set-0-auto_id" id="id_owner_set-0-auto_id" /></p>')
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_owner_set-1-name">Name:</label> <input id="id_owner_set-1-name" type="text" name="owner_set-1-name" maxlength="100" /><input type="hidden" name="owner_set-1-place" value="1" id="id_owner_set-1-place" /><input type="hidden" name="owner_set-1-auto_id" id="id_owner_set-1-auto_id" /></p>')
data = {
'owner_set-TOTAL_FORMS': '2',
'owner_set-INITIAL_FORMS': '0',
'owner_set-MAX_NUM_FORMS': '',
'owner_set-0-auto_id': '',
'owner_set-0-name': 'Joe Perry',
'owner_set-1-auto_id': '',
'owner_set-1-name': '',
}
formset = FormSet(data, instance=place)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
owner1, = saved
self.assertEqual(owner1.name, 'Joe Perry')
self.assertEqual(owner1.place.name, 'Giordanos')
formset = FormSet(instance=place)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_owner_set-0-name">Name:</label> <input id="id_owner_set-0-name" type="text" name="owner_set-0-name" value="Joe Perry" maxlength="100" /><input type="hidden" name="owner_set-0-place" value="1" id="id_owner_set-0-place" /><input type="hidden" name="owner_set-0-auto_id" value="%d" id="id_owner_set-0-auto_id" /></p>'
% owner1.auto_id)
self.assertHTMLEqual(formset.forms[1].as_p(),
'<p><label for="id_owner_set-1-name">Name:</label> <input id="id_owner_set-1-name" type="text" name="owner_set-1-name" maxlength="100" /><input type="hidden" name="owner_set-1-place" value="1" id="id_owner_set-1-place" /><input type="hidden" name="owner_set-1-auto_id" id="id_owner_set-1-auto_id" /></p>')
self.assertHTMLEqual(formset.forms[2].as_p(),
'<p><label for="id_owner_set-2-name">Name:</label> <input id="id_owner_set-2-name" type="text" name="owner_set-2-name" maxlength="100" /><input type="hidden" name="owner_set-2-place" value="1" id="id_owner_set-2-place" /><input type="hidden" name="owner_set-2-auto_id" id="id_owner_set-2-auto_id" /></p>')
data = {
'owner_set-TOTAL_FORMS': '3',
'owner_set-INITIAL_FORMS': '1',
'owner_set-MAX_NUM_FORMS': '',
'owner_set-0-auto_id': six.text_type(owner1.auto_id),
'owner_set-0-name': 'Joe Perry',
'owner_set-1-auto_id': '',
'owner_set-1-name': 'Jack Berry',
'owner_set-2-auto_id': '',
'owner_set-2-name': '',
}
formset = FormSet(data, instance=place)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
owner2, = saved
self.assertEqual(owner2.name, 'Jack Berry')
self.assertEqual(owner2.place.name, 'Giordanos')
# Ensure a custom primary key that is a ForeignKey or OneToOneField get rendered for the user to choose.
FormSet = modelformset_factory(OwnerProfile, fields="__all__")
formset = FormSet()
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_form-0-owner">Owner:</label> <select name="form-0-owner" id="id_form-0-owner">\n'
'<option value="" selected="selected">---------</option>\n'
'<option value="%d">Joe Perry at Giordanos</option>\n'
'<option value="%d">Jack Berry at Giordanos</option>\n'
'</select></p>\n'
'<p><label for="id_form-0-age">Age:</label> <input type="number" name="form-0-age" id="id_form-0-age" min="0" /></p>'
% (owner1.auto_id, owner2.auto_id))
owner1 = Owner.objects.get(name='Joe Perry')
FormSet = inlineformset_factory(Owner, OwnerProfile, max_num=1, can_delete=False, fields="__all__")
self.assertEqual(FormSet.max_num, 1)
formset = FormSet(instance=owner1)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_ownerprofile-0-age">Age:</label> <input type="number" name="ownerprofile-0-age" id="id_ownerprofile-0-age" min="0" /><input type="hidden" name="ownerprofile-0-owner" value="%d" id="id_ownerprofile-0-owner" /></p>'
% owner1.auto_id)
data = {
'ownerprofile-TOTAL_FORMS': '1',
'ownerprofile-INITIAL_FORMS': '0',
'ownerprofile-MAX_NUM_FORMS': '1',
'ownerprofile-0-owner': '',
'ownerprofile-0-age': '54',
}
formset = FormSet(data, instance=owner1)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
profile1, = saved
self.assertEqual(profile1.owner, owner1)
self.assertEqual(profile1.age, 54)
formset = FormSet(instance=owner1)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_ownerprofile-0-age">Age:</label> <input type="number" name="ownerprofile-0-age" value="54" id="id_ownerprofile-0-age" min="0" /><input type="hidden" name="ownerprofile-0-owner" value="%d" id="id_ownerprofile-0-owner" /></p>'
% owner1.auto_id)
data = {
'ownerprofile-TOTAL_FORMS': '1',
'ownerprofile-INITIAL_FORMS': '1',
'ownerprofile-MAX_NUM_FORMS': '1',
'ownerprofile-0-owner': six.text_type(owner1.auto_id),
'ownerprofile-0-age': '55',
}
formset = FormSet(data, instance=owner1)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
profile1, = saved
self.assertEqual(profile1.owner, owner1)
self.assertEqual(profile1.age, 55)
def test_unique_true_enforces_max_num_one(self):
# ForeignKey with unique=True should enforce max_num=1
place = Place.objects.create(pk=1, name='Giordanos', city='Chicago')
FormSet = inlineformset_factory(Place, Location, can_delete=False, fields="__all__")
self.assertEqual(FormSet.max_num, 1)
formset = FormSet(instance=place)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(formset.forms[0].as_p(),
'<p><label for="id_location_set-0-lat">Lat:</label> <input id="id_location_set-0-lat" type="text" name="location_set-0-lat" maxlength="100" /></p>\n'
'<p><label for="id_location_set-0-lon">Lon:</label> <input id="id_location_set-0-lon" type="text" name="location_set-0-lon" maxlength="100" /><input type="hidden" name="location_set-0-place" value="1" id="id_location_set-0-place" /><input type="hidden" name="location_set-0-id" id="id_location_set-0-id" /></p>')
def test_foreign_keys_in_parents(self):
self.assertEqual(type(_get_foreign_key(Restaurant, Owner)), models.ForeignKey)
self.assertEqual(type(_get_foreign_key(MexicanRestaurant, Owner)), models.ForeignKey)
def test_unique_validation(self):
FormSet = modelformset_factory(Product, fields="__all__", extra=1)
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-slug': 'car-red',
}
formset = FormSet(data)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
product1, = saved
self.assertEqual(product1.slug, 'car-red')
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-slug': 'car-red',
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'slug': ['Product with this Slug already exists.']}])
def test_modelformset_validate_max_flag(self):
# If validate_max is set and max_num is less than TOTAL_FORMS in the
# data, then throw an exception. MAX_NUM_FORMS in the data is
# irrelevant here (it's output as a hint for the client but its
# value in the returned data is not checked)
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '2', # should be ignored
'form-0-price': '12.00',
'form-0-quantity': '1',
'form-1-price': '24.00',
'form-1-quantity': '2',
}
FormSet = modelformset_factory(Price, fields="__all__", extra=1, max_num=1, validate_max=True)
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please submit 1 or fewer forms.'])
# Now test the same thing without the validate_max flag to ensure
# default behavior is unchanged
FormSet = modelformset_factory(Price, fields="__all__", extra=1, max_num=1)
formset = FormSet(data)
self.assertTrue(formset.is_valid())
def test_unique_together_validation(self):
FormSet = modelformset_factory(Price, fields="__all__", extra=1)
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-price': '12.00',
'form-0-quantity': '1',
}
formset = FormSet(data)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
price1, = saved
self.assertEqual(price1.price, Decimal('12.00'))
self.assertEqual(price1.quantity, 1)
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-price': '12.00',
'form-0-quantity': '1',
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'__all__': ['Price with this Price and Quantity already exists.']}])
def test_unique_together_with_inlineformset_factory(self):
# Also see bug #8882.
repository = Repository.objects.create(name='Test Repo')
FormSet = inlineformset_factory(Repository, Revision, extra=1, fields="__all__")
data = {
'revision_set-TOTAL_FORMS': '1',
'revision_set-INITIAL_FORMS': '0',
'revision_set-MAX_NUM_FORMS': '',
'revision_set-0-repository': repository.pk,
'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
'revision_set-0-DELETE': '',
}
formset = FormSet(data, instance=repository)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
revision1, = saved
self.assertEqual(revision1.repository, repository)
self.assertEqual(revision1.revision, '146239817507f148d448db38840db7c3cbf47c76')
        # Attempt to save the same revision against the same repo.
data = {
'revision_set-TOTAL_FORMS': '1',
'revision_set-INITIAL_FORMS': '0',
'revision_set-MAX_NUM_FORMS': '',
'revision_set-0-repository': repository.pk,
'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
'revision_set-0-DELETE': '',
}
formset = FormSet(data, instance=repository)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'__all__': ['Revision with this Repository and Revision already exists.']}])
# unique_together with inlineformset_factory with overridden form fields
# Also see #9494
FormSet = inlineformset_factory(Repository, Revision, fields=('revision',), extra=1)
data = {
'revision_set-TOTAL_FORMS': '1',
'revision_set-INITIAL_FORMS': '0',
'revision_set-MAX_NUM_FORMS': '',
'revision_set-0-repository': repository.pk,
'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
'revision_set-0-DELETE': '',
}
formset = FormSet(data, instance=repository)
self.assertFalse(formset.is_valid())
def test_callable_defaults(self):
# Use of callable defaults (see bug #7975).
person = Person.objects.create(name='Ringo')
FormSet = inlineformset_factory(Person, Membership, can_delete=False, extra=1, fields="__all__")
formset = FormSet(instance=person)
        # Django will render a hidden field for model fields that have a callable
        # default. This is required to ensure the value is tested for change correctly
        # when determining which extra forms have changed and need to be saved.
self.assertEqual(len(formset.forms), 1) # this formset only has one form
form = formset.forms[0]
now = form.fields['date_joined'].initial()
result = form.as_p()
result = re.sub(r'[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}(?:\.[0-9]+)?', '__DATETIME__', result)
self.assertHTMLEqual(result,
'<p><label for="id_membership_set-0-date_joined">Date joined:</label> <input type="text" name="membership_set-0-date_joined" value="__DATETIME__" id="id_membership_set-0-date_joined" /><input type="hidden" name="initial-membership_set-0-date_joined" value="__DATETIME__" id="initial-membership_set-0-id_membership_set-0-date_joined" /></p>\n'
'<p><label for="id_membership_set-0-karma">Karma:</label> <input type="number" name="membership_set-0-karma" id="id_membership_set-0-karma" /><input type="hidden" name="membership_set-0-person" value="%d" id="id_membership_set-0-person" /><input type="hidden" name="membership_set-0-id" id="id_membership_set-0-id" /></p>'
% person.id)
        # Test validation with callable defaults. Validation relies on the hidden initial fields.
data = {
'membership_set-TOTAL_FORMS': '1',
'membership_set-INITIAL_FORMS': '0',
'membership_set-MAX_NUM_FORMS': '',
'membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
'initial-membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
'membership_set-0-karma': '',
}
formset = FormSet(data, instance=person)
self.assertTrue(formset.is_valid())
# now test for when the data changes
one_day_later = now + datetime.timedelta(days=1)
filled_data = {
'membership_set-TOTAL_FORMS': '1',
'membership_set-INITIAL_FORMS': '0',
'membership_set-MAX_NUM_FORMS': '',
'membership_set-0-date_joined': six.text_type(one_day_later.strftime('%Y-%m-%d %H:%M:%S')),
'initial-membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
'membership_set-0-karma': '',
}
formset = FormSet(filled_data, instance=person)
self.assertFalse(formset.is_valid())
# now test with split datetime fields
class MembershipForm(forms.ModelForm):
date_joined = forms.SplitDateTimeField(initial=now)
class Meta:
model = Membership
fields = "__all__"
def __init__(self, **kwargs):
super(MembershipForm, self).__init__(**kwargs)
self.fields['date_joined'].widget = forms.SplitDateTimeWidget()
FormSet = inlineformset_factory(Person, Membership, form=MembershipForm, can_delete=False, extra=1, fields="__all__")
data = {
'membership_set-TOTAL_FORMS': '1',
'membership_set-INITIAL_FORMS': '0',
'membership_set-MAX_NUM_FORMS': '',
'membership_set-0-date_joined_0': six.text_type(now.strftime('%Y-%m-%d')),
'membership_set-0-date_joined_1': six.text_type(now.strftime('%H:%M:%S')),
'initial-membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
'membership_set-0-karma': '',
}
formset = FormSet(data, instance=person)
self.assertTrue(formset.is_valid())
def test_inlineformset_factory_with_null_fk(self):
# inlineformset_factory tests with fk having null=True. see #9462.
# create some data that will exhibit the issue
team = Team.objects.create(name="Red Vipers")
Player(name="Timmy").save()
Player(name="Bobby", team=team).save()
PlayerInlineFormSet = inlineformset_factory(Team, Player, fields="__all__")
formset = PlayerInlineFormSet()
self.assertQuerysetEqual(formset.get_queryset(), [])
formset = PlayerInlineFormSet(instance=team)
players = formset.get_queryset()
self.assertEqual(len(players), 1)
player1, = players
self.assertEqual(player1.team, team)
self.assertEqual(player1.name, 'Bobby')
def test_model_formset_with_custom_pk(self):
        # A formset for a model with a custom primary key; the primary key field
        # still needs to be added to the form automatically.
FormSet = modelformset_factory(ClassyMexicanRestaurant, fields=["tacos_are_yummy"])
self.assertEqual(sorted(FormSet().forms[0].fields.keys()), ['restaurant', 'tacos_are_yummy'])
def test_model_formset_with_initial_model_instance(self):
# has_changed should compare model instance and primary key
# see #18898
FormSet = modelformset_factory(Poem, fields='__all__')
john_milton = Poet(name="John Milton")
john_milton.save()
data = {
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': '',
'form-0-name': '',
'form-0-poet': str(john_milton.id),
}
formset = FormSet(initial=[{'poet': john_milton}], data=data)
self.assertFalse(formset.extra_forms[0].has_changed())
def test_model_formset_with_initial_queryset(self):
# has_changed should work with queryset and list of pk's
# see #18898
FormSet = modelformset_factory(AuthorMeeting, fields='__all__')
Author.objects.create(pk=1, name='Charles Baudelaire')
data = {
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': '',
'form-0-name': '',
'form-0-created': '',
'form-0-authors': list(Author.objects.values_list('id', flat=True)),
}
formset = FormSet(initial=[{'authors': Author.objects.all()}], data=data)
self.assertFalse(formset.extra_forms[0].has_changed())
def test_prevent_duplicates_from_with_the_same_formset(self):
FormSet = modelformset_factory(Product, fields="__all__", extra=2)
data = {
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': '',
'form-0-slug': 'red_car',
'form-1-slug': 'red_car',
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for slug.'])
FormSet = modelformset_factory(Price, fields="__all__", extra=2)
data = {
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': '',
'form-0-price': '25',
'form-0-quantity': '7',
'form-1-price': '25',
'form-1-quantity': '7',
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for price and quantity, which must be unique.'])
        # Only the price field is specified; this should skip any unique checks, since
        # the unique_together constraint is not fulfilled. This will fail with a KeyError if broken.
FormSet = modelformset_factory(Price, fields=("price",), extra=2)
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-price': '24',
'form-1-price': '24',
}
formset = FormSet(data)
self.assertTrue(formset.is_valid())
FormSet = inlineformset_factory(Author, Book, extra=0, fields="__all__")
author = Author.objects.create(pk=1, name='Charles Baudelaire')
Book.objects.create(pk=1, author=author, title='Les Paradis Artificiels')
Book.objects.create(pk=2, author=author, title='Les Fleurs du Mal')
Book.objects.create(pk=3, author=author, title='Flowers of Evil')
book_ids = author.book_set.order_by('id').values_list('id', flat=True)
data = {
'book_set-TOTAL_FORMS': '2',
'book_set-INITIAL_FORMS': '2',
'book_set-MAX_NUM_FORMS': '',
'book_set-0-title': 'The 2008 Election',
'book_set-0-author': str(author.id),
'book_set-0-id': str(book_ids[0]),
'book_set-1-title': 'The 2008 Election',
'book_set-1-author': str(author.id),
'book_set-1-id': str(book_ids[1]),
}
formset = FormSet(data=data, instance=author)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for title.'])
self.assertEqual(formset.errors,
[{}, {'__all__': ['Please correct the duplicate values below.']}])
FormSet = modelformset_factory(Post, fields="__all__", extra=2)
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'blah',
'form-0-slug': 'Morning',
'form-0-subtitle': 'foo',
'form-0-posted': '2009-01-01',
'form-1-title': 'blah',
'form-1-slug': 'Morning in Prague',
'form-1-subtitle': 'rawr',
'form-1-posted': '2009-01-01'
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for title which must be unique for the date in posted.'])
self.assertEqual(formset.errors,
[{}, {'__all__': ['Please correct the duplicate values below.']}])
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
'form-0-slug': 'Morning in Prague',
'form-0-subtitle': 'foo',
'form-0-posted': '2009-01-01',
'form-1-title': 'blah',
'form-1-slug': 'Morning in Prague',
'form-1-subtitle': 'rawr',
'form-1-posted': '2009-08-02'
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for slug which must be unique for the year in posted.'])
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
'form-0-slug': 'Morning in Prague',
'form-0-subtitle': 'rawr',
'form-0-posted': '2008-08-01',
'form-1-title': 'blah',
'form-1-slug': 'Prague',
'form-1-subtitle': 'rawr',
'form-1-posted': '2009-08-02'
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for subtitle which must be unique for the month in posted.'])
class TestModelFormsetOverridesThroughFormMeta(TestCase):
def test_modelformset_factory_widgets(self):
widgets = {
'name': forms.TextInput(attrs={'class': 'poet'})
}
PoetFormSet = modelformset_factory(Poet, fields="__all__", widgets=widgets)
form = PoetFormSet.form()
self.assertHTMLEqual(
"%s" % form['name'],
'<input id="id_name" maxlength="100" type="text" class="poet" name="name" />'
)
def test_inlineformset_factory_widgets(self):
widgets = {
'title': forms.TextInput(attrs={'class': 'book'})
}
BookFormSet = inlineformset_factory(Author, Book, widgets=widgets, fields="__all__")
form = BookFormSet.form()
self.assertHTMLEqual(
"%s" % form['title'],
'<input class="book" id="id_title" maxlength="100" name="title" type="text" />'
)
def test_modelformset_factory_labels_overrides(self):
BookFormSet = modelformset_factory(Book, fields="__all__", labels={
'title': 'Name'
})
form = BookFormSet.form()
self.assertHTMLEqual(form['title'].label_tag(), '<label for="id_title">Name:</label>')
def test_inlineformset_factory_labels_overrides(self):
BookFormSet = inlineformset_factory(Author, Book, fields="__all__", labels={
'title': 'Name'
})
form = BookFormSet.form()
self.assertHTMLEqual(form['title'].label_tag(), '<label for="id_title">Name:</label>')
def test_modelformset_factory_help_text_overrides(self):
BookFormSet = modelformset_factory(Book, fields="__all__", help_texts={
'title': 'Choose carefully.'
})
form = BookFormSet.form()
self.assertEqual(form['title'].help_text, 'Choose carefully.')
def test_inlineformset_factory_help_text_overrides(self):
BookFormSet = inlineformset_factory(Author, Book, fields="__all__", help_texts={
'title': 'Choose carefully.'
})
form = BookFormSet.form()
self.assertEqual(form['title'].help_text, 'Choose carefully.')
def test_modelformset_factory_error_messages_overrides(self):
author = Author.objects.create(pk=1, name='Charles Baudelaire')
BookFormSet = modelformset_factory(Book, fields="__all__", error_messages={
'title': {
'max_length': 'Title too long!!'
}
})
form = BookFormSet.form(data={'title': 'Foo ' * 30, 'author': author.id})
form.full_clean()
self.assertEqual(form.errors, {'title': ['Title too long!!']})
def test_inlineformset_factory_error_messages_overrides(self):
author = Author.objects.create(pk=1, name='Charles Baudelaire')
BookFormSet = inlineformset_factory(Author, Book, fields="__all__", error_messages={
'title': {
'max_length': 'Title too long!!'
}
})
form = BookFormSet.form(data={'title': 'Foo ' * 30, 'author': author.id})
form.full_clean()
self.assertEqual(form.errors, {'title': ['Title too long!!']})
|
|
from __future__ import absolute_import
from __future__ import print_function
from contextlib import contextmanager
from typing import (cast, Any, Callable, Dict, Generator, Iterable, Iterator, List, Mapping,
                    Optional, Set, Sized, Tuple, Union, IO)
from django.core.urlresolvers import LocaleRegexURLResolver
from django.conf import settings
from django.test import TestCase
from django.test.client import (
BOUNDARY, MULTIPART_CONTENT, encode_multipart,
)
from django.template import loader
from django.http import HttpResponse
from django.db.utils import IntegrityError
from django.utils.translation import ugettext as _
from zerver.lib.avatar import avatar_url
from zerver.lib.initial_password import initial_password
from zerver.lib.db import TimeTrackingCursor
from zerver.lib.str_utils import force_text
from zerver.lib import cache
from zerver.tornado import event_queue
from zerver.tornado.handlers import allocate_handler_id
from zerver.worker import queue_processors
from zerver.lib.actions import (
check_send_message, create_stream_if_needed, bulk_add_subscriptions,
get_display_recipient, bulk_remove_subscriptions
)
from zerver.models import (
get_realm,
get_stream,
get_user_profile_by_email,
Client,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
UserProfile,
)
from zerver.lib.request import JsonableError
import collections
import base64
import mock
import os
import re
import sys
import time
import ujson
import unittest
from six.moves import urllib
from six import text_type, binary_type
from zerver.lib.str_utils import NonBinaryStr
from contextlib import contextmanager
import six
@contextmanager
def simulated_queue_client(client):
# type: (type) -> Iterator[None]
real_SimpleQueueClient = queue_processors.SimpleQueueClient
queue_processors.SimpleQueueClient = client # type: ignore # https://github.com/JukkaL/mypy/issues/1152
yield
queue_processors.SimpleQueueClient = real_SimpleQueueClient # type: ignore # https://github.com/JukkaL/mypy/issues/1152
@contextmanager
def tornado_redirected_to_list(lst):
# type: (List[Mapping[str, Any]]) -> Iterator[None]
real_event_queue_process_notification = event_queue.process_notification
event_queue.process_notification = lst.append
yield
event_queue.process_notification = real_event_queue_process_notification
@contextmanager
def simulated_empty_cache():
# type: () -> Generator[List[Tuple[str, Union[text_type, List[text_type]], text_type]], None, None]
cache_queries = [] # type: List[Tuple[str, Union[text_type, List[text_type]], text_type]]
def my_cache_get(key, cache_name=None):
# type: (text_type, Optional[str]) -> Any
cache_queries.append(('get', key, cache_name))
return None
def my_cache_get_many(keys, cache_name=None):
# type: (List[text_type], Optional[str]) -> Dict[text_type, Any]
cache_queries.append(('getmany', keys, cache_name))
return None
old_get = cache.cache_get
old_get_many = cache.cache_get_many
cache.cache_get = my_cache_get
cache.cache_get_many = my_cache_get_many
yield cache_queries
cache.cache_get = old_get
cache.cache_get_many = old_get_many
@contextmanager
def queries_captured(include_savepoints=False):
# type: (Optional[bool]) -> Generator[List[Dict[str, Union[str, binary_type]]], None, None]
'''
Allow a user to capture just the queries executed during
the with statement.
'''
queries = [] # type: List[Dict[str, Union[str, binary_type]]]
def wrapper_execute(self, action, sql, params=()):
# type: (TimeTrackingCursor, Callable, NonBinaryStr, Iterable[Any]) -> None
start = time.time()
try:
return action(sql, params)
finally:
stop = time.time()
duration = stop - start
if include_savepoints or ('SAVEPOINT' not in sql):
queries.append({
'sql': self.mogrify(sql, params).decode('utf-8'),
'time': "%.3f" % duration,
})
old_execute = TimeTrackingCursor.execute
old_executemany = TimeTrackingCursor.executemany
def cursor_execute(self, sql, params=()):
# type: (TimeTrackingCursor, NonBinaryStr, Iterable[Any]) -> None
return wrapper_execute(self, super(TimeTrackingCursor, self).execute, sql, params) # type: ignore # https://github.com/JukkaL/mypy/issues/1167
TimeTrackingCursor.execute = cursor_execute # type: ignore # https://github.com/JukkaL/mypy/issues/1167
def cursor_executemany(self, sql, params=()):
# type: (TimeTrackingCursor, NonBinaryStr, Iterable[Any]) -> None
return wrapper_execute(self, super(TimeTrackingCursor, self).executemany, sql, params) # type: ignore # https://github.com/JukkaL/mypy/issues/1167
TimeTrackingCursor.executemany = cursor_executemany # type: ignore # https://github.com/JukkaL/mypy/issues/1167
yield queries
TimeTrackingCursor.execute = old_execute # type: ignore # https://github.com/JukkaL/mypy/issues/1167
TimeTrackingCursor.executemany = old_executemany # type: ignore # https://github.com/JukkaL/mypy/issues/1167
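# A hypothetical usage sketch (not part of the original helpers): run some ORM code
# inside the block and inspect the SQL that was executed. UserProfile is imported at
# the top of this module; the function below is illustrative only.
def _example_count_queries():
    # type: () -> int
    with queries_captured() as queries:
        UserProfile.objects.count()
    return len(queries)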
def get_test_image_file(filename):
# type: (str) -> IO[Any]
test_avatar_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../tests/images'))
return open(os.path.join(test_avatar_dir, filename), 'rb')
def avatar_disk_path(user_profile, medium=False):
# type: (UserProfile, bool) -> str
avatar_url_path = avatar_url(user_profile, medium)
avatar_disk_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars",
avatar_url_path.split("/")[-1].split("?")[0])
return avatar_disk_path
def make_client(name):
# type: (str) -> Client
client, _ = Client.objects.get_or_create(name=name)
return client
def find_key_by_email(address):
# type: (text_type) -> text_type
from django.core.mail import outbox
key_regex = re.compile("accounts/do_confirm/([a-f0-9]{40})>")
for message in reversed(outbox):
if address in message.to:
return key_regex.search(message.body).groups()[0]
def message_ids(result):
# type: (Dict[str, Any]) -> Set[int]
return set(message['id'] for message in result['messages'])
def message_stream_count(user_profile):
# type: (UserProfile) -> int
return UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
count()
def most_recent_usermessage(user_profile):
# type: (UserProfile) -> UserMessage
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('-message')
return query[0] # Django does LIMIT here
def most_recent_message(user_profile):
# type: (UserProfile) -> Message
usermessage = most_recent_usermessage(user_profile)
return usermessage.message
def get_user_messages(user_profile):
# type: (UserProfile) -> List[Message]
query = UserMessage.objects. \
select_related("message"). \
filter(user_profile=user_profile). \
order_by('message')
return [um.message for um in query]
class DummyHandler(object):
def __init__(self):
# type: () -> None
allocate_handler_id(self) # type: ignore # this is a testing mock
class POSTRequestMock(object):
method = "POST"
def __init__(self, post_data, user_profile):
# type: (Dict[str, Any], UserProfile) -> None
self.GET = {} # type: Dict[str, Any]
self.POST = post_data
self.user = user_profile
self._tornado_handler = DummyHandler()
self._log_data = {} # type: Dict[str, Any]
self.META = {'PATH_INFO': 'test'}
class HostRequestMock(object):
"""A mock request object where get_host() works. Useful for testing
routes that use Zulip's subdomains feature"""
def __init__(self, host=settings.EXTERNAL_HOST):
# type: (text_type) -> None
self.host = host
def get_host(self):
# type: () -> text_type
return self.host
class MockPythonResponse(object):
def __init__(self, text, status_code):
# type: (text_type, int) -> None
self.text = text
self.status_code = status_code
@property
def ok(self):
# type: () -> bool
return self.status_code == 200
INSTRUMENTING = os.environ.get('TEST_INSTRUMENT_URL_COVERAGE', '') == 'TRUE'
INSTRUMENTED_CALLS = [] # type: List[Dict[str, Any]]
UrlFuncT = Callable[..., HttpResponse] # TODO: make more specific
def instrument_url(f):
# type: (UrlFuncT) -> UrlFuncT
if not INSTRUMENTING:
return f
else:
def wrapper(self, url, info={}, **kwargs):
# type: (Any, text_type, Dict[str, Any], **Any) -> HttpResponse
start = time.time()
result = f(self, url, info, **kwargs)
delay = time.time() - start
test_name = self.id()
if '?' in url:
url, extra_info = url.split('?', 1)
else:
extra_info = ''
INSTRUMENTED_CALLS.append(dict(
url=url,
status_code=result.status_code,
method=f.__name__,
delay=delay,
extra_info=extra_info,
info=info,
test_name=test_name,
kwargs=kwargs))
return result
return wrapper
def write_instrumentation_reports(full_suite):
# type: (bool) -> None
if INSTRUMENTING:
calls = INSTRUMENTED_CALLS
from zproject.urls import urlpatterns, v1_api_and_json_patterns
# Find our untested urls.
pattern_cnt = collections.defaultdict(int) # type: Dict[str, int]
def re_strip(r):
# type: (Any) -> str
return str(r).lstrip('^').rstrip('$')
def find_patterns(patterns, prefixes):
# type: (List[Any], List[str]) -> None
for pattern in patterns:
find_pattern(pattern, prefixes)
def cleanup_url(url):
# type: (str) -> str
if url.startswith('/'):
url = url[1:]
if url.startswith('http://testserver/'):
url = url[len('http://testserver/'):]
if url.startswith('http://zulip.testserver/'):
url = url[len('http://zulip.testserver/'):]
if url.startswith('http://testserver:9080/'):
url = url[len('http://testserver:9080/'):]
return url
def find_pattern(pattern, prefixes):
# type: (Any, List[str]) -> None
            if isinstance(pattern, LocaleRegexURLResolver):
return
if hasattr(pattern, 'url_patterns'):
return
canon_pattern = prefixes[0] + re_strip(pattern.regex.pattern)
cnt = 0
for call in calls:
if 'pattern' in call:
continue
url = cleanup_url(call['url'])
for prefix in prefixes:
if url.startswith(prefix):
match_url = url[len(prefix):]
if pattern.regex.match(match_url):
if call['status_code'] in [200, 204, 301, 302]:
cnt += 1
call['pattern'] = canon_pattern
pattern_cnt[canon_pattern] += cnt
find_patterns(urlpatterns, ['', 'en/', 'de/'])
find_patterns(v1_api_and_json_patterns, ['api/v1/', 'json/'])
assert len(pattern_cnt) > 100
untested_patterns = set([p for p in pattern_cnt if pattern_cnt[p] == 0])
# We exempt some patterns that are called via Tornado.
exempt_patterns = set([
'api/v1/events',
'api/v1/register',
])
untested_patterns -= exempt_patterns
var_dir = 'var' # TODO make sure path is robust here
fn = os.path.join(var_dir, 'url_coverage.txt')
with open(fn, 'w') as f:
for call in calls:
try:
line = ujson.dumps(call)
f.write(line + '\n')
except OverflowError:
print('''
A JSON overflow error was encountered while
producing the URL coverage report. Sometimes
this indicates that a test is passing objects
into methods like client_post(), which is
unnecessary and leads to false positives.
''')
print(call)
if full_suite:
print('INFO: URL coverage report is in %s' % (fn,))
print('INFO: Try running: ./tools/create-test-api-docs')
if full_suite and len(untested_patterns):
print("\nERROR: Some URLs are untested! Here's the list of untested URLs:")
for untested_pattern in sorted(untested_patterns):
print(" %s" % (untested_pattern,))
sys.exit(1)
def get_all_templates():
# type: () -> List[str]
templates = []
relpath = os.path.relpath
isfile = os.path.isfile
path_exists = os.path.exists
def is_valid_template(p, n):
# type: (text_type, text_type) -> bool
return (not n.startswith('.') and
not n.startswith('__init__') and
not n.endswith(".md") and
isfile(p))
def process(template_dir, dirname, fnames):
# type: (str, str, Iterable[str]) -> None
for name in fnames:
path = os.path.join(dirname, name)
if is_valid_template(path, name):
templates.append(relpath(path, template_dir))
for engine in loader.engines.all():
template_dirs = [d for d in engine.template_dirs if path_exists(d)]
for template_dir in template_dirs:
template_dir = os.path.normpath(template_dir)
for dirpath, dirnames, fnames in os.walk(template_dir):
process(template_dir, dirpath, fnames)
return templates
|
|
from scipy.misc import imresize
import utils
import numpy as np
from scipy import ndimage
from utils import to_int
def get_random_transform_params(input_shape, rotation_range = 0., height_shift_range = 0., width_shift_range = 0.,
shear_range = 0., zoom_range = (1, 1), horizontal_flip = False, resize_range = None,
distortion_prob = 0., additive_gaussian_noise_range = None, multiplication_gaussian = 0,
transform_colorspace_param = None, transform_colorspace_bounds = (-1, 1)):
"""
    This closure returns a function that receives a random instance through its parameter and,
    together with the closed-over input parameters, generates random parameters for a transformation matrix.
    :param distortion_prob: Probability of downsampling and then upsampling the image
    :param resize_range: Uniform interval from which the downsampling factor is drawn
    :param input_shape: Shape of the images to be transformed with a matrix built from these parameters
    :param rotation_range: Interval of rotation in degrees (applied in both directions)
    :param height_shift_range: Half-width of the two-sided interval of random shift in the vertical direction
    :param width_shift_range: Half-width of the two-sided interval of random shift in the horizontal direction
    :param shear_range: Half-width of the two-sided interval of random shear in the horizontal direction
    :param zoom_range: Tuple with 2 values giving the range of random zoom (values > 1.0 zoom out)
    :param horizontal_flip: Whether to randomly flip the image horizontally
    :return: Function that, given a random instance, generates random parameters for the transformation matrix
"""
def get_instance(rnd):
U = rnd.uniform
N = rnd.normal
rr = rotation_range
hs = height_shift_range
ws = width_shift_range
sr = shear_range
agn = additive_gaussian_noise_range
dp = distortion_prob
mg = multiplication_gaussian
tcp = transform_colorspace_param
tcb = transform_colorspace_bounds
return {
'input_shape': input_shape,
'theta': np.pi / 180 * U(-rr, rr) if rr else 0,
'ty': U(-hs, hs) * input_shape[0] if hs else 0,
'tx': U(-ws, ws) * input_shape[1] if ws else 0,
'shear': U(-sr, sr) if shear_range else 0,
'z': U(zoom_range[0], zoom_range[1]) if zoom_range != (1, 1) else 1,
'h_flip': rnd.rand() < 0.5 if horizontal_flip else False,
'add_noise': N(0, U(agn[0], agn[1]), input_shape) if agn is not None else None,
'resize': U(*resize_range) if U(0, 1) < dp else None,
'resize_smooth': U(0, 1) < 0.5,
'mul': N(1, mg) if mg > 0 else None,
'color_m': utils.crop_value(N(tcp[0], tcp[1], (3, 3)), tcb) if tcp is not None else None,
'agn': agn
}
return get_instance
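# A minimal usage sketch (illustrative only, not part of the original module): build
# the closure once with the desired augmentation ranges, then draw one parameter
# dictionary per sample from a seeded RandomState. The ranges below are arbitrary
# example values.
def _example_draw_transform_params(input_shape = (128, 128, 3)):
    t_params_f = get_random_transform_params(input_shape,
                                              rotation_range = 10.,
                                              height_shift_range = 0.1,
                                              width_shift_range = 0.1,
                                              zoom_range = (0.9, 1.1),
                                              horizontal_flip = True)
    rnd = np.random.RandomState(0)
    # The returned dict holds matrix parameters ('input_shape', 'theta', 'tx', 'ty',
    # 'shear', 'z') plus pixel-level options ('h_flip', 'add_noise', 'resize', ...).
    return t_params_f(rnd)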
def assemble_transformation_matrix(input_shape, theta = 0, tx = 0, ty = 0, shear = 0, z = 1):
"""
    Creates a transformation matrix from the given parameters. The resulting matrix has its origin in the centre of the image.
    :param input_shape: Shape of the images to be transformed with the matrix. The origin of the transformation
    matrix is set to the middle of the image.
:param theta: Rotation in radians
:param tx: Translation in X axis
:param ty: Translation in Y axis
:param shear: Shear in horizontal direction
:param z: Image zoom
:return: Transformation matrix
"""
def transform_matrix_offset_center(matrix, x, y):
"""
Creates translation matrix from input matrix with origin in the centre of image
:param matrix: Input matrix
:param x: Width of the image
:param y: Height of the image
:return: Returns shifted input matrix with origin in [y/2, x/2]
"""
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
t_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return t_matrix
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
translation_matrix = np.array([[1, 0, ty],
[0, 1, tx],
[0, 0, 1]])
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
zoom_matrix = np.array([[z, 0, 0],
[0, z, 0],
[0, 0, 1]])
# Assembling transformation matrix
transform_matrix = np.dot(np.dot(np.dot(rotation_matrix, translation_matrix), shear_matrix), zoom_matrix)
# Set origin of transformation to center of the image
h, w = input_shape[0], input_shape[1]
transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
return transform_matrix
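# A small sketch (not part of the original module): a pure 30 degree rotation about
# the centre of a 64x64 RGB image; translation, shear and zoom keep their identity
# defaults.
def _example_rotation_only_matrix():
    return assemble_transformation_matrix((64, 64, 3), theta = np.pi / 6)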
def transform(v, t_matrix, h_flip = False, add_noise = None, resize = None, resize_smooth = None,
mul = None, color_m = None):
"""
    Transform an image with the (inverted) transformation matrix.
    :param v: Input image to be transformed
    :param t_matrix: Transformation matrix
    :param h_flip: Whether to flip the image horizontally
    :param add_noise: Optional additive noise array with the same shape as the image
    :param resize: Optional downsampling factor; the image is downsampled and then upsampled back
    :param resize_smooth: Whether to use bilinear (True) or nearest-neighbour (False) interpolation when resizing
    :param mul: Optional scalar the image is multiplied by
    :param color_m: Optional 3x3 colour-space mixing matrix applied to the flattened RGB values
    :return: Transformed image
"""
def apply_transform(x, transform_matrix, channel_index = 0, fill_mode = 'nearest', cval = 0.):
x = np.rollaxis(x, channel_index, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndimage.interpolation.affine_transform(x_channel, final_affine_matrix,
final_offset, order = 2, mode = fill_mode,
cval = cval)
for x_channel in x]
x = np.stack(channel_images, axis = 0)
x = np.rollaxis(x, 0, channel_index + 1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
v = apply_transform(v, t_matrix, 2)
if h_flip:
v = flip_axis(v, 1)
if color_m is not None or mul is not None or add_noise is not None:
v = v.astype(np.float32)
shape = v.shape
if mul is not None:
v *= mul
if color_m is not None:
v = np.reshape(v, [-1, 3])
v = np.matmul(v, color_m)
v = np.reshape(v, shape)
if add_noise is not None:
v += add_noise
if resize is not None:
interpolation = 'bilinear' if resize_smooth else 'nearest'
v = imresize(v, (resize * np.array(shape[:2])).astype(np.uint16), interpolation)
v = imresize(v, shape[:2], interpolation)
v = utils.crop_value(v, [np.zeros(shape), np.ones(shape) * 255])
return v.astype(np.uint8)
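# A hypothetical end-to-end sketch (not part of the original module), mirroring what
# generate_minibatches() below does per image: draw random parameters, split them into
# matrix parameters and pixel-level options, assemble the matrix and apply it to a
# single HxWx3 uint8 image.
def _example_augment_single_image(img, rnd, t_params_f):
    t_params = t_params_f(rnd)
    h_flip = t_params.pop('h_flip')
    add_noise = t_params.pop('add_noise')
    resize = t_params.pop('resize')
    resize_smooth = t_params.pop('resize_smooth')
    mul = t_params.pop('mul')
    color_m = t_params.pop('color_m')
    t_params.pop('agn')  # noise-range bookkeeping; the pre-drawn 'add_noise' array is used here
    t_matrix = assemble_transformation_matrix(**t_params)
    return transform(img, t_matrix, h_flip, add_noise, resize, resize_smooth, mul, color_m)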
def crop_data(img, labels, new_img_size, new_label_size = None, crop_label = True):
"""
Both images and labels will be cropped to match the given size
:param img: Images to be cropped
:param labels: Labels to be cropped
:param new_img_size: New image size
    :param new_label_size: New label size (labels are rescaled to this resolution if given)
    :param crop_label: Whether to shift the labels into the cropped coordinate frame
    :return: Cropped image and labels
"""
img_size = img.shape[-3]
r = to_int((img_size - new_img_size) / 2)
img = img[..., r:r + new_img_size, r:r + new_img_size, :]
if crop_label:
labels -= r
if new_label_size is not None:
labels = np.array((labels / new_img_size) * new_label_size, dtype = np.int32)
return img, labels
def flip_body_joints(points):
"""
    Change the semantics of the labels after a flip transformation, i.e. the left leg becomes the right leg and so on.
:param points: Body joints to be changed
"""
def swap(a, b):
points[:, [a, b]] = points[:, [b, a]]
# Leg
swap(0, 5)
swap(1, 4)
swap(2, 3)
# Arm
swap(10, 15)
swap(11, 14)
swap(12, 13)
def generate_random__transformation(X, rseed = 0, t_params_f = None):
rnd = np.random.RandomState(rseed)
if not t_params_f:
raise Exception('No attributes given!')
n = X.shape[0]
X_t = []
t_params = t_params_f(rnd)
h_flip = t_params.pop('h_flip')
add_noise = t_params.pop('add_noise')
resize = t_params.pop('resize')
mul = t_params.pop('mul')
agn = t_params.pop('agn')
color_m = t_params.pop('color_m')
resize_smooth = t_params.pop('resize_smooth')
t_matrix = assemble_transformation_matrix(**t_params)
for k in range(n):
inp = np.squeeze(X[k])
if agn is not None:
gauss = rnd.normal(0, rnd.uniform(agn[0], agn[1]), inp.shape)
else:
gauss = None
x_t = transform(inp, t_matrix, h_flip, gauss, resize, resize_smooth, mul, color_m)
X_t.append(x_t)
return np.array(X_t)
def generate_random_sequences(X, Y, sequence_size = 32, shift = 16, rseed = 0, final_size = None,
t_params_f = None, final_heatmap_size = None):
rnd = np.random.RandomState(rseed)
if not t_params_f:
raise Exception('No attributes given!')
if final_size is None:
final_size = min(X.shape[2], X.shape[3])
n = X.shape[0]
perm = rnd.permutation(range(0, n, shift))
perm_n = perm.shape[0]
for idx in range(perm_n):
b = range(perm[idx], min(perm[idx] + sequence_size, n))
X_t = []
Y_t = []
t_params = t_params_f(rnd)
h_flip = t_params.pop('h_flip')
add_noise = t_params.pop('add_noise')
resize = t_params.pop('resize')
mul = t_params.pop('mul')
agn = t_params.pop('agn')
color_m = t_params.pop('color_m')
resize_smooth = t_params.pop('resize_smooth')
t_matrix = assemble_transformation_matrix(**t_params)
for k in b:
inp = np.squeeze(X[k])
if agn is not None:
gauss = rnd.normal(0, rnd.uniform(agn[0], agn[1]), inp.shape)
else:
gauss = None
x_t = transform(inp, t_matrix, h_flip, gauss, resize, resize_smooth, mul, color_m)
y_t = utils.get_affine_transform(np.squeeze(Y[k]), np.linalg.inv(t_matrix)) if Y is not None else None
x_t, y_t = crop_data(x_t, y_t, final_size, final_heatmap_size)
X_t.append(x_t)
if Y is not None:
if h_flip:
y_t[1, :] = (final_size if final_heatmap_size is None else final_heatmap_size) - y_t[1, :]
flip_body_joints(y_t)
Y_t.append(y_t)
if Y is not None:
yield np.array(X_t), np.array(Y_t), idx
else:
yield np.array(X_t), idx
def generate_minibatches(X, Y = None, batch_size = 32, rseed = 0,
final_size = None, t_params_f = None, final_heatmap_size = None):
"""
    This function splits the whole input batch of images into mini-batches of the given size. All images in the
    batch are transformed with random affine transformations in order to prevent over-fitting during training.
    :param X: Batch of input images to be divided. It has to be a 4D tensor [batch, channel, height, width]
    :param Y: Labels of the input images (joint positions on the heatmap). 3D tensor [batch, image dimension, joint].
    E.g. the joint with index 4 in the 10th image (i.e. index 9), located at position [50, 80], is stored
    at Y[9, :, 4] == [50, 80]
:param batch_size: Size of each mini-batch
:param rseed: Random seed
:param t_params_f: Function that generates parameters for transformation matrix (see get_random_transform_params)
:param final_size: Transformed images are cropped to match the given size
:param final_heatmap_size: Size of heatmaps
:return: Sequence of randomly ordered and transformed mini-batches
"""
rnd = np.random.RandomState(rseed)
if not t_params_f:
raise Exception('No attributes given!')
if final_size is None:
final_size = min(X.shape[2], X.shape[3])
n = X.shape[0]
perm = rnd.permutation(n)
for idx in range(0, n, batch_size):
b = perm[idx:min(idx + batch_size, n)]
X_t = []
Y_t = []
for k in b:
t_params = t_params_f(rnd)
h_flip = t_params.pop('h_flip')
add_noise = t_params.pop('add_noise')
resize = t_params.pop('resize')
mul = t_params.pop('mul')
color_m = t_params.pop('color_m')
agn = t_params.pop('agn')
resize_smooth = t_params.pop('resize_smooth')
t_matrix = assemble_transformation_matrix(**t_params)
x_t = transform(np.squeeze(X[k]), t_matrix, h_flip, add_noise, resize, resize_smooth, mul, color_m)
y_t = utils.get_affine_transform(np.squeeze(Y[k]), np.linalg.inv(t_matrix)) if Y is not None else None
x_t, y_t = crop_data(x_t, y_t, final_size, final_heatmap_size)
X_t.append(x_t)
if Y is not None:
if h_flip:
y_t[1, :] = (final_size if final_heatmap_size is None else final_heatmap_size) - y_t[1, :]
flip_body_joints(y_t)
Y_t.append(y_t)
if Y is not None:
yield np.array(X_t), np.array(Y_t), b
else:
yield np.array(X_t), b
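# A hypothetical usage sketch (not part of the original module): iterate over the
# randomly transformed mini-batches produced above for one pass over the data and
# hand each batch to some training callback. Names such as handle_batch are
# placeholders for illustration.
def _example_iterate_minibatches(X, Y, t_params_f, handle_batch):
    for X_batch, Y_batch, indices in generate_minibatches(X, Y, batch_size = 16,
                                                          t_params_f = t_params_f):
        handle_batch(X_batch, Y_batch, indices)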
|
|
# Copyright (c) 2011 Intel Corporation
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The FilterScheduler is for creating volumes.
You can customize this scheduler by specifying your own volume Filters and
Weighing Functions.
"""
from oslo_config import cfg
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.scheduler import driver
from cinder.scheduler import scheduler_options
from cinder.volume import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class FilterScheduler(driver.Scheduler):
"""Scheduler that can be used for filtering and weighing."""
def __init__(self, *args, **kwargs):
super(FilterScheduler, self).__init__(*args, **kwargs)
self.cost_function_cache = None
self.options = scheduler_options.SchedulerOptions()
self.max_attempts = self._max_attempts()
def schedule(self, context, topic, method, *args, **kwargs):
"""Schedule contract that returns best-suited host for this request."""
self._schedule(context, topic, *args, **kwargs)
def _get_configuration_options(self):
"""Fetch options dictionary. Broken out for testing."""
return self.options.get_configuration()
def populate_filter_properties(self, request_spec, filter_properties):
"""Stuff things into filter_properties.
Can be overridden in a subclass to add more data.
"""
vol = request_spec['volume_properties']
filter_properties['size'] = vol['size']
filter_properties['availability_zone'] = vol.get('availability_zone')
filter_properties['user_id'] = vol.get('user_id')
filter_properties['metadata'] = vol.get('metadata')
filter_properties['qos_specs'] = vol.get('qos_specs')
def schedule_create_consistencygroup(self, context, group,
request_spec_list,
filter_properties_list):
weighed_host = self._schedule_group(
context,
request_spec_list,
filter_properties_list)
if not weighed_host:
raise exception.NoValidHost(reason=_("No weighed hosts available"))
host = weighed_host.obj.host
updated_group = driver.group_update_db(context, group, host)
self.volume_rpcapi.create_consistencygroup(context,
updated_group, host)
def schedule_create_volume(self, context, request_spec, filter_properties):
weighed_host = self._schedule(context, request_spec,
filter_properties)
if not weighed_host:
raise exception.NoValidHost(reason=_("No weighed hosts available"))
host = weighed_host.obj.host
volume_id = request_spec['volume_id']
updated_volume = driver.volume_update_db(context, volume_id, host)
self._post_select_populate_filter_properties(filter_properties,
weighed_host.obj)
# context is not serializable
filter_properties.pop('context', None)
self.volume_rpcapi.create_volume(context, updated_volume, host,
request_spec, filter_properties,
allow_reschedule=True)
def host_passes_filters(self, context, host, request_spec,
filter_properties):
"""Check if the specified host passes the filters."""
weighed_hosts = self._get_weighted_candidates(context, request_spec,
filter_properties)
for weighed_host in weighed_hosts:
host_state = weighed_host.obj
if host_state.host == host:
return host_state
raise exception.NoValidHost(reason=_('Cannot place volume %(id)s on '
'%(host)s') %
{'id': request_spec['volume_id'],
'host': host})
def find_retype_host(self, context, request_spec, filter_properties=None,
migration_policy='never'):
"""Find a host that can accept the volume with its new type."""
filter_properties = filter_properties or {}
current_host = request_spec['volume_properties']['host']
# The volume already exists on this host, and so we shouldn't check if
# it can accept the volume again in the CapacityFilter.
filter_properties['vol_exists_on'] = current_host
weighed_hosts = self._get_weighted_candidates(context, request_spec,
filter_properties)
if not weighed_hosts:
raise exception.NoValidHost(reason=_('No valid hosts for volume '
'%(id)s with type %(type)s') %
{'id': request_spec['volume_id'],
'type': request_spec['volume_type']})
for weighed_host in weighed_hosts:
host_state = weighed_host.obj
if host_state.host == current_host:
return host_state
if utils.extract_host(current_host, 'pool') is None:
            # Legacy volumes created before pools were introduced have no pool
            # info in their host field, but host_state.host always includes
            # pool-level info. If the exact match above did not work out, we
            # therefore look for a host_state on the same host as the volume
            # being retyped. In other words, for legacy volumes, retyping may
            # cause a migration between pools on the same host, which we
            # consider different from a migration between hosts and thus allow
            # it even when the migration policy is 'never'.
for weighed_host in weighed_hosts:
host_state = weighed_host.obj
backend = utils.extract_host(host_state.host, 'backend')
if backend == current_host:
return host_state
if migration_policy == 'never':
raise exception.NoValidHost(reason=_('Current host not valid for '
'volume %(id)s with type '
'%(type)s, migration not '
'allowed') %
{'id': request_spec['volume_id'],
'type': request_spec['volume_type']})
top_host = self._choose_top_host(weighed_hosts, request_spec)
return top_host.obj
def get_pools(self, context, filters):
# TODO(zhiteng) Add filters support
return self.host_manager.get_pools(context)
def _post_select_populate_filter_properties(self, filter_properties,
host_state):
"""Populate filter properties with additional information.
Add additional information to the filter properties after a host has
been selected by the scheduling process.
"""
# Add a retry entry for the selected volume backend:
self._add_retry_host(filter_properties, host_state.host)
def _add_retry_host(self, filter_properties, host):
"""Add a retry entry for the selected volume backend.
In the event that the request gets re-scheduled, this entry will signal
that the given backend has already been tried.
"""
retry = filter_properties.get('retry', None)
if not retry:
return
hosts = retry['hosts']
hosts.append(host)
def _max_attempts(self):
max_attempts = CONF.scheduler_max_attempts
if max_attempts < 1:
raise exception.InvalidParameterValue(
err=_("Invalid value for 'scheduler_max_attempts', "
"must be >=1"))
return max_attempts
def _log_volume_error(self, volume_id, retry):
"""Log requests with exceptions from previous volume operations."""
exc = retry.pop('exc', None) # string-ified exception from volume
if not exc:
return # no exception info from a previous attempt, skip
hosts = retry.get('hosts', None)
if not hosts:
return # no previously attempted hosts, skip
last_host = hosts[-1]
LOG.error(_LE("Error scheduling %(volume_id)s from last vol-service: "
"%(last_host)s : %(exc)s"),
{'volume_id': volume_id,
'last_host': last_host,
'exc': exc})
def _populate_retry(self, filter_properties, properties):
"""Populate filter properties with history of retries for request.
If maximum retries is exceeded, raise NoValidHost.
"""
max_attempts = self.max_attempts
retry = filter_properties.pop('retry', {})
if max_attempts == 1:
# re-scheduling is disabled.
return
# retry is enabled, update attempt count:
if retry:
retry['num_attempts'] += 1
else:
retry = {
'num_attempts': 1,
'hosts': [] # list of volume service hosts tried
}
filter_properties['retry'] = retry
volume_id = properties.get('volume_id')
self._log_volume_error(volume_id, retry)
if retry['num_attempts'] > max_attempts:
raise exception.NoValidHost(
reason=_("Exceeded max scheduling attempts %(max_attempts)d "
"for volume %(volume_id)s") %
{'max_attempts': max_attempts,
'volume_id': volume_id})
def _get_weighted_candidates(self, context, request_spec,
filter_properties=None):
"""Return a list of hosts that meet required specs.
Returned list is ordered by their fitness.
"""
elevated = context.elevated()
volume_properties = request_spec['volume_properties']
        # Since Cinder is using mixed filters from Oslo and its own, which
        # take 'resource_XX' and 'volume_XX' as input respectively, copying
# 'volume_XX' to 'resource_XX' will make both filters happy.
resource_properties = volume_properties.copy()
volume_type = request_spec.get("volume_type", None)
resource_type = request_spec.get("volume_type", None)
request_spec.update({'resource_properties': resource_properties})
config_options = self._get_configuration_options()
if filter_properties is None:
filter_properties = {}
self._populate_retry(filter_properties, resource_properties)
filter_properties.update({'context': context,
'request_spec': request_spec,
'config_options': config_options,
'volume_type': volume_type,
'resource_type': resource_type})
self.populate_filter_properties(request_spec,
filter_properties)
# If multiattach is enabled on a volume, we need to add
# multiattach to extra specs, so that the capability
# filtering is enabled.
multiattach = volume_properties.get('multiattach', False)
if multiattach and 'multiattach' not in resource_type.get(
'extra_specs', {}):
if 'extra_specs' not in resource_type:
resource_type['extra_specs'] = {}
resource_type['extra_specs'].update(
multiattach='<is> True')
# Find our local list of acceptable hosts by filtering and
# weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
hosts = self.host_manager.get_all_host_states(elevated)
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(hosts,
filter_properties)
if not hosts:
return []
LOG.debug("Filtered %s", hosts)
# weighted_host = WeightedHost() ... the best
# host for the job.
weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
filter_properties)
return weighed_hosts
def _get_weighted_candidates_group(self, context, request_spec_list,
filter_properties_list=None):
"""Finds hosts that supports the consistencygroup.
Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
weighed_hosts = []
index = 0
for request_spec in request_spec_list:
volume_properties = request_spec['volume_properties']
            # Since Cinder is using mixed filters from Oslo and its own, which
            # take 'resource_XX' and 'volume_XX' as input respectively,
# copying 'volume_XX' to 'resource_XX' will make both filters
# happy.
resource_properties = volume_properties.copy()
volume_type = request_spec.get("volume_type", None)
resource_type = request_spec.get("volume_type", None)
request_spec.update({'resource_properties': resource_properties})
config_options = self._get_configuration_options()
filter_properties = {}
if filter_properties_list:
filter_properties = filter_properties_list[index]
if filter_properties is None:
filter_properties = {}
self._populate_retry(filter_properties, resource_properties)
# Add consistencygroup_support in extra_specs if it is not there.
# Make sure it is populated in filter_properties
if 'consistencygroup_support' not in resource_type.get(
'extra_specs', {}):
resource_type['extra_specs'].update(
consistencygroup_support='<is> True')
filter_properties.update({'context': context,
'request_spec': request_spec,
'config_options': config_options,
'volume_type': volume_type,
'resource_type': resource_type})
self.populate_filter_properties(request_spec,
filter_properties)
# Find our local list of acceptable hosts by filtering and
# weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
all_hosts = self.host_manager.get_all_host_states(elevated)
if not all_hosts:
return []
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(all_hosts,
filter_properties)
if not hosts:
return []
LOG.debug("Filtered %s", hosts)
# weighted_host = WeightedHost() ... the best
# host for the job.
temp_weighed_hosts = self.host_manager.get_weighed_hosts(
hosts,
filter_properties)
if not temp_weighed_hosts:
return []
if index == 0:
weighed_hosts = temp_weighed_hosts
else:
new_weighed_hosts = []
for host1 in weighed_hosts:
for host2 in temp_weighed_hosts:
if host1.obj.host == host2.obj.host:
new_weighed_hosts.append(host1)
weighed_hosts = new_weighed_hosts
if not weighed_hosts:
return []
index += 1
return weighed_hosts
def _schedule(self, context, request_spec, filter_properties=None):
weighed_hosts = self._get_weighted_candidates(context, request_spec,
filter_properties)
if not weighed_hosts:
LOG.warning(_LW('No weighed hosts found for volume '
'with properties: %s'),
filter_properties['request_spec']['volume_type'])
return None
return self._choose_top_host(weighed_hosts, request_spec)
def _schedule_group(self, context, request_spec_list,
filter_properties_list=None):
weighed_hosts = self._get_weighted_candidates_group(
context,
request_spec_list,
filter_properties_list)
if not weighed_hosts:
return None
return self._choose_top_host_group(weighed_hosts, request_spec_list)
def _choose_top_host(self, weighed_hosts, request_spec):
top_host = weighed_hosts[0]
host_state = top_host.obj
LOG.debug("Choosing %s", host_state.host)
volume_properties = request_spec['volume_properties']
host_state.consume_from_volume(volume_properties)
return top_host
def _choose_top_host_group(self, weighed_hosts, request_spec_list):
top_host = weighed_hosts[0]
host_state = top_host.obj
LOG.debug("Choosing %s", host_state.host)
return top_host
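# Illustrative sketch (not part of the original scheduler): how the retry
# bookkeeping implemented in _populate_retry and _add_retry_host above evolves
# across scheduling attempts, using plain dicts only. The max_attempts value
# and host names below are hypothetical.
def _example_retry_bookkeeping(max_attempts=3):
    filter_properties = {}
    for host in ('host1@backend1', 'host2@backend1'):
        retry = filter_properties.pop('retry', {})
        if retry:
            retry['num_attempts'] += 1
        else:
            retry = {'num_attempts': 1, 'hosts': []}   # hosts tried so far
        filter_properties['retry'] = retry
        if retry['num_attempts'] > max_attempts:
            raise RuntimeError('Exceeded max scheduling attempts')
        # after a backend is selected, record it for any later re-schedule
        retry['hosts'].append(host)
    return filter_properties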
|
|
""" A lan connect class using udp
"""
__author__ = "Oliver Lindemann <[email protected]>"
__version__ = "0.5"
import atexit
import os
import socket
from multiprocessing import Process, Event, Queue
import logging
from .types import UDPData
from .polling_time_profile import PollingTimeProfile
from .process_priority_manager import get_priority
from .timer import Timer, app_timer, get_time_ms
def get_lan_ip():
if os.name != "nt":
# linux
from subprocess import check_output
rtn = check_output("hostname -I".split(" "))
rtn = rtn.decode().split(" ")
return rtn[0].strip()
else:
# windows
        # code based on http://stackoverflow.com/questions/11735821/python-get-localhost-ip
return socket.gethostbyname(socket.gethostname())
class UDPConnection(object):
# DOC document the usage "connecting" "unconnecting"
COMMAND_CHAR = b"$"
CONNECT = COMMAND_CHAR + b"connect"
UNCONNECT = COMMAND_CHAR + b"unconnect"
COMMAND_REPLY = COMMAND_CHAR + b"ok"
PING = COMMAND_CHAR + b"ping"
MY_IP = get_lan_ip()
def __init__(self, udp_port=5005):
self.udp_port = udp_port
self._socket = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
self._socket.bind((UDPConnection.MY_IP, self.udp_port))
self._socket.setblocking(False)
self.peer_ip = None
self.timer = Timer(sync_timer=app_timer) # own timer, because often
# used in own process
@property
def my_ip(self):
return UDPConnection.MY_IP
def __str__(self):
return "ip: {0} (port: {1}); peer: {2}".format(UDPConnection.MY_IP,
self.udp_port, self.peer_ip)
def receive(self, timeout):
"""checks for received data and returns it
In contrast to poll the function keep polling until timeout if no new
data are available.
timeout in seconds
"""
t = get_time_ms()
timeout_ms = int(timeout*1000)
while True:
rtn = self.poll()
if rtn is not None:
#print("UDP receive: {0}".format(rtn))
return rtn
if (get_time_ms() - t) > timeout_ms:
return None
def poll(self):
"""returns data (bytes) or None if no data found
process also commands
if send is unkown input is ignored
"""
try:
data, sender = self._socket.recvfrom(1024)
except:
return None
# process data
if data == UDPConnection.CONNECT:
#connection request
self.peer_ip = sender[0]
if not self.send(UDPConnection.COMMAND_REPLY):
self.peer_ip = None
elif sender[0] != self.peer_ip:
return None # ignore data
elif data == UDPConnection.PING:
self.send(UDPConnection.COMMAND_REPLY)
elif data == self.UNCONNECT:
self.unconnect_peer()
return data
def send(self, data, timeout=1.0):
"""returns if problems or not
timeout in seconds (default = 1.0)
return False if failed to send
"""
timeout_ms = int(timeout*1000)
if self.peer_ip is None:
return False
start = get_time_ms()
if isinstance(data, str):
data = data.encode() # force to byte
while get_time_ms() - start < timeout_ms:
try:
self._socket.sendto(data, (self.peer_ip, self.udp_port))
#print("UDP send: {0}".format(data))
return True
except:
pass
return False
def connect_peer(self, peer_ip, timeout=1.0):
self.unconnect_peer()
self.peer_ip = peer_ip
if self.send(UDPConnection.CONNECT, timeout=timeout) and \
self.wait_input(UDPConnection.COMMAND_REPLY, duration=timeout):
return True
self.peer_ip = None
return False
def wait_input(self, input_string, duration=1.0):
"""poll the connection and waits for a specific input"""
start = get_time_ms()
duration_ms = int(duration*1000)
while get_time_ms() - start < duration_ms:
in_ = self.poll()
if in_ == input_string:
return True
return False
def unconnect_peer(self, timeout=1.0):
self.send(UDPConnection.UNCONNECT, timeout=timeout)
self.peer_ip = None
@property
def is_connected(self):
return self.peer_ip is not None
def ping(self, timeout=0.5):
"""returns boolean if succeeded and ping time in ms"""
        if self.peer_ip is None:
return False, None
start = get_time_ms()
if self.send(UDPConnection.PING, timeout=timeout) and \
self.wait_input(UDPConnection.COMMAND_REPLY, duration=timeout):
return True, get_time_ms() - start
return False, None
def clear_receive_buffer(self):
data = ""
while data is not None:
data = self.poll()
def poll_last_data(self):
"""polls all data and returns only the last one
return None if not data found"""
rtn = None
tmp = self.poll()
while tmp is not None:
rtn = tmp
tmp = self.poll()
return rtn
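# Illustrative sketch (not part of the original module): the basic client-side
# handshake with the UDPConnection class above. The peer address below is
# hypothetical, and a peer is assumed to be listening on the same UDP port.
def _example_udp_handshake(peer_ip="192.168.0.2"):
    con = UDPConnection(udp_port=5005)
    if con.connect_peer(peer_ip, timeout=1.0):  # sends $connect, waits for $ok
        ok, ping_ms = con.ping(timeout=0.5)
        con.send(b"hello")
        reply = con.receive(timeout=1.0)        # keeps polling until timeout
        con.unconnect_peer()
        return ok, ping_ms, reply
    return None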
class UDPConnectionProcess(Process):
"""UDPConnectionProcess polls and writes to a data queue.
Example::
# Server that prints each input and echos it to the client
# that is currently connected
from udp_connection import UDPConnectionProcess, Queue
receive_queue = Queue()
udp_p = UDPConnectionProcess(receive_queue=receive_queue)
udp_p.start()
udp_p.event_polling.set() # start polling
while True:
data = receive_queue.get()
print(data)
if data is not None:
udp_p.send_queue.put(data.string)
Example::
# connecting to a server
""" # DOC
def __init__(self, event_trigger = (),
event_ignore_tag = None):
"""Initialize UDPConnectionProcess
Parameters
----------
receive_queue: multiprocessing.Queue
the queue to which the received data should be put
peer_ip : string
the IP of the peer to which the connection should be established
sync_clock : Clock
the internal clock for timestamps will synchronized with this clock
event_trigger: multiprocessing.Event() (or list of..)
event trigger(s) to be set. If Udp event is received and it is not a
command to set this event (typical of sensor recording processes).
event_ignore_tag:
udp data that start with this tag will be ignored for event triggering
""" # DOC
super(UDPConnectionProcess, self).__init__()
self.receive_queue = Queue()
self.send_queue = Queue()
self.event_is_connected = Event()
self._event_quit_request = Event()
self._event_is_polling = Event()
self._event_ignore_tag = event_ignore_tag
        if isinstance(event_trigger, type(Event())):
            # a single event was passed in; wrap it so tuple() below works
            event_trigger = (event_trigger,)
try:
self._event_trigger = tuple(event_trigger)
except:
self._event_trigger = ()
atexit.register(self.quit)
@property
def my_ip(self):
return UDPConnection.MY_IP
def quit(self):
self._event_quit_request.set()
if self.is_alive():
self.join()
def pause(self):
self._event_is_polling.clear()
def start_polling(self):
self._event_is_polling.set()
def run(self):
udp_connection = UDPConnection(udp_port=5005)
self.start_polling()
ptp = PollingTimeProfile()
prev_event_polling = None
while not self._event_quit_request.is_set():
if prev_event_polling != self._event_is_polling.is_set():
                # event polling changed
prev_event_polling = self._event_is_polling.is_set()
if prev_event_polling:
logging.warning("UDP start, pid {}, priority {}".format(
self.pid, get_priority(self.pid)))
else:
logging.warning("UDP stop")
ptp.stop()
if not self._event_is_polling.is_set():
self._event_is_polling.wait(timeout=0.1)
else:
data = udp_connection.poll()
t = udp_connection.timer.time
ptp.update(t)
if data is not None:
d = UDPData(string=data, time=t)
self.receive_queue.put(d)
if self._event_ignore_tag is not None and \
not d.startswith(self._event_ignore_tag):
for ev in self._event_trigger:
# set all connected trigger
ev.set()
try:
udp_connection.send(self.send_queue.get_nowait())
except:
pass
# has connection changed?
if self.event_is_connected.is_set() != udp_connection.is_connected:
if udp_connection.is_connected:
self.event_is_connected.set()
else:
self.event_is_connected.clear()
if not udp_connection.is_connected:
udp_connection.timer.wait(200)
udp_connection.unconnect_peer()
logging.warning("UDP quit, {}".format(ptp.get_profile_str()))
|
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import now, cint
from frappe.model import no_value_fields
from frappe.model.document import Document
from frappe.model.db_schema import type_map
from frappe.core.doctype.property_setter.property_setter import make_property_setter
from frappe.core.doctype.notification_count.notification_count import delete_notification_count_for
from frappe.modules import make_boilerplate
form_grid_templates = {
"fields": "templates/form_grid/fields.html"
}
class DocType(Document):
def validate(self):
if not frappe.conf.get("developer_mode"):
frappe.throw(_("Not in Developer Mode! Set in site_config.json"))
for c in [".", "/", "#", "&", "=", ":", "'", '"']:
if c in self.name:
frappe.throw(_("{0} not allowed in name").format(c))
self.validate_series()
self.scrub_field_names()
self.validate_title_field()
validate_fields(self.get("fields"))
if self.istable:
# no permission records for child table
self.permissions = []
else:
validate_permissions(self)
self.make_amendable()
def change_modified_of_parent(self):
if frappe.flags.in_import:
return
parent_list = frappe.db.sql("""SELECT parent
from tabDocField where fieldtype="Table" and options=%s""", self.name)
for p in parent_list:
frappe.db.sql('UPDATE tabDocType SET modified=%s WHERE `name`=%s', (now(), p[0]))
def scrub_field_names(self):
restricted = ('name','parent','creation','modified','modified_by',
'parentfield','parenttype',"file_list")
for d in self.get("fields"):
if d.fieldtype:
if (not getattr(d, "fieldname", None)):
if d.label:
d.fieldname = d.label.strip().lower().replace(' ','_')
if d.fieldname in restricted:
d.fieldname = d.fieldname + '1'
else:
d.fieldname = d.fieldtype.lower().replace(" ","_") + "_" + str(d.idx)
def validate_title_field(self):
if self.title_field and \
self.title_field not in [d.fieldname for d in self.get("fields")]:
frappe.throw(_("Title field must be a valid fieldname"))
def validate_series(self, autoname=None, name=None):
if not autoname: autoname = self.autoname
if not name: name = self.name
if not autoname and self.get("fields", {"fieldname":"naming_series"}):
self.autoname = "naming_series:"
if autoname and (not autoname.startswith('field:')) \
and (not autoname.startswith('eval:')) \
and (not autoname in ('Prompt', 'hash')) \
and (not autoname.startswith('naming_series:')):
prefix = autoname.split('.')[0]
used_in = frappe.db.sql('select name from tabDocType where substring_index(autoname, ".", 1) = %s and name!=%s', (prefix, name))
if used_in:
frappe.throw(_("Series {0} already used in {1}").format(prefix, used_in[0][0]))
def on_update(self):
from frappe.model.db_schema import updatedb
updatedb(self.name)
self.change_modified_of_parent()
make_module_and_roles(self)
from frappe import conf
if not (frappe.flags.in_import or frappe.flags.in_test) and conf.get('developer_mode') or 0:
self.export_doc()
self.make_controller_template()
# update index
if not getattr(self, "custom", False):
from frappe.modules import load_doctype_module
module = load_doctype_module(self.name, self.module)
if hasattr(module, "on_doctype_update"):
module.on_doctype_update()
delete_notification_count_for(doctype=self.name)
frappe.clear_cache(doctype=self.name)
def before_rename(self, old, new, merge=False):
if merge:
frappe.throw(_("DocType can not be merged"))
def after_rename(self, old, new, merge=False):
if self.issingle:
frappe.db.sql("""update tabSingles set doctype=%s where doctype=%s""", (new, old))
else:
frappe.db.sql("rename table `tab%s` to `tab%s`" % (old, new))
def before_reload(self):
if not (self.issingle and self.istable):
self.preserve_naming_series_options_in_property_setter()
def preserve_naming_series_options_in_property_setter(self):
"""preserve naming_series as property setter if it does not exist"""
naming_series = self.get("fields", {"fieldname": "naming_series"})
if not naming_series:
return
		# check if at least 1 record exists
if not (frappe.db.table_exists("tab" + self.name) and frappe.db.sql("select name from `tab{}` limit 1".format(self.name))):
return
existing_property_setter = frappe.db.get_value("Property Setter", {"doc_type": self.name,
"property": "options", "field_name": "naming_series"})
if not existing_property_setter:
make_property_setter(self.name, "naming_series", "options", naming_series[0].options, "Text", validate_fields_for_doctype=False)
if naming_series[0].default:
make_property_setter(self.name, "naming_series", "default", naming_series[0].default, "Text", validate_fields_for_doctype=False)
def export_doc(self):
from frappe.modules.export_file import export_to_files
export_to_files(record_list=[['DocType', self.name]])
def import_doc(self):
from frappe.modules.import_module import import_from_files
import_from_files(record_list=[[self.module, 'doctype', self.name]])
def make_controller_template(self):
make_boilerplate("controller.py", self)
if not (self.istable or self.issingle):
make_boilerplate("test_controller.py", self)
make_boilerplate("test_records.json", self)
def make_amendable(self):
"""
if is_submittable is set, add amended_from docfields
"""
if self.is_submittable:
if not frappe.db.sql("""select name from tabDocField
where fieldname = 'amended_from' and parent = %s""", self.name):
self.append("fields", {
"label": "Amended From",
"fieldtype": "Link",
"fieldname": "amended_from",
"options": self.name,
"read_only": 1,
"print_hide": 1,
"no_copy": 1
})
def get_max_idx(self):
max_idx = frappe.db.sql("""select max(idx) from `tabDocField` where parent = %s""",
self.name)
return max_idx and max_idx[0][0] or 0
def validate_fields_for_doctype(doctype):
validate_fields(frappe.get_meta(doctype).get("fields"))
# this is separate because it is also called via custom field
def validate_fields(fields):
def check_illegal_characters(fieldname):
for c in ['.', ',', ' ', '-', '&', '%', '=', '"', "'", '*', '$',
'(', ')', '[', ']', '/']:
if c in fieldname:
frappe.throw(_("{0} not allowed in fieldname {1}").format(c, fieldname))
def check_unique_fieldname(fieldname):
duplicates = filter(None, map(lambda df: df.fieldname==fieldname and str(df.idx) or None, fields))
if len(duplicates) > 1:
frappe.throw(_("Fieldname {0} appears multiple times in rows {1}").format(fieldname, ", ".join(duplicates)))
def check_illegal_mandatory(d):
if (d.fieldtype in no_value_fields) and d.fieldtype!="Table" and d.reqd:
frappe.throw(_("Field {0} of type {1} cannot be mandatory").format(d.label, d.fieldtype))
def check_link_table_options(d):
if d.fieldtype in ("Link", "Table"):
if not d.options:
frappe.throw(_("Options requried for Link or Table type field {0} in row {1}").format(d.label, d.idx))
if d.options=="[Select]" or d.options==d.parent:
return
if d.options != d.parent and not frappe.db.exists("DocType", d.options):
frappe.throw(_("Options must be a valid DocType for field {0} in row {1}").format(d.label, d.idx))
def check_hidden_and_mandatory(d):
if d.hidden and d.reqd and not d.default:
frappe.throw(_("Field {0} in row {1} cannot be hidden and mandatory without default").format(d.label, d.idx))
def check_min_items_in_list(fields):
if len(filter(lambda d: d.in_list_view, fields))==0:
for d in fields[:5]:
if d.fieldtype in type_map:
d.in_list_view = 1
def check_width(d):
if d.fieldtype == "Currency" and cint(d.width) < 100:
frappe.throw(_("Max width for type Currency is 100px in row {0}").format(d.idx))
def check_in_list_view(d):
if d.in_list_view and d.fieldtype!="Image" and (d.fieldtype in no_value_fields):
frappe.throw(_("'In List View' not allowed for type {0} in row {1}").format(d.fieldtype, d.idx))
def check_dynamic_link_options(d):
if d.fieldtype=="Dynamic Link":
doctype_pointer = filter(lambda df: df.fieldname==d.options, fields)
if not doctype_pointer or (doctype_pointer[0].fieldtype!="Link") \
or (doctype_pointer[0].options!="DocType"):
frappe.throw(_("Options 'Dynamic Link' type of field must point to another Link Field with options as 'DocType'"))
def check_fold(fields):
fold_exists = False
for i, f in enumerate(fields):
if f.fieldtype=="Fold":
if fold_exists:
frappe.throw(_("There can be only one Fold in a form"))
fold_exists = True
if i < len(fields)-1:
nxt = fields[i+1]
if nxt.fieldtype != "Section Break" \
or (nxt.fieldtype=="Section Break" and not nxt.label):
frappe.throw(_("Fold must come before a labelled Section Break"))
else:
frappe.throw(_("Fold can not be at the end of the form"))
for d in fields:
if not d.permlevel: d.permlevel = 0
if not d.fieldname:
frappe.throw(_("Fieldname is required in row {0}").format(d.idx))
check_illegal_characters(d.fieldname)
check_unique_fieldname(d.fieldname)
check_illegal_mandatory(d)
check_link_table_options(d)
check_dynamic_link_options(d)
check_hidden_and_mandatory(d)
check_in_list_view(d)
check_min_items_in_list(fields)
check_fold(fields)
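# Illustrative sketch (not part of the original module): the duplicate
# fieldname detection performed by check_unique_fieldname above, shown with
# plain stand-in rows instead of DocField objects. The rows below are
# hypothetical.
def _example_duplicate_fieldnames():
	rows = [{'idx': 1, 'fieldname': 'status'},
		{'idx': 2, 'fieldname': 'amount'},
		{'idx': 3, 'fieldname': 'status'}]
	seen, duplicates = {}, []
	for row in rows:
		if row['fieldname'] in seen:
			duplicates.append((seen[row['fieldname']], row['idx']))
		else:
			seen[row['fieldname']] = row['idx']
	return duplicates  # -> [(1, 3)]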
def validate_permissions_for_doctype(doctype, for_remove=False):
doctype = frappe.get_doc("DocType", doctype)
if frappe.conf.developer_mode and not frappe.flags.in_test:
# save doctype
doctype.save()
else:
validate_permissions(doctype, for_remove)
# save permissions
for perm in doctype.get("permissions"):
perm.db_update()
def validate_permissions(doctype, for_remove=False):
permissions = doctype.get("permissions")
if not permissions:
frappe.throw(_('Enter at least one permission row'), frappe.MandatoryError)
issingle = issubmittable = isimportable = False
if doctype:
issingle = cint(doctype.issingle)
issubmittable = cint(doctype.is_submittable)
isimportable = cint(doctype.allow_import)
def get_txt(d):
return _("For {0} at level {1} in {2} in row {3}").format(d.role, d.permlevel, d.parent, d.idx)
def check_atleast_one_set(d):
if not d.read and not d.write and not d.submit and not d.cancel and not d.create:
frappe.throw(_("{0}: No basic permissions set").format(get_txt(d)))
def check_double(d):
has_similar = False
for p in permissions:
if (p.role==d.role and p.permlevel==d.permlevel
and p.apply_user_permissions==d.apply_user_permissions and p!=d):
has_similar = True
break
if has_similar:
frappe.throw(_("{0}: Only one rule allowed with the same Role, Level and Apply User Permissions").format(get_txt(d)))
def check_level_zero_is_set(d):
if cint(d.permlevel) > 0 and d.role != 'All':
has_zero_perm = False
for p in permissions:
if p.role==d.role and (p.permlevel or 0)==0 and p!=d:
has_zero_perm = True
break
if not has_zero_perm:
frappe.throw(_("{0}: Permission at level 0 must be set before higher levels are set").format(get_txt(d)))
if d.create or d.submit or d.cancel or d.amend or d.match:
frappe.throw(_("{0}: Create, Submit, Cancel and Amend only valid at level 0").format(get_txt(d)))
def check_permission_dependency(d):
if d.cancel and not d.submit:
frappe.throw(_("{0}: Cannot set Cancel without Submit").format(get_txt(d)))
if (d.submit or d.cancel or d.amend) and not d.write:
frappe.throw(_("{0}: Cannot set Submit, Cancel, Amend without Write").format(get_txt(d)))
		if d.amend and not d.cancel:
			frappe.throw(_("{0}: Cannot set Amend without Cancel").format(get_txt(d)))
if d.get("import") and not d.create:
frappe.throw(_("{0}: Cannot set Import without Create").format(get_txt(d)))
def remove_rights_for_single(d):
if not issingle:
return
if d.report:
frappe.msgprint(_("Report cannot be set for Single types"))
d.report = 0
d.set("import", 0)
d.set("export", 0)
for ptype, label in (
("set_user_permissions", _("Set User Permissions")),
("apply_user_permissions", _("Apply User Permissions"))):
if d.get(ptype):
d.set(ptype, 0)
frappe.msgprint(_("{0} cannot be set for Single types").format(label))
def check_if_submittable(d):
if d.submit and not issubmittable:
frappe.throw(_("{0}: Cannot set Assign Submit if not Submittable").format(get_txt(d)))
elif d.amend and not issubmittable:
frappe.throw(_("{0}: Cannot set Assign Amend if not Submittable").format(get_txt(d)))
def check_if_importable(d):
if d.get("import") and not isimportable:
frappe.throw(_("{0}: Cannot set import as {1} is not importable").format(get_txt(d), doctype))
for d in permissions:
if not d.permlevel:
d.permlevel=0
check_atleast_one_set(d)
if not for_remove:
check_double(d)
check_permission_dependency(d)
check_if_submittable(d)
check_if_importable(d)
check_level_zero_is_set(d)
remove_rights_for_single(d)
def make_module_and_roles(doc, perm_fieldname="permissions"):
try:
if not frappe.db.exists("Module Def", doc.module):
m = frappe.get_doc({"doctype": "Module Def", "module_name": doc.module})
m.app_name = frappe.local.module_app[frappe.scrub(doc.module)]
m.ignore_mandatory = m.ignore_permissions = True
m.insert()
default_roles = ["Administrator", "Guest", "All"]
roles = [p.role for p in doc.get("permissions") or []] + default_roles
for role in list(set(roles)):
if not frappe.db.exists("Role", role):
r = frappe.get_doc({"doctype": "Role", "role_name": role})
r.role_name = role
r.ignore_mandatory = r.ignore_permissions = True
r.insert()
	except frappe.DoesNotExistError:
		pass
	except frappe.SQLError as e:
		if e.args[0]==1146:
			pass
		else:
			raise
def init_list(doctype):
doc = frappe.get_meta(doctype)
make_boilerplate("controller_list.js", doc)
make_boilerplate("controller_list.html", doc)
|
|
#!/usr/bin/env python
# pylint: disable=protected-access, unused-variable, locally-disabled, len-as-condition
"""Lint helper to generate lint summary of source.
Copyright by Contributors
"""
from __future__ import print_function
import argparse
import codecs
import sys
import re
import os
import cpplint
from cpplint import _cpplint_state
from pylint import epylint
CXX_SUFFIX = set(['cc', 'c', 'cpp', 'h', 'cu', 'hpp'])
PYTHON_SUFFIX = set(['py'])
def filepath_enumerate(paths):
"""Enumerate the file paths of all subfiles of the list of paths"""
out = []
for path in paths:
if os.path.isfile(path):
out.append(path)
else:
for root, dirs, files in os.walk(path):
for name in files:
out.append(os.path.normpath(os.path.join(root, name)))
return out
# pylint: disable=useless-object-inheritance
class LintHelper(object):
"""Class to help runing the lint and records summary"""
@staticmethod
def _print_summary_map(strm, result_map, ftype):
"""Print summary of certain result map."""
if len(result_map) == 0:
return 0
npass = len([x for k, x in result_map.items() if len(x) == 0])
strm.write('=====%d/%d %s files passed check=====\n' % (npass, len(result_map), ftype))
for fname, emap in result_map.items():
if len(emap) == 0:
continue
strm.write('%s: %d Errors of %d Categories map=%s\n' % (
fname, sum(emap.values()), len(emap), str(emap)))
return len(result_map) - npass
def __init__(self):
self.project_name = None
self.cpp_header_map = {}
self.cpp_src_map = {}
self.python_map = {}
pylint_disable = ['superfluous-parens',
'too-many-instance-attributes',
'too-few-public-methods']
# setup pylint
self.pylint_opts = ['--extension-pkg-whitelist=numpy',
'--disable=' + ','.join(pylint_disable)]
self.pylint_cats = set(['error', 'warning', 'convention', 'refactor'])
# setup cpp lint
cpplint_args = ['.', '--extensions=' + (','.join(CXX_SUFFIX))]
_ = cpplint.ParseArguments(cpplint_args)
cpplint._SetFilters(','.join(['-build/c++11',
'-build/namespaces',
'-build/include,',
'+build/include_what_you_use',
'+build/include_order']))
cpplint._SetCountingStyle('toplevel')
cpplint._line_length = 100
def process_cpp(self, path, suffix):
"""Process a cpp file."""
_cpplint_state.ResetErrorCounts()
cpplint.ProcessFile(str(path), _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
errors = _cpplint_state.errors_by_category.copy()
if suffix == 'h':
self.cpp_header_map[str(path)] = errors
else:
self.cpp_src_map[str(path)] = errors
def process_python(self, path):
"""Process a python file."""
(pylint_stdout, pylint_stderr) = epylint.py_run(
' '.join([str(path)] + self.pylint_opts), return_std=True)
emap = {}
err = pylint_stderr.read()
if len(err):
print(err)
for line in pylint_stdout:
sys.stderr.write(line)
key = line.split(':')[-1].split('(')[0].strip()
if key not in self.pylint_cats:
continue
if key not in emap:
emap[key] = 1
else:
emap[key] += 1
self.python_map[str(path)] = emap
def print_summary(self, strm):
"""Print summary of lint."""
nerr = 0
nerr += LintHelper._print_summary_map(strm, self.cpp_header_map, 'cpp-header')
        nerr += LintHelper._print_summary_map(strm, self.cpp_src_map, 'cpp-source')
nerr += LintHelper._print_summary_map(strm, self.python_map, 'python')
if nerr == 0:
strm.write('All passed!\n')
else:
strm.write('%d files failed lint\n' % nerr)
return nerr
# singleton helper for lint check
_HELPER = LintHelper()
def get_header_guard_dmlc(filename):
"""Get Header Guard Convention for DMLC Projects.
For headers in include, directly use the path
For headers in src, use project name plus path
Examples: with project-name = dmlc
      include/dmlc/timer.h -> DMLC_TIMER_H_
src/io/libsvm_parser.h -> DMLC_IO_LIBSVM_PARSER_H_
"""
fileinfo = cpplint.FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
inc_list = ['include', 'api', 'wrapper', 'contrib']
if os.name == 'nt':
inc_list.append("mshadow")
if file_path_from_root.find('src/') != -1 and _HELPER.project_name is not None:
idx = file_path_from_root.find('src/')
file_path_from_root = _HELPER.project_name + file_path_from_root[idx + 3:]
else:
idx = file_path_from_root.find("include/")
if idx != -1:
file_path_from_root = file_path_from_root[idx + 8:]
for spath in inc_list:
prefix = spath + '/'
if file_path_from_root.startswith(prefix):
file_path_from_root = re.sub('^' + prefix, '', file_path_from_root)
break
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
cpplint.GetHeaderGuardCPPVariable = get_header_guard_dmlc
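# Illustrative sketch (not part of the original script): the guard-name
# convention documented in get_header_guard_dmlc above, reduced to the final
# normalisation step. The example path is hypothetical.
def _example_guard_name(path_from_root='dmlc/timer.h'):
    # replace path separators, dots, dashes and whitespace, then upper-case;
    # _example_guard_name('dmlc/timer.h') -> 'DMLC_TIMER_H_'
    return re.sub(r'[-./\s]', '_', path_from_root).upper() + '_'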
def process(fname, allow_type):
"""Process a file."""
fname = str(fname)
arr = fname.rsplit('.', 1)
if fname.find('#') != -1 or arr[-1] not in allow_type:
return
if arr[-1] in CXX_SUFFIX:
_HELPER.process_cpp(fname, arr[-1])
if arr[-1] in PYTHON_SUFFIX:
_HELPER.process_python(fname)
def main():
"""Main entry function."""
parser = argparse.ArgumentParser(description="lint source codes")
parser.add_argument('project', help='project name')
parser.add_argument('filetype', choices=['python', 'cpp', 'all'],
help='source code type')
parser.add_argument('path', nargs='+', help='path to traverse')
parser.add_argument('--exclude_path', nargs='+', default=[],
help='exclude this path, and all subfolders if path is a folder')
parser.add_argument('--pylint-rc', default=None,
help='pylint rc file')
args = parser.parse_args()
_HELPER.project_name = args.project
if args.pylint_rc is not None:
_HELPER.pylint_opts = ['--rcfile='+args.pylint_rc,]
file_type = args.filetype
allow_type = []
if file_type in ('python', 'all'):
allow_type += [x for x in PYTHON_SUFFIX]
if file_type in ('cpp', 'all'):
allow_type += [x for x in CXX_SUFFIX]
allow_type = set(allow_type)
if sys.version_info.major == 2 and os.name != 'nt':
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
# get excluded files
excluded_paths = filepath_enumerate(args.exclude_path)
for path in args.path:
if os.path.isfile(path):
normpath = os.path.normpath(path)
if normpath not in excluded_paths:
process(path, allow_type)
else:
for root, dirs, files in os.walk(path):
for name in files:
file_path = os.path.normpath(os.path.join(root, name))
if file_path not in excluded_paths:
process(file_path, allow_type)
nerr = _HELPER.print_summary(sys.stderr)
sys.exit(nerr > 0)
if __name__ == '__main__':
main()
|
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Needs to stay compatible with Python 2.5 due to GAE.
#
# Copyright 2007 Google Inc. All Rights Reserved.
"""Descriptors essentially contain exactly the information found in a .proto
file, in types that make this information accessible in Python.
"""
__author__ = '[email protected] (Will Robinson)'
from protobuf26.internal import api_implementation
if api_implementation.Type() == 'cpp':
# Used by MakeDescriptor in cpp mode
import os
import uuid
if api_implementation.Version() == 2:
from protobuf26.pyext import _message
else:
from protobuf26.internal import cpp_message
class Error(Exception):
"""Base error for this module."""
class TypeTransformationError(Error):
"""Error transforming between python proto type and corresponding C++ type."""
class DescriptorBase(object):
"""Descriptors base class.
This class is the base of all descriptor classes. It provides common options
  related functionality.
Attributes:
has_options: True if the descriptor has non-default options. Usually it
is not necessary to read this -- just call GetOptions() which will
happily return the default instance. However, it's sometimes useful
for efficiency, and also useful inside the protobuf implementation to
avoid some bootstrapping issues.
"""
def __init__(self, options, options_class_name):
"""Initialize the descriptor given its options message and the name of the
class of the options message. The name of the class is required in case
the options message is None and has to be created.
"""
self._options = options
self._options_class_name = options_class_name
# Does this descriptor have non-default options?
self.has_options = options is not None
def _SetOptions(self, options, options_class_name):
"""Sets the descriptor's options
This function is used in generated proto2 files to update descriptor
options. It must not be used outside proto2.
"""
self._options = options
self._options_class_name = options_class_name
# Does this descriptor have non-default options?
self.has_options = options is not None
def GetOptions(self):
"""Retrieves descriptor options.
This method returns the options set or creates the default options for the
descriptor.
"""
if self._options:
return self._options
from protobuf26 import descriptor_pb2
try:
options_class = getattr(descriptor_pb2, self._options_class_name)
except AttributeError:
raise RuntimeError('Unknown options class name %s!' %
(self._options_class_name))
self._options = options_class()
return self._options
class _NestedDescriptorBase(DescriptorBase):
"""Common class for descriptors that can be nested."""
def __init__(self, options, options_class_name, name, full_name,
file, containing_type, serialized_start=None,
serialized_end=None):
"""Constructor.
Args:
options: Protocol message options or None
to use default message options.
options_class_name: (str) The class name of the above options.
name: (str) Name of this protocol message type.
full_name: (str) Fully-qualified name of this protocol message type,
which will include protocol "package" name and the name of any
enclosing types.
file: (FileDescriptor) Reference to file info.
containing_type: if provided, this is a nested descriptor, with this
descriptor as parent, otherwise None.
serialized_start: The start index (inclusive) in block in the
file.serialized_pb that describes this descriptor.
serialized_end: The end index (exclusive) in block in the
file.serialized_pb that describes this descriptor.
"""
super(_NestedDescriptorBase, self).__init__(
options, options_class_name)
self.name = name
# TODO(falk): Add function to calculate full_name instead of having it in
# memory?
self.full_name = full_name
self.file = file
self.containing_type = containing_type
self._serialized_start = serialized_start
self._serialized_end = serialized_end
def GetTopLevelContainingType(self):
"""Returns the root if this is a nested type, or itself if its the root."""
desc = self
while desc.containing_type is not None:
desc = desc.containing_type
return desc
def CopyToProto(self, proto):
"""Copies this to the matching proto in descriptor_pb2.
Args:
proto: An empty proto instance from descriptor_pb2.
Raises:
      Error: If self couldn't be serialized, due to too few constructor arguments.
"""
if (self.file is not None and
self._serialized_start is not None and
self._serialized_end is not None):
proto.ParseFromString(self.file.serialized_pb[
self._serialized_start:self._serialized_end])
else:
raise Error('Descriptor does not contain serialization.')
class Descriptor(_NestedDescriptorBase):
"""Descriptor for a protocol message type.
A Descriptor instance has the following attributes:
name: (str) Name of this protocol message type.
full_name: (str) Fully-qualified name of this protocol message type,
which will include protocol "package" name and the name of any
enclosing types.
containing_type: (Descriptor) Reference to the descriptor of the
type containing us, or None if this is top-level.
fields: (list of FieldDescriptors) Field descriptors for all
fields in this type.
fields_by_number: (dict int -> FieldDescriptor) Same FieldDescriptor
objects as in |fields|, but indexed by "number" attribute in each
FieldDescriptor.
fields_by_name: (dict str -> FieldDescriptor) Same FieldDescriptor
objects as in |fields|, but indexed by "name" attribute in each
FieldDescriptor.
nested_types: (list of Descriptors) Descriptor references
for all protocol message types nested within this one.
nested_types_by_name: (dict str -> Descriptor) Same Descriptor
objects as in |nested_types|, but indexed by "name" attribute
in each Descriptor.
enum_types: (list of EnumDescriptors) EnumDescriptor references
for all enums contained within this type.
enum_types_by_name: (dict str ->EnumDescriptor) Same EnumDescriptor
objects as in |enum_types|, but indexed by "name" attribute
in each EnumDescriptor.
enum_values_by_name: (dict str -> EnumValueDescriptor) Dict mapping
from enum value name to EnumValueDescriptor for that value.
extensions: (list of FieldDescriptor) All extensions defined directly
within this message type (NOT within a nested type).
extensions_by_name: (dict, string -> FieldDescriptor) Same FieldDescriptor
objects as |extensions|, but indexed by "name" attribute of each
FieldDescriptor.
is_extendable: Does this type define any extension ranges?
options: (descriptor_pb2.MessageOptions) Protocol message options or None
to use default message options.
oneofs: (list of OneofDescriptor) The list of descriptors for oneof fields
in this message.
oneofs_by_name: (dict str -> OneofDescriptor) Same objects as in |oneofs|,
but indexed by "name" attribute.
file: (FileDescriptor) Reference to file descriptor.
"""
# NOTE(tmarek): The file argument redefining a builtin is nothing we can
# fix right now since we don't know how many clients already rely on the
# name of the argument.
def __init__(self, name, full_name, filename, containing_type, fields,
nested_types, enum_types, extensions, options=None,
is_extendable=True, extension_ranges=None, oneofs=None,
file=None, serialized_start=None, serialized_end=None): # pylint:disable=redefined-builtin
"""Arguments to __init__() are as described in the description
of Descriptor fields above.
Note that filename is an obsolete argument, that is not used anymore.
Please use file.name to access this as an attribute.
"""
super(Descriptor, self).__init__(
options, 'MessageOptions', name, full_name, file,
containing_type, serialized_start=serialized_start,
serialized_end=serialized_end)
# We have fields in addition to fields_by_name and fields_by_number,
# so that:
# 1. Clients can index fields by "order in which they're listed."
# 2. Clients can easily iterate over all fields with the terse
# syntax: for f in descriptor.fields: ...
self.fields = fields
for field in self.fields:
field.containing_type = self
self.fields_by_number = dict((f.number, f) for f in fields)
self.fields_by_name = dict((f.name, f) for f in fields)
self.nested_types = nested_types
for nested_type in nested_types:
nested_type.containing_type = self
self.nested_types_by_name = dict((t.name, t) for t in nested_types)
self.enum_types = enum_types
for enum_type in self.enum_types:
enum_type.containing_type = self
self.enum_types_by_name = dict((t.name, t) for t in enum_types)
self.enum_values_by_name = dict(
(v.name, v) for t in enum_types for v in t.values)
self.extensions = extensions
for extension in self.extensions:
extension.extension_scope = self
self.extensions_by_name = dict((f.name, f) for f in extensions)
self.is_extendable = is_extendable
self.extension_ranges = extension_ranges
self.oneofs = oneofs if oneofs is not None else []
self.oneofs_by_name = dict((o.name, o) for o in self.oneofs)
for oneof in self.oneofs:
oneof.containing_type = self
def EnumValueName(self, enum, value):
"""Returns the string name of an enum value.
This is just a small helper method to simplify a common operation.
Args:
enum: string name of the Enum.
value: int, value of the enum.
Returns:
string name of the enum value.
Raises:
KeyError if either the Enum doesn't exist or the value is not a valid
value for the enum.
"""
return self.enum_types_by_name[enum].values_by_number[value].name
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.DescriptorProto.
Args:
proto: An empty descriptor_pb2.DescriptorProto.
"""
    # This function is overridden to give a better doc comment.
super(Descriptor, self).CopyToProto(proto)
# TODO(robinson): We should have aggressive checking here,
# for example:
# * If you specify a repeated field, you should not be allowed
# to specify a default value.
# * [Other examples here as needed].
#
# TODO(robinson): for this and other *Descriptor classes, we
# might also want to lock things down aggressively (e.g.,
# prevent clients from setting the attributes). Having
# stronger invariants here in general will reduce the number
# of runtime checks we must do in reflection.py...
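# Illustrative sketch (not part of the original module): the by-name/by-number
# indexing convention described in the Descriptor docstring above, shown with
# plain stand-in field objects instead of real FieldDescriptors.
def _example_field_indexing():
  import collections
  StubField = collections.namedtuple('StubField', ['name', 'number'])
  fields = [StubField('id', 1), StubField('payload', 2)]
  fields_by_number = dict((f.number, f) for f in fields)
  fields_by_name = dict((f.name, f) for f in fields)
  return fields_by_number[2] is fields_by_name['payload']  # True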
class FieldDescriptor(DescriptorBase):
"""Descriptor for a single field in a .proto file.
A FieldDescriptor instance has the following attributes:
name: (str) Name of this field, exactly as it appears in .proto.
full_name: (str) Name of this field, including containing scope. This is
particularly relevant for extensions.
index: (int) Dense, 0-indexed index giving the order that this
field textually appears within its message in the .proto file.
number: (int) Tag number declared for this field in the .proto file.
type: (One of the TYPE_* constants below) Declared type.
cpp_type: (One of the CPPTYPE_* constants below) C++ type used to
represent this field.
label: (One of the LABEL_* constants below) Tells whether this
field is optional, required, or repeated.
has_default_value: (bool) True if this field has a default value defined,
otherwise false.
default_value: (Varies) Default value of this field. Only
meaningful for non-repeated scalar fields. Repeated fields
should always set this to [], and non-repeated composite
fields should always set this to None.
containing_type: (Descriptor) Descriptor of the protocol message
type that contains this field. Set by the Descriptor constructor
if we're passed into one.
Somewhat confusingly, for extension fields, this is the
descriptor of the EXTENDED message, not the descriptor
of the message containing this field. (See is_extension and
extension_scope below).
message_type: (Descriptor) If a composite field, a descriptor
of the message type contained in this field. Otherwise, this is None.
enum_type: (EnumDescriptor) If this field contains an enum, a
descriptor of that enum. Otherwise, this is None.
is_extension: True iff this describes an extension field.
extension_scope: (Descriptor) Only meaningful if is_extension is True.
Gives the message that immediately contains this extension field.
Will be None iff we're a top-level (file-level) extension field.
options: (descriptor_pb2.FieldOptions) Protocol message field options or
None to use default field options.
containing_oneof: (OneofDescriptor) If the field is a member of a oneof
union, contains its descriptor. Otherwise, None.
"""
# Must be consistent with C++ FieldDescriptor::Type enum in
# descriptor.h.
#
# TODO(robinson): Find a way to eliminate this repetition.
TYPE_DOUBLE = 1
TYPE_FLOAT = 2
TYPE_INT64 = 3
TYPE_UINT64 = 4
TYPE_INT32 = 5
TYPE_FIXED64 = 6
TYPE_FIXED32 = 7
TYPE_BOOL = 8
TYPE_STRING = 9
TYPE_GROUP = 10
TYPE_MESSAGE = 11
TYPE_BYTES = 12
TYPE_UINT32 = 13
TYPE_ENUM = 14
TYPE_SFIXED32 = 15
TYPE_SFIXED64 = 16
TYPE_SINT32 = 17
TYPE_SINT64 = 18
MAX_TYPE = 18
# Must be consistent with C++ FieldDescriptor::CppType enum in
# descriptor.h.
#
# TODO(robinson): Find a way to eliminate this repetition.
CPPTYPE_INT32 = 1
CPPTYPE_INT64 = 2
CPPTYPE_UINT32 = 3
CPPTYPE_UINT64 = 4
CPPTYPE_DOUBLE = 5
CPPTYPE_FLOAT = 6
CPPTYPE_BOOL = 7
CPPTYPE_ENUM = 8
CPPTYPE_STRING = 9
CPPTYPE_MESSAGE = 10
MAX_CPPTYPE = 10
_PYTHON_TO_CPP_PROTO_TYPE_MAP = {
TYPE_DOUBLE: CPPTYPE_DOUBLE,
TYPE_FLOAT: CPPTYPE_FLOAT,
TYPE_ENUM: CPPTYPE_ENUM,
TYPE_INT64: CPPTYPE_INT64,
TYPE_SINT64: CPPTYPE_INT64,
TYPE_SFIXED64: CPPTYPE_INT64,
TYPE_UINT64: CPPTYPE_UINT64,
TYPE_FIXED64: CPPTYPE_UINT64,
TYPE_INT32: CPPTYPE_INT32,
TYPE_SFIXED32: CPPTYPE_INT32,
TYPE_SINT32: CPPTYPE_INT32,
TYPE_UINT32: CPPTYPE_UINT32,
TYPE_FIXED32: CPPTYPE_UINT32,
TYPE_BYTES: CPPTYPE_STRING,
TYPE_STRING: CPPTYPE_STRING,
TYPE_BOOL: CPPTYPE_BOOL,
TYPE_MESSAGE: CPPTYPE_MESSAGE,
TYPE_GROUP: CPPTYPE_MESSAGE
}
# Must be consistent with C++ FieldDescriptor::Label enum in
# descriptor.h.
#
# TODO(robinson): Find a way to eliminate this repetition.
LABEL_OPTIONAL = 1
LABEL_REQUIRED = 2
LABEL_REPEATED = 3
MAX_LABEL = 3
# Must be consistent with C++ constants kMaxNumber, kFirstReservedNumber,
# and kLastReservedNumber in descriptor.h
MAX_FIELD_NUMBER = (1 << 29) - 1
FIRST_RESERVED_FIELD_NUMBER = 19000
LAST_RESERVED_FIELD_NUMBER = 19999
def __init__(self, name, full_name, index, number, type, cpp_type, label,
default_value, message_type, enum_type, containing_type,
is_extension, extension_scope, options=None,
has_default_value=True, containing_oneof=None):
"""The arguments are as described in the description of FieldDescriptor
attributes above.
Note that containing_type may be None, and may be set later if necessary
(to deal with circular references between message types, for example).
Likewise for extension_scope.
"""
super(FieldDescriptor, self).__init__(options, 'FieldOptions')
self.name = name
self.full_name = full_name
self.index = index
self.number = number
self.type = type
self.cpp_type = cpp_type
self.label = label
self.has_default_value = has_default_value
self.default_value = default_value
self.containing_type = containing_type
self.message_type = message_type
self.enum_type = enum_type
self.is_extension = is_extension
self.extension_scope = extension_scope
self.containing_oneof = containing_oneof
if api_implementation.Type() == 'cpp':
if is_extension:
if api_implementation.Version() == 2:
# pylint: disable=protected-access
self._cdescriptor = (
_message.Message._GetExtensionDescriptor(full_name))
# pylint: enable=protected-access
else:
self._cdescriptor = cpp_message.GetExtensionDescriptor(full_name)
else:
if api_implementation.Version() == 2:
# pylint: disable=protected-access
self._cdescriptor = _message.Message._GetFieldDescriptor(full_name)
# pylint: enable=protected-access
else:
self._cdescriptor = cpp_message.GetFieldDescriptor(full_name)
else:
self._cdescriptor = None
@staticmethod
def ProtoTypeToCppProtoType(proto_type):
"""Converts from a Python proto type to a C++ Proto Type.
The Python ProtocolBuffer classes specify both the 'Python' datatype and the
'C++' datatype - and they're not the same. This helper method translates
from one to the other.
Args:
proto_type: the Python proto type (descriptor.FieldDescriptor.TYPE_*)
Returns:
descriptor.FieldDescriptor.CPPTYPE_*, the C++ type.
Raises:
TypeTransformationError: when the Python proto type isn't known.
"""
try:
return FieldDescriptor._PYTHON_TO_CPP_PROTO_TYPE_MAP[proto_type]
except KeyError:
raise TypeTransformationError('Unknown proto_type: %s' % proto_type)
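# Illustrative usage sketch: the static helper above collapses the
# fine-grained wire-format TYPE_* constants into the coarser CPPTYPE_*
# buckets, e.g. every signed 64-bit encoding maps to CPPTYPE_INT64.
def _example_proto_type_to_cpp_type():
    """Minimal usage sketch for FieldDescriptor.ProtoTypeToCppProtoType."""
    fd = FieldDescriptor
    assert fd.ProtoTypeToCppProtoType(fd.TYPE_SINT64) == fd.CPPTYPE_INT64
    assert fd.ProtoTypeToCppProtoType(fd.TYPE_FIXED64) == fd.CPPTYPE_UINT64
    assert fd.ProtoTypeToCppProtoType(fd.TYPE_BYTES) == fd.CPPTYPE_STRING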
class EnumDescriptor(_NestedDescriptorBase):
"""Descriptor for an enum defined in a .proto file.
An EnumDescriptor instance has the following attributes:
name: (str) Name of the enum type.
full_name: (str) Full name of the type, including package name
and any enclosing type(s).
values: (list of EnumValueDescriptors) List of the values
in this enum.
values_by_name: (dict str -> EnumValueDescriptor) Same as |values|,
but indexed by the "name" field of each EnumValueDescriptor.
values_by_number: (dict int -> EnumValueDescriptor) Same as |values|,
but indexed by the "number" field of each EnumValueDescriptor.
containing_type: (Descriptor) Descriptor of the immediate containing
type of this enum, or None if this is an enum defined at the
top level in a .proto file. Set by Descriptor's constructor
if we're passed into one.
file: (FileDescriptor) Reference to file descriptor.
options: (descriptor_pb2.EnumOptions) Enum options message or
None to use default enum options.
"""
def __init__(self, name, full_name, filename, values,
containing_type=None, options=None, file=None,
serialized_start=None, serialized_end=None):
"""Arguments are as described in the attribute description above.
Note that filename is an obsolete argument that is no longer used.
Please use file.name to access this as an attribute.
"""
super(EnumDescriptor, self).__init__(
options, 'EnumOptions', name, full_name, file,
containing_type, serialized_start=serialized_start,
serialized_end=serialized_end)
self.values = values
for value in self.values:
value.type = self
self.values_by_name = dict((v.name, v) for v in values)
self.values_by_number = dict((v.number, v) for v in values)
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.EnumDescriptorProto.
Args:
proto: An empty descriptor_pb2.EnumDescriptorProto.
"""
# This function is overridden to give a better doc comment.
super(EnumDescriptor, self).CopyToProto(proto)
class EnumValueDescriptor(DescriptorBase):
"""Descriptor for a single value within an enum.
name: (str) Name of this value.
index: (int) Dense, 0-indexed index giving the order that this
value appears textually within its enum in the .proto file.
number: (int) Actual number assigned to this enum value.
type: (EnumDescriptor) EnumDescriptor to which this value
belongs. Set by EnumDescriptor's constructor if we're
passed into one.
options: (descriptor_pb2.EnumValueOptions) Enum value options message or
None to use default enum value options.
"""
def __init__(self, name, index, number, type=None, options=None):
"""Arguments are as described in the attribute description above."""
super(EnumValueDescriptor, self).__init__(options, 'EnumValueOptions')
self.name = name
self.index = index
self.number = number
self.type = type
class OneofDescriptor(object):
"""Descriptor for a oneof field.
name: (str) Name of the oneof field.
full_name: (str) Full name of the oneof field, including package name.
index: (int) 0-based index giving the order of the oneof field inside
its containing type.
containing_type: (Descriptor) Descriptor of the protocol message
type that contains this field. Set by the Descriptor constructor
if we're passed into one.
fields: (list of FieldDescriptor) The list of field descriptors this
oneof can contain.
"""
def __init__(self, name, full_name, index, containing_type, fields):
"""Arguments are as described in the attribute description above."""
self.name = name
self.full_name = full_name
self.index = index
self.containing_type = containing_type
self.fields = fields
class ServiceDescriptor(_NestedDescriptorBase):
"""Descriptor for a service.
name: (str) Name of the service.
full_name: (str) Full name of the service, including package name.
index: (int) 0-indexed index giving the order in which this service's
definition appears within the .proto file.
methods: (list of MethodDescriptor) List of methods provided by this
service.
options: (descriptor_pb2.ServiceOptions) Service options message or
None to use default service options.
file: (FileDescriptor) Reference to file info.
"""
def __init__(self, name, full_name, index, methods, options=None, file=None,
serialized_start=None, serialized_end=None):
super(ServiceDescriptor, self).__init__(
options, 'ServiceOptions', name, full_name, file,
None, serialized_start=serialized_start,
serialized_end=serialized_end)
self.index = index
self.methods = methods
# Set the containing service for each method in this service.
for method in self.methods:
method.containing_service = self
def FindMethodByName(self, name):
"""Searches for the specified method, and returns its descriptor."""
for method in self.methods:
if name == method.name:
return method
return None
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.ServiceDescriptorProto.
Args:
proto: An empty descriptor_pb2.ServiceDescriptorProto.
"""
# This function is overridden to give a better doc comment.
super(ServiceDescriptor, self).CopyToProto(proto)
class MethodDescriptor(DescriptorBase):
"""Descriptor for a method in a service.
name: (str) Name of the method within the service.
full_name: (str) Full name of method.
index: (int) 0-indexed index of the method inside the service.
containing_service: (ServiceDescriptor) The service that contains this
method.
input_type: The descriptor of the message that this method accepts.
output_type: The descriptor of the message that this method returns.
options: (descriptor_pb2.MethodOptions) Method options message or
None to use default method options.
"""
def __init__(self, name, full_name, index, containing_service,
input_type, output_type, options=None):
"""The arguments are as described in the description of MethodDescriptor
attributes above.
Note that containing_service may be None, and may be set later if necessary.
"""
super(MethodDescriptor, self).__init__(options, 'MethodOptions')
self.name = name
self.full_name = full_name
self.index = index
self.containing_service = containing_service
self.input_type = input_type
self.output_type = output_type
class FileDescriptor(DescriptorBase):
"""Descriptor for a file. Mimics the descriptor_pb2.FileDescriptorProto.
Note that enum_types_by_name, extensions_by_name, and dependencies
fields are only set by the message_factory module, and not by the
generated proto code.
name: name of file, relative to root of source tree.
package: name of the package
serialized_pb: (str) Byte string of serialized
descriptor_pb2.FileDescriptorProto.
dependencies: List of other FileDescriptors this FileDescriptor depends on.
message_types_by_name: Dict of message names to their descriptors.
enum_types_by_name: Dict of enum names and their descriptors.
extensions_by_name: Dict of extension names and their descriptors.
"""
def __init__(self, name, package, options=None, serialized_pb=None,
dependencies=None):
"""Constructor."""
super(FileDescriptor, self).__init__(options, 'FileOptions')
self.message_types_by_name = {}
self.name = name
self.package = package
self.serialized_pb = serialized_pb
self.enum_types_by_name = {}
self.extensions_by_name = {}
self.dependencies = (dependencies or [])
if (api_implementation.Type() == 'cpp' and
self.serialized_pb is not None):
if api_implementation.Version() == 2:
# pylint: disable=protected-access
_message.Message._BuildFile(self.serialized_pb)
# pylint: enable=protected-access
else:
cpp_message.BuildFile(self.serialized_pb)
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.FileDescriptorProto.
Args:
proto: An empty descriptor_pb2.FileDescriptorProto.
"""
proto.ParseFromString(self.serialized_pb)
def _ParseOptions(message, string):
"""Parses serialized options.
This helper function is used to parse serialized options in generated
proto2 files. It must not be used outside proto2.
"""
message.ParseFromString(string)
return message
def MakeDescriptor(desc_proto, package='', build_file_if_cpp=True):
"""Make a protobuf Descriptor given a DescriptorProto protobuf.
Handles nested descriptors. Note that this is limited to the scope of defining
a message inside of another message. Composite fields can currently only be
resolved if the message is defined in the same scope as the field.
Args:
desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
package: Optional package name for the new message Descriptor (string).
build_file_if_cpp: Update the C++ descriptor pool if api matches.
Set to False on recursion, so no duplicates are created.
Returns:
A Descriptor for protobuf messages.
"""
if api_implementation.Type() == 'cpp' and build_file_if_cpp:
# The C++ implementation requires all descriptors to be backed by the same
# definition in the C++ descriptor pool. To do this, we build a
# FileDescriptorProto with the same definition as this descriptor and build
# it into the pool.
from protobuf26 import descriptor_pb2
file_descriptor_proto = descriptor_pb2.FileDescriptorProto()
file_descriptor_proto.message_type.add().MergeFrom(desc_proto)
# Generate a random name for this proto file to prevent conflicts with
# any imported ones. We need to specify a file name so BuildFile accepts
# our FileDescriptorProto, but it is not important what that file name
# is actually set to.
proto_name = str(uuid.uuid4())
if package:
file_descriptor_proto.name = os.path.join(package.replace('.', '/'),
proto_name + '.proto')
file_descriptor_proto.package = package
else:
file_descriptor_proto.name = proto_name + '.proto'
if api_implementation.Version() == 2:
# pylint: disable=protected-access
_message.Message._BuildFile(file_descriptor_proto.SerializeToString())
# pylint: enable=protected-access
else:
cpp_message.BuildFile(file_descriptor_proto.SerializeToString())
full_message_name = [desc_proto.name]
if package: full_message_name.insert(0, package)
# Create Descriptors for enum types
enum_types = {}
for enum_proto in desc_proto.enum_type:
full_name = '.'.join(full_message_name + [enum_proto.name])
enum_desc = EnumDescriptor(
enum_proto.name, full_name, None, [
EnumValueDescriptor(enum_val.name, ii, enum_val.number)
for ii, enum_val in enumerate(enum_proto.value)])
enum_types[full_name] = enum_desc
# Create Descriptors for nested types
nested_types = {}
for nested_proto in desc_proto.nested_type:
full_name = '.'.join(full_message_name + [nested_proto.name])
# Nested types are just those defined inside of the message, not all types
# used by fields in the message, so no loops are possible here.
nested_desc = MakeDescriptor(nested_proto,
package='.'.join(full_message_name),
build_file_if_cpp=False)
nested_types[full_name] = nested_desc
fields = []
for field_proto in desc_proto.field:
full_name = '.'.join(full_message_name + [field_proto.name])
enum_desc = None
nested_desc = None
if field_proto.HasField('type_name'):
type_name = field_proto.type_name
full_type_name = '.'.join(full_message_name +
[type_name[type_name.rfind('.')+1:]])
if full_type_name in nested_types:
nested_desc = nested_types[full_type_name]
elif full_type_name in enum_types:
enum_desc = enum_types[full_type_name]
# Else type_name references a non-local type, which isn't implemented
field = FieldDescriptor(
field_proto.name, full_name, field_proto.number - 1,
field_proto.number, field_proto.type,
FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type),
field_proto.label, None, nested_desc, enum_desc, None, False, None,
has_default_value=False)
fields.append(field)
desc_name = '.'.join(full_message_name)
return Descriptor(desc_proto.name, desc_name, None, None, fields,
nested_types.values(), enum_types.values(), [])
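# Illustrative usage sketch for MakeDescriptor, assuming the pure-Python
# implementation and the vendored 'protobuf26' package imported above.
def _example_make_descriptor():
    from protobuf26 import descriptor_pb2
    desc_proto = descriptor_pb2.DescriptorProto()
    desc_proto.name = 'Point'
    for number, field_name in enumerate(('x', 'y'), start=1):
        field = desc_proto.field.add()
        field.name = field_name
        field.number = number
        field.type = descriptor_pb2.FieldDescriptorProto.TYPE_INT32
        field.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL
    message_desc = MakeDescriptor(desc_proto, package='demo')
    assert message_desc.full_name == 'demo.Point'
    assert [f.name for f in message_desc.fields] == ['x', 'y']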
"""A few small utilities."""
import pandas
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Tuple
from functools import reduce
#
# Join shortcuts
#
def join(sep, s):
"""Return 's' joined with 'sep'. Coerces to str."""
return sep.join(str(x) for x in list(s))
def sjoin(s):
"""Return 's' joined with spaces."""
return join(' ', s)
def njoin(s):
"""Return 's' joined with newlines."""
return join('\n', s)
def cjoin(s):
"""Return 's' joined with commas."""
return join(',', s)
def tjoin(s):
"""Return 's' joined with tabs."""
return join('\t', s)
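# Illustrative usage sketch: every join helper coerces its elements to str
# before joining.
def _example_joins():
    assert sjoin([1, 2, 3]) == '1 2 3'
    assert cjoin(['a', 'b', 'c']) == 'a,b,c'
    assert tjoin([1.5, 'x']) == '1.5\tx'
    assert njoin(['first', 'second']) == 'first\nsecond'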
#
# Misc
#
def shape(n, nbatch):
"""Return NumPy shape."""
if isinstance(n, (list, tuple)):
return [nbatch] + list(n)
return [nbatch, n]
def product(xs):
"""Return product of factors."""
return reduce(lambda x, y: x * y, xs, 1)
def flatten(xs):
"""Flatten list of lists to a list."""
return sum(xs, [])
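# Illustrative usage sketch: 'shape' prepends the batch dimension, while
# 'product' and 'flatten' reduce a list of factors and a list of lists.
def _example_misc_helpers():
    assert shape(8, 4) == [4, 8]            # scalar length -> [nbatch, n]
    assert shape([8, 16], 4) == [4, 8, 16]  # multi-dimensional length
    assert product([2, 3, 4]) == 24
    assert flatten([[1, 2], [3], []]) == [1, 2, 3]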
def write_tsv(path, records, meta={}, overwrite=False):
"""Write tab separated file."""
path = Path(path)
dat = []
with open(path, 'a') as f:
if overwrite:
f.truncate(0)
if f.tell() == 0:
if meta is not None:
for k, v in meta.items():
dat.append(f'# {k}: {v}')
dat += [tjoin([str(x) for x in r]) for r in records]
f.write(njoin(dat))
f.write('\n')
def write_csv(path, records, meta={}, overwrite=False):
"""Write commas separated file."""
path = Path(path)
dat = []
with open(path, 'a') as f:
if overwrite:
f.truncate(0)
if meta is not None:
for k, v in meta.items():
dat.append(f'# {k}: {v}')
dat += [cjoin([str(x) for x in r]) for r in records]
f.write(njoin(dat))
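# Illustrative usage sketch for write_tsv ('example.tsv' is a hypothetical
# path): meta entries become '# key: value' comment lines, written only when
# the file is empty; each record becomes one tab-separated line.
def _example_write_tsv(path='example.tsv'):
    write_tsv(path,
              [[1, 2, 3], [4, 5, 6]],
              meta={'title': 'demo run'},
              overwrite=True)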
#
# DAT files
#
@dataclass
class Sample:
"""Dyna-rider/rider timing sample: list of times for a given length+batch.
This corresponds to a single line of a dat file.
"""
lengths: List[int]
nbatch: int
times: List[float]
label: str = None
def __post_init__(self):
self.label = 'x'.join(map(str, self.lengths)) + 'b' + str(self.nbatch)
@dataclass
class DAT:
"""Dyna-rider/rider DAT.
This corresponds to a single .dat file.
"""
tag: str
path: Path
samples: Dict[Tuple, Sample]
meta: Dict[str, str]
def sorted_samples(self):
keys = sorted(self.samples.keys(), key=lambda x: product(x))
for key in keys:
yield key, product(key), self.samples[key]
def print(self):
print("tag:", self.tag)
print("path:", self.path)
print("meta:", self.meta)
print("samples:", self.samples)
@dataclass
class Run:
"""Dyna-rider/rider runs.
This corresponds to a directory of .dat files.
"""
title: str
path: Path
dats: Dict[Path, DAT]
def write_dat(fname, length, nbatch, seconds, meta={}):
"""Append record to dyna-rider/rider .dat file."""
if isinstance(length, int):
length = [length]
record = [len(length)] + list(length) + [nbatch, len(seconds)] + seconds
write_tsv(fname, [record], meta=meta, overwrite=False)
def read_dat(fname):
"""Read dyna-rider/rider .dat file."""
path = Path(fname)
records, meta = {}, {}
for line in path.read_text().splitlines():
if line.startswith('# '):
k, v = [x.strip() for x in line[2:].split(':', 1)]
meta[k] = v
continue
words = line.split("\t")
dim = int(words[0])
lengths = tuple(map(int, words[1:dim + 1]))
nbatch = int(words[dim + 1])
times = list(map(float, words[dim + 3:]))
records[lengths] = Sample(list(lengths), nbatch, times)
tag = meta['title'].replace(' ', '_')
return DAT(tag, path, records, meta)
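# Illustrative round-trip sketch, assuming 'example.dat' does not already
# exist: write_dat appends one line per sample
# (dim, lengths..., nbatch, ntimes, times...), and read_dat requires a
# 'title' meta entry, which becomes the DAT tag.
def _example_dat_roundtrip(fname='example.dat'):
    write_dat(fname, [16, 16], 8, [0.12, 0.11, 0.13], meta={'title': 'demo'})
    dat = read_dat(fname)
    sample = dat.samples[(16, 16)]
    assert sample.nbatch == 8 and len(sample.times) == 3
    assert dat.tag == 'demo'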
def read_run(dname):
"""Read all .dat files in a directory."""
path = Path(dname)
dats = {}
for dat in sorted(path.glob('**/*.dat')):
dats[dat.stem] = read_dat(dat)
return Run(path.stem, path, dats)
def list_run(dname):
"""List all .dat files in a directory."""
path = Path(dname)
return sorted(list(path.glob('*.dat')))
def read_runs(dnames):
"""Read all .dat files in directories."""
return [read_run(dname) for dname in dnames]
def get_post_processed(dname, docdir, outdirs):
"""Return file names of post-processed performance data.
The 'primary' files contain median confidence intervals for each
DAT file.
The 'secondary' files contain XXX.
"""
primary = []
for outdir in outdirs:
path = (Path(outdir) / dname).with_suffix('.mdat')
if path.exists():
primary.append(path)
secondary = []
for outdir in outdirs[1:]:
path = (docdir / (str(outdir.name) + "-over-" + str(outdirs[0].name) + "-" + dname)).with_suffix('.sdat')
if path.exists():
secondary.append(path)
return primary, secondary
def by_dat(runs):
r = {}
for dat in runs[0].dats.values():
dstem = dat.path.stem
r[dstem] = {
run.path: run.dats[dstem] for run in runs if dstem in run.dats
}
return r
def to_data_frames(primaries, secondaries):
data_frames = []
for primary in primaries:
df = pandas.read_csv(primary, delimiter='\t', comment='#')
data_frames.append(df)
for i, secondary in enumerate(secondaries):
df = pandas.read_csv(secondary, delimiter='\t', comment='#')
data_frames[i+1] = data_frames[i+1].merge(df, how='left', on='length', suffixes=('', '_y'))
return data_frames
def write_pts_dat(fname, records, meta={}):
"""Write data to *.ptsdat"""
write_csv(fname, records, meta=meta, overwrite=True)
# Copyright 2016 Raytheon BBN Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
import os
import sys
if sys.platform == 'win32' or 'NOFORKING' in os.environ:
import threading as mp
from queue import Queue
else:
import multiprocessing as mp
from multiprocessing import Queue
from multiprocessing import Value, RawValue, RawArray
import ctypes
import logging
import numbers
import itertools
import time
import datetime
import numpy as np
from functools import reduce
from auspex.log import logger
def cartesian(arrays, out=None, dtype='f'):
"""http://stackoverflow.com/questions/28684492/numpy-equivalent-of-itertools-product"""
arrays = [np.asarray(x) for x in arrays]
# dtype = arrays[0].dtype
n = np.prod([x.size for x in arrays])
if out is None:
out = np.zeros([n, len(arrays)], dtype=dtype)
m = int(n / arrays[0].size)
out[:,0] = np.repeat(arrays[0], m)
if arrays[1:]:
cartesian(arrays[1:], out=out[0:m,1:])
for j in range(1, arrays[0].size):
out[j*m:(j+1)*m,1:] = out[0:m,1:]
return out
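# Illustrative usage sketch: cartesian() returns an (N, len(arrays)) array of
# all combinations, with the last input array varying fastest.
def _example_cartesian():
    grid = cartesian([np.array([1, 2]), np.array([10, 20, 30])])
    assert grid.shape == (6, 2)
    assert list(grid[0]) == [1, 10]
    assert list(grid[-1]) == [2, 30]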
class DataAxis(object):
"""An axis in a data stream"""
def __init__(self, name, points=[], unit=None, metadata=None, dtype=np.float32):
super(DataAxis, self).__init__()
if isinstance(name, list):
self.unstructured = True
self.name = name
else:
self.unstructured = False
self.name = str(name)
# self.points holds the CURRENT set of points. During adaptive sweeps
# this will hold the most recently added points of the axis.
self.points = np.array(points)
self.unit = unit
self.refine_func = None
self.metadata = metadata
# By definition data axes will be done after every experiment.run() call
self.done = True
# For adaptive sweeps, etc., keep a record of the original points that we had around
self.original_points = self.points
self.has_been_extended = False
self.num_new_points = 0
self.dtype = dtype
if self.unstructured:
if unit is not None and len(name) != len(unit):
raise ValueError("DataAxis unit length {} and tuples length {} must match.".format(len(unit),len(name)))
if self.unstructured and len(name) != len(points[0]):
raise ValueError("DataAxis points length {} and names length {} must match.".format(len(points[0]), len(name)))
def data_type(self, with_metadata=False):
dtype = []
if self.unstructured:
name = "+".join(self.name)
dtype.extend([(p.name, 'f') for p in self.parameter])
else:
name = self.name
dtype.append((name, 'f'))
if with_metadata and self.metadata is not None:
dtype.append((name + "_metadata", str))
return dtype
def points_with_metadata(self):
if self.metadata is not None:
if self.unstructured:
return [list(self.original_points[i]) + [self.metadata[i]] for i in range(len(self.original_points))]
return [(self.original_points[i], self.metadata[i], ) for i in range(len(self.original_points))]
if self.unstructured:
return [tuple(self.original_points[i]) for i in range(len(self.original_points))]
return [(self.original_points[i],) for i in range(len(self.original_points))]
def tuple_width(self):
if self.unstructured:
width = len(self.name)
else:
width = 1
if self.metadata:
width += 1
return width
def num_points(self):
if self.has_been_extended:
return len(self.points)
else:
return len(self.original_points)
def add_points(self, points):
if self.unstructured and len(self.parameter) != len(points[0]):
raise ValueError("Parameter value tuples must be the same length as the number of parameters.")
if type(points) in [list, np.ndarray]:
points = np.array(points)
else:
# Somebody gave one point to the "add_points" method...
points = np.array([points])
self.num_new_points = len(points)
self.points = np.append(self.points, points, axis=0)
self.has_been_extended = True
def reset(self):
self.points = self.original_points
self.has_been_extended = False
self.num_new_points = 0
def __repr__(self):
return "<DataAxis(name={}, start={}, stop={}, num={}, unit={})>".format(
self.name, self.points[0], self.points[-1], len(self.points), self.unit)
def __str__(self):
return "<DataAxis(name={}, start={}, stop={}, num={}, unit={})>".format(
self.name, self.points[0], self.points[-1], len(self.points), self.unit)
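# Illustrative usage sketch: a structured DataAxis, extended as an adaptive
# sweep might do, then reset to its original points.
def _example_data_axis():
    ax = DataAxis('frequency', points=np.linspace(5e9, 6e9, 11), unit='Hz')
    assert ax.num_points() == 11
    ax.add_points(np.linspace(6.1e9, 6.5e9, 5))
    assert ax.has_been_extended and ax.num_points() == 16
    ax.reset()
    assert ax.num_points() == 11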
class SweepAxis(DataAxis):
""" Structure for sweep axis, separate from DataAxis.
Can be an unstructured axis, in which case 'parameter' is actually a list of parameters. """
def __init__(self, parameter, points = [], metadata=None, refine_func=None, callback_func=None):
self.unstructured = hasattr(parameter, '__iter__')
self.parameter = parameter
if self.unstructured:
unit = [p.unit for p in parameter]
super(SweepAxis, self).__init__([p.name for p in parameter], points=points, unit=unit, metadata=metadata)
self.value = points[0]
else:
super(SweepAxis, self).__init__(parameter.name, points, unit=parameter.unit, metadata=metadata)
self.value = points[0]
# Current value of the metadata
if self.metadata is not None:
self.metadata_value = self.metadata[0]
# This is run at the end of this sweep axis
# Refine_func receives the sweep axis and the experiment as arguments
self.refine_func = refine_func
# This is run before each point in the sweep axis is executed
# Callback_func receives the sweep axis and the experiment as arguments
self.callback_func = callback_func
self.step = 0
self.done = False
self.experiment = None # Should be explicitly set by the experiment
if self.unstructured and len(parameter) != len(points[0]):
raise ValueError("Parameter value tuples must be the same length as the number of parameters.")
logger.debug("Created {}".format(self.__repr__()))
def update(self):
""" Update value after each run.
"""
if self.step < self.num_points():
if self.callback_func:
self.callback_func(self, self.experiment)
self.value = self.points[self.step]
if self.metadata is not None:
self.metadata_value = self.metadata[self.step]
logger.debug("Sweep Axis '{}' at step {} takes value: {}.".format(self.name,
self.step,self.value))
self.push()
self.step += 1
self.done = False
def check_for_refinement(self, output_connectors_dict):
"""Check to see if we need to perform any refinements. If there is a refine_func
and it returns a list of points, then we need to extend the axes. Otherwise, if the
refine_func returns None or false, then we reset the axis to its original set of points. If
there is no refine_func then we don't do anything at all."""
if not self.done and self.step==self.num_points():
logger.debug("Refining on axis {}".format(self.name))
if self.refine_func:
points = self.refine_func(self, self.experiment)
if points is None or points is False:
# Returns false if no refinements needed, otherwise adds points to list
self.step = 0
self.done = True
self.reset()
logger.debug("Sweep Axis '{}' complete.".format(self.name))
# Push to ocs, which should push to processes
for oc in output_connectors_dict.values():
oc.push_event("refined", (self.name, True, self.original_points)) # axis name, reset, points
return False
self.add_points(points)
self.done = False
for oc in output_connectors_dict.values():
oc.push_event("refined", (self.name, False, points)) # axis name, reset, points
return True
else:
self.step = 0
self.done = True
logger.debug("Sweep Axis '{}' complete.".format(self.name))
return False
def push(self):
""" Push parameter value(s) """
if self.unstructured:
for p, v in zip(self.parameter, self.value):
p.value = v
p.push()
else:
self.parameter.value = self.value
self.parameter.push()
def __repr__(self):
return "<SweepAxis(name={},length={},unit={},value={},unstructured={}>".format(self.name,
self.num_points(),self.unit,self.value,self.unstructured)
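# Illustrative sketch: _StubParameter is a hypothetical stand-in for an auspex
# Parameter (anything with name, unit, value and push() works here).
class _StubParameter(object):
    def __init__(self, name, unit=None):
        self.name, self.unit, self.value = name, unit, None
    def push(self):
        pass  # a real Parameter would write the value to an instrument here

def _example_sweep_axis():
    ax = SweepAxis(_StubParameter('power', 'dBm'), points=[-20, -10, 0])
    for _ in range(ax.num_points()):
        ax.update()                       # sets ax.value and pushes it
    assert ax.value == 0
    assert ax.check_for_refinement({}) is False  # no refine_func: axis done
    assert ax.done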
class DataStreamDescriptor(object):
"""Axes information"""
def __init__(self, dtype=np.float32):
super(DataStreamDescriptor, self).__init__()
self.data_name = "Data"
self.data_unit = "Unit"
self.axes = []
self.unit = None
self.params = {} # Parameters associated with each dataset
self.parent = None
self._exp_src = None # Actual source code from the underlying experiment
self.dtype = dtype
self.metadata = {}
# Buffer size multiplier: use this to inflate the size of the
# shared memory buffer. This is needed for partial averages, which
# may require more space than their descriptors would indicate
# since they are emitted as often as possible.
self.buffer_mult_factor = 1
# Keep track of the parameter permutations we have actually used...
self.visited_tuples = []
def is_adaptive(self):
return True in [a.refine_func is not None for a in self.axes]
def add_axis(self, axis, position=0):
# Check if axis is DataAxis or SweepAxis (which inherits from DataAxis)
if isinstance(axis, DataAxis):
logger.debug("Adding DataAxis into DataStreamDescriptor: {}".format(axis))
self.axes.insert(position, axis)
else:
raise TypeError("Failed adding axis. Object is not DataAxis: {}".format(axis))
def add_param(self, key, value):
self.params[key] = value
def num_dims(self):
# Number of axes
return len(self.axes)
def extent(self, flip=False):
"""Convenience function for matplotlib.imshow, which expects extent=(left, right, bottom, top)."""
if self.num_dims() == 2:
return (self.axes[1].points[0], self.axes[1].points[-1], self.axes[0].points[0], self.axes[0].points[-1])
else:
raise Exception("Can't get extent for any number of axes other than two.")
def data_dims(self):
# Return dimension (length) of the data axes, exclude sweep axes (return 1 for each)
dims = []
for a in self.axes:
if isinstance(a, SweepAxis):
dims.append(1)
else:
dims.append(len(a.points))
return dims
def tuple_width(self):
return sum([a.tuple_width() for a in self.axes])
def dims(self):
return [a.num_points() for a in self.axes]
def axes_done(self):
# The axis is considered done when all of the sub-axes are done
# This can happen multiple times for a single axis
doneness = [a.done for a in self.axes]
return [np.all(doneness[i:]) for i in range(len(doneness))]
def done(self):
return np.all([a.done for a in self.axes])
def num_points(self):
if len(self.axes)>0:
return reduce(lambda x,y: x*y, [a.num_points() for a in self.axes])
else:
return 0
def expected_num_points(self):
if len(self.axes)>0:
return reduce(lambda x,y: x*y, [len(a.original_points) for a in self.axes])
else:
return 0
def last_data_axis(self):
# Return the outermost data axis, excluding sweep axes
data_axes_idx = [i for i, a in enumerate(self.axes) if not isinstance(a,SweepAxis)]
if len(data_axes_idx)>0:
return data_axes_idx[0]
else:
logger.warning("DataStreamDescriptor has no pure DataAxis. Return None.")
return None
def axis_data_type(self, with_metadata=False, excluding_axis=None):
dtype = []
for a in self.axes:
if a.name != excluding_axis:
dtype.extend(a.data_type(with_metadata=with_metadata))
return dtype
def tuples(self, as_structured_array=True):
"""Returns a list of all tuples visited by the sweeper. Should only
be used with adaptive sweeps."""
if len(self.visited_tuples) == 0:
self.visited_tuples = self.expected_tuples(with_metadata=True)
if as_structured_array:
# If we already have a structured array
if type(self.visited_tuples) is np.ndarray and type(self.visited_tuples.dtype.names) is tuple:
return self.visited_tuples
elif type(self.visited_tuples) is np.ndarray:
return np.rec.fromarrays(self.visited_tuples.T, dtype=self.axis_data_type(with_metadata=True))
return np.core.records.fromrecords(self.visited_tuples, dtype=self.axis_data_type(with_metadata=True))
return self.visited_tuples
def expected_tuples(self, with_metadata=False, as_structured_array=True):
"""Returns a list of tuples representing the cartesian product of the axis values. Should only
be used with non-adaptive sweeps."""
vals = [a.points_with_metadata() for a in self.axes]
#
# TODO: avoid this slow list comprehension
simple = True
if True in [a.unstructured for a in self.axes]:
simple = False
if True in [a.metadata is not None for a in self.axes]:
simple = False
if self.axes == []:
simple = False
if simple:
# flattened_list = [tuple((val for sublist in line for val in sublist)) for line in nested_list]
flattened_list = cartesian(vals)
else:
nested_list = itertools.product(*vals)
flattened_list = [tuple((val for sublist in line for val in sublist)) for line in nested_list]
# flattened_list = np.array(list(nested_list)).reshape(-1, self.tuple_width())
if as_structured_array:
if simple:
return np.rec.fromarrays(flattened_list.T, dtype=self.axis_data_type(with_metadata=True))
return np.rec.fromrecords(flattened_list, dtype=self.axis_data_type(with_metadata=True))
return flattened_list
def axis_names(self, with_metadata=False):
"""Returns all axis names included those from unstructured axes"""
vals = []
for a in self.axes:
if a.unstructured:
for p in a.parameter:
vals.append(p.name)
else:
vals.append(a.name)
if with_metadata and a.metadata is not None:
if a.unstructured:
vals.append("+".join(a.name) + "_metadata")
else:
vals.append(a.name + "_metadata")
return vals
def num_data_axis_points(self):
return self.num_points_through_axis(self.last_data_axis())
def data_axis_values(self):
"""Returns a list of point lists for each data axis, ignoring sweep axes."""
return [a.points_with_metadata() for a in self.axes if not isinstance(a,SweepAxis) ]
def reset(self):
for a in self.axes:
a.done = False
a.reset()
def __copy__(self):
newone = type(self)()
newone.__dict__.update(self.__dict__)
newone.axes = self.axes[:]
return newone
def copy(self):
return self.__copy__()
def axis(self, axis_name):
return self.axes[self.axis_num(axis_name)]
def axis_num(self, axis_name):
names = [a.name for a in self.axes]
return names.index(axis_name)
def pop_axis(self, axis_name):
# Pop the time axis (which should be here)
names = [a.name for a in self.axes]
if axis_name not in names:
raise Exception("Couldn't pop axis {} from descriptor, it probably doesn't exist.".format(axis_name))
return self.axes.pop(names.index(axis_name))
def num_points_through_axis(self, axis_name):
if type(axis_name) is int:
axis_num = axis_name
else:
axis_num = self.axis_num(axis_name)
# if False in [a.refine_func is None for a in self.axes[axis_num:]]:
# raise Exception("Cannot call num_points_through_axis with interior adaptive sweeps.")
if axis_num >= len(self.axes):
return 0
elif len(self.axes) == 1:
return self.axes[0].num_points()
else:
return reduce(lambda x,y: x*y, [a.num_points() for a in self.axes[axis_num:]])
def num_new_points_through_axis(self, axis_name):
if type(axis_name) is int:
axis_num = axis_name
else:
axis_num = self.axis_num(axis_name)
if axis_num >= len(self.axes):
return 0
elif len(self.axes) == 1:
return self.axes[0].num_new_points
else:
return self.axes[axis_num].num_new_points * reduce(lambda x,y: x*y, [a.num_points() for a in self.axes[axis_num+1:]])
def __repr__(self):
return "<DataStreamDescriptor(num_dims={}, num_points={})>".format(
self.num_dims(), self.num_points())
def __getitem__(self, axis_name):
return self.axis(axis_name).points
def _ipython_key_completions_(self):
return [a.name for a in self.axes]
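# Illustrative usage sketch: axes are inserted at position 0, so the most
# recently added axis becomes the outermost one.
def _example_descriptor():
    desc = DataStreamDescriptor()
    desc.add_axis(DataAxis('time', points=np.arange(100)))
    desc.add_axis(DataAxis('amplitude', points=np.linspace(0.0, 1.0, 5)))
    assert desc.num_dims() == 2
    assert desc.num_points() == 5 * 100
    assert desc.axis_names() == ['amplitude', 'time']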
class DataStream(object):
"""A stream of data"""
def __init__(self, name=None, unit=None):
super(DataStream, self).__init__()
self.queue = Queue()
self.name = name
self.unit = unit
self.points_taken_lock = mp.Lock()
self.points_taken = Value('i', 0) # Using shared memory since these are used in filter processes
self.descriptor = None
self.start_connector = None
self.end_connector = None
self.closed = False
# Shared memory interface
self.buffer_lock = mp.Lock()
# self.buffer_size = 500000
self.buff_idx = Value('i', 0)
def final_init(self):
self.buffer_size = self.descriptor.num_points()*self.descriptor.buffer_mult_factor
# logger.info(f"{self.start_connector.parent}:{self.start_connector} to {self.end_connector.parent}:{self.end_connector} buffer of size {self.buffer_size}")
if self.buffer_size > 50e6:
logger.debug(f"Limiting buffer size of {self} to 50 Million Points")
self.buffer_size = 50e6
self.buff_shared_re = RawArray(ctypes.c_double, int(self.buffer_size))
self.buff_shared_im = RawArray(ctypes.c_double, int(self.buffer_size))
self.re_np = np.frombuffer(self.buff_shared_re, dtype=np.float64)
self.im_np = np.frombuffer(self.buff_shared_im, dtype=np.float64)
def set_descriptor(self, descriptor):
if isinstance(descriptor,DataStreamDescriptor):
logger.debug("Setting descriptor on stream '%s' to '%s'", self.name, descriptor)
self.descriptor = descriptor
else:
raise TypeError("Failed setting descriptor. Object is not DataStreamDescriptor: {}".format(descriptor))
def num_points(self):
if self.descriptor is not None:
return self.descriptor.num_points()
else:
logger.warning("Stream '{}' has no descriptor. Function num_points() returns 0.".format(self.name))
return 0
def done(self):
with self.points_taken_lock:
return self.points_taken.value >= self.num_points()
def percent_complete(self):
if (self.descriptor is not None) and self.num_points()>0:
with self.points_taken_lock:
return 100.0*self.points_taken.value/self.num_points()
else:
return 0.0
def reset(self):
self.descriptor.reset()
with self.points_taken_lock:
self.points_taken.value = 0
while not self.queue.empty():
self.queue.get_nowait()
if self.start_connector is not None:
self.start_connector.points_taken.value = 0
def __repr__(self):
return "<DataStream(name={}, completion={}%, descriptor={})>".format(
self.name, self.percent_complete(), self.descriptor)
def push(self, data):
if self.closed:
raise Exception("The queue is closed and should not be receiving any more data")
with self.points_taken_lock:
if hasattr(data, 'size'):
self.points_taken.value += data.size
else:
try:
self.points_taken.value += len(data)
except:
try:
junk = data + 1.0
self.points_taken.value += 1
except:
raise ValueError("Got data {} that is neither an array nor a float".format(data))
with self.buffer_lock:
start = self.buff_idx.value
re = np.real(np.array(data)).flatten()
if start+re.size > self.re_np.size:
raise ValueError(f"Stream {self} received more data than fits in the shared memory buffer. \
This is probably due to digitizer raw streams producing data too quickly for the pipeline.")
self.re_np[start:start+re.size] = re
if np.issubdtype(self.descriptor.dtype, np.complexfloating):
im = np.imag(data).flatten()
self.im_np[start:start+im.size] = im
message = {"type": "data", "data": None}
self.buff_idx.value = start + np.array(data).size
self.queue.put(message)
def pop(self):
result = None
with self.buffer_lock:
idx = self.buff_idx.value
if idx != 0:
result = self.re_np[:idx]
if np.issubdtype(self.descriptor.dtype, np.complexfloating):
result = result.astype(np.complex128) + 1.0j*self.im_np[:idx]
self.buff_idx.value = 0
result = result.copy()
return result
def push_event(self, event_type, data=None):
if self.closed:
raise Exception("The queue is closed and should not be receiving any more data")
message = {"type": "event", "event_type": event_type, "data": data}
self.queue.put(message)
if event_type == "done":
logger.debug(f"Closing out queue {self}")
self.queue.close()
self.closed = True
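# Illustrative usage sketch: final_init() sizes the shared-memory buffer from
# the descriptor, push() copies data into it, and pop() drains it.
def _example_data_stream():
    desc = DataStreamDescriptor()
    desc.add_axis(DataAxis('index', points=np.arange(16)))
    stream = DataStream(name='demo')
    stream.set_descriptor(desc)
    stream.final_init()
    stream.push(np.arange(16, dtype=np.float64))
    assert stream.done()
    assert len(stream.pop()) == 16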
# These connectors are where we attach the DataStreams
class InputConnector(object):
def __init__(self, name="", parent=None, datatype=None, max_input_streams=1):
self.name = name
self.stream = None
self.max_input_streams = max_input_streams
self.num_input_streams = 0
self.input_streams = []
self.descriptor = None
self.parent = parent
def add_input_stream(self, stream):
logger.debug("Adding input stream '%s' to input connector %s.", stream, self)
if self.num_input_streams < self.max_input_streams:
self.input_streams.append(stream)
self.num_input_streams += 1
stream.end_connector = self
else:
raise ValueError("Reached maximum number of input connectors. Could not add another input stream to the connector.")
def done(self):
return all([stream.done() for stream in self.input_streams])
def num_points(self):
if len(self.input_streams) > 0:
return self.input_streams[0].num_points()
else:
raise ValueError("Cannot get num_points since no input streams are present on this connector.")
def update_descriptors(self):
logger.debug("Starting descriptor update in input connector %s.", self.name)
self.descriptor = self.input_streams[0].descriptor
self.parent.update_descriptors()
def __repr__(self):
return "<InputConnector(name={})>".format(self.name)
class OutputConnector(object):
def __init__(self, name="", data_name=None, unit=None, parent=None, dtype=np.float32):
self.name = name
self.output_streams = []
self.parent = parent
self.unit = unit
self.points_taken_lock = mp.Lock()
self.points_taken = Value('i', 0) # Using shared memory since these are used in filter processes
# If data_name is not None, then this connector is the origin of the whole chain
self.data_name = data_name
self.data_unit = unit
# Set up a default descriptor, and add access
# to its methods for convenience.
self.descriptor = DataStreamDescriptor(dtype=dtype)
if self.data_name:
self.descriptor.data_name = self.data_name
self.descriptor.unit = self.unit
self.add_axis = self.descriptor.add_axis
# Determine whether we need to deal with adaptive sweeps
self.has_adaptive_sweeps = False
def __len__(self):
with self.points_taken_lock:
return self.points_taken.value
# We allow the connector itself to possess
# a descriptor that it may pass along to its streams
def set_descriptor(self, descriptor):
self.descriptor = descriptor
def add_output_stream(self, stream):
logger.debug("Adding output stream '%s' to output connector %s.", stream, self)
self.output_streams.append(stream)
stream.start_connector = self
def update_descriptors(self):
logger.debug("Starting descriptor update in output connector %s, where the descriptor is %s",
self.name, self.descriptor)
for stream in self.output_streams:
logger.debug("\tnow setting stream %s to %s", stream, self.descriptor)
stream.set_descriptor(self.descriptor)
logger.debug("\tnow setting stream end connector %s to %s", stream.end_connector, self.descriptor)
stream.end_connector.update_descriptors()
def num_points(self):
return self.descriptor.num_points()
def done(self):
return all([stream.done() for stream in self.output_streams])
def push(self, data):
with self.points_taken_lock:
if hasattr(data, 'size'):
self.points_taken.value += data.size
elif isinstance(data, numbers.Number):
self.points_taken.value += 1
else:
self.points_taken.value += len(data)
for stream in self.output_streams:
stream.push(data)
def push_event(self, event_type, data=None):
for stream in self.output_streams:
stream.push_event(event_type, data)
def __repr__(self):
return "<OutputConnector(name={})>".format(self.name)