"""
Support for an interface to work with a remote instance of Home Assistant.
If a connection error occurs while communicating with the API a
HomeAssistantError will be raised.
For more details about the Python API, please refer to the documentation at
https://home-assistant.io/developers/python_api/
"""
from datetime import datetime
import enum
import json
import logging
import threading
import urllib.parse
import requests
import blumate.bootstrap as bootstrap
import blumate.core as bm
from blumate.const import (
HTTP_HEADER_HA_AUTH, SERVER_PORT, URL_API, URL_API_EVENT_FORWARD,
URL_API_EVENTS, URL_API_EVENTS_EVENT, URL_API_SERVICES,
URL_API_SERVICES_SERVICE, URL_API_STATES, URL_API_STATES_ENTITY,
HTTP_HEADER_CONTENT_TYPE, CONTENT_TYPE_JSON)
from blumate.exceptions import BluMateError
METHOD_GET = "get"
METHOD_POST = "post"
METHOD_DELETE = "delete"
_LOGGER = logging.getLogger(__name__)
class APIStatus(enum.Enum):
"""Represent API status."""
# pylint: disable=no-init,invalid-name,too-few-public-methods
OK = "ok"
INVALID_PASSWORD = "invalid_password"
CANNOT_CONNECT = "cannot_connect"
UNKNOWN = "unknown"
def __str__(self):
"""Return the state."""
return self.value
class API(object):
"""Object to pass around Home Assistant API location and credentials."""
# pylint: disable=too-few-public-methods
def __init__(self, host, api_password=None, port=None, use_ssl=False):
"""Initalize the API."""
self.host = host
self.port = port or SERVER_PORT
self.api_password = api_password
if use_ssl:
self.base_url = "https://{}:{}".format(host, self.port)
else:
self.base_url = "http://{}:{}".format(host, self.port)
self.status = None
self._headers = {
HTTP_HEADER_CONTENT_TYPE: CONTENT_TYPE_JSON,
}
if api_password is not None:
self._headers[HTTP_HEADER_HA_AUTH] = api_password
def validate_api(self, force_validate=False):
"""Test if we can communicate with the API."""
if self.status is None or force_validate:
self.status = validate_api(self)
return self.status == APIStatus.OK
def __call__(self, method, path, data=None):
"""Make a call to the Home Assistant API."""
if data is not None:
data = json.dumps(data, cls=JSONEncoder)
url = urllib.parse.urljoin(self.base_url, path)
try:
if method == METHOD_GET:
return requests.get(
url, params=data, timeout=5, headers=self._headers)
else:
return requests.request(
method, url, data=data, timeout=5, headers=self._headers)
except requests.exceptions.ConnectionError:
_LOGGER.exception("Error connecting to server")
raise BluMateError("Error connecting to server")
except requests.exceptions.Timeout:
error = "Timeout when talking to {}".format(self.host)
_LOGGER.exception(error)
raise BluMateError(error)
def __repr__(self):
"""Return the representation of the API."""
return "API({}, {}, {})".format(
self.host, self.api_password, self.port)
class BluMate(bm.BluMate):
"""Home Assistant that forwards work."""
# pylint: disable=super-init-not-called,too-many-instance-attributes
def __init__(self, remote_api, local_api=None):
"""Initalize the forward instance."""
if not remote_api.validate_api():
raise BluMateError(
"Remote API at {}:{} not valid: {}".format(
remote_api.host, remote_api.port, remote_api.status))
self.remote_api = remote_api
self.pool = pool = bm.create_worker_pool()
self.bus = EventBus(remote_api, pool)
self.services = bm.ServiceRegistry(self.bus, pool)
self.states = StateMachine(self.bus, self.remote_api)
self.config = bm.Config()
self.config.api = local_api
def start(self):
"""Start the instance."""
# Ensure a local API exists to connect with remote
if 'api' not in self.config.components:
if not bootstrap.setup_component(self, 'api'):
raise BluMateError(
'Unable to setup local API to receive events')
bm.create_timer(self)
self.bus.fire(bm.EVENT_BLUMATE_START,
origin=bm.EventOrigin.remote)
        # Give eventlet time to start up
import eventlet
eventlet.sleep(0.1)
        # Set up forwarding so that events from remote_api reach local_api.
        # Do this after we fire START, otherwise HTTP is not started.
if not connect_remote_events(self.remote_api, self.config.api):
raise BluMateError((
'Could not setup event forwarding from api {} to '
'local api {}').format(self.remote_api, self.config.api))
def stop(self):
"""Stop Home Assistant and shuts down all threads."""
_LOGGER.info("Stopping")
self.bus.fire(bm.EVENT_BLUMATE_STOP,
origin=bm.EventOrigin.remote)
self.pool.stop()
# Disconnect master event forwarding
disconnect_remote_events(self.remote_api, self.config.api)
class EventBus(bm.EventBus):
"""EventBus implementation that forwards fire_event to remote API."""
# pylint: disable=too-few-public-methods
def __init__(self, api, pool=None):
"""Initalize the eventbus."""
super().__init__(pool)
self._api = api
def fire(self, event_type, event_data=None, origin=bm.EventOrigin.local):
"""Forward local events to remote target.
Handles remote event as usual.
"""
# All local events that are not TIME_CHANGED are forwarded to API
if origin == bm.EventOrigin.local and \
event_type != bm.EVENT_TIME_CHANGED:
fire_event(self._api, event_type, event_data)
else:
super().fire(event_type, event_data, origin)
class EventForwarder(object):
"""Listens for events and forwards to specified APIs."""
def __init__(self, hass, restrict_origin=None):
"""Initalize the event forwarder."""
self.hass = hass
self.restrict_origin = restrict_origin
# We use a tuple (host, port) as key to ensure
# that we do not forward to the same host twice
self._targets = {}
self._lock = threading.Lock()
def connect(self, api):
"""Attach to a Home Assistant instance and forward events.
Will overwrite old target if one exists with same host/port.
"""
with self._lock:
if len(self._targets) == 0:
# First target we get, setup listener for events
self.hass.bus.listen(bm.MATCH_ALL, self._event_listener)
key = (api.host, api.port)
self._targets[key] = api
def disconnect(self, api):
"""Remove target from being forwarded to."""
with self._lock:
key = (api.host, api.port)
            did_remove = self._targets.pop(key, None) is not None
if len(self._targets) == 0:
# Remove event listener if no forwarding targets present
self.hass.bus.remove_listener(bm.MATCH_ALL,
self._event_listener)
return did_remove
def _event_listener(self, event):
"""Listen and forward all events."""
with self._lock:
# We don't forward time events or, if enabled, non-local events
if event.event_type == bm.EVENT_TIME_CHANGED or \
(self.restrict_origin and event.origin != self.restrict_origin):
return
for api in self._targets.values():
fire_event(api, event.event_type, event.data)
class StateMachine(bm.StateMachine):
"""Fire set events to an API. Uses state_change events to track states."""
def __init__(self, bus, api):
"""Initalize the statemachine."""
super().__init__(None)
self._api = api
self.mirror()
bus.listen(bm.EVENT_STATE_CHANGED, self._state_changed_listener)
def remove(self, entity_id):
"""Remove the state of an entity.
Returns boolean to indicate if an entity was removed.
"""
return remove_state(self._api, entity_id)
def set(self, entity_id, new_state, attributes=None):
"""Call set_state on remote API."""
set_state(self._api, entity_id, new_state, attributes)
def mirror(self):
"""Discard current data and mirrors the remote state machine."""
self._states = {state.entity_id: state for state
in get_states(self._api)}
def _state_changed_listener(self, event):
"""Listen for state changed events and applies them."""
if event.data['new_state'] is None:
self._states.pop(event.data['entity_id'], None)
else:
self._states[event.data['entity_id']] = event.data['new_state']
class JSONEncoder(json.JSONEncoder):
"""JSONEncoder that supports Home Assistant objects."""
# pylint: disable=too-few-public-methods,method-hidden
def default(self, obj):
"""Convert Home Assistant objects.
Hand other objects to the original method.
"""
if isinstance(obj, datetime):
return obj.isoformat()
elif hasattr(obj, 'as_dict'):
return obj.as_dict()
try:
return json.JSONEncoder.default(self, obj)
except TypeError:
# If the JSON serializer couldn't serialize it
# it might be a generator, convert it to a list
try:
return [self.default(child_obj)
for child_obj in obj]
except TypeError:
                # OK, we're lost: trigger the original TypeError again
return json.JSONEncoder.default(self, obj)
def validate_api(api):
"""Make a call to validate API."""
try:
req = api(METHOD_GET, URL_API)
if req.status_code == 200:
return APIStatus.OK
elif req.status_code == 401:
return APIStatus.INVALID_PASSWORD
else:
return APIStatus.UNKNOWN
except BluMateError:
return APIStatus.CANNOT_CONNECT
def connect_remote_events(from_api, to_api):
"""Setup from_api to forward all events to to_api."""
data = {
'host': to_api.host,
'api_password': to_api.api_password,
'port': to_api.port
}
try:
req = from_api(METHOD_POST, URL_API_EVENT_FORWARD, data)
if req.status_code == 200:
return True
else:
_LOGGER.error(
"Error setting up event forwarding: %s - %s",
req.status_code, req.text)
return False
except BluMateError:
_LOGGER.exception("Error setting up event forwarding")
return False
def disconnect_remote_events(from_api, to_api):
"""Disconnect forwarding events from from_api to to_api."""
data = {
'host': to_api.host,
'port': to_api.port
}
try:
req = from_api(METHOD_DELETE, URL_API_EVENT_FORWARD, data)
if req.status_code == 200:
return True
else:
_LOGGER.error(
"Error removing event forwarding: %s - %s",
req.status_code, req.text)
return False
except BluMateError:
_LOGGER.exception("Error removing an event forwarder")
return False
def get_event_listeners(api):
"""List of events that is being listened for."""
try:
req = api(METHOD_GET, URL_API_EVENTS)
return req.json() if req.status_code == 200 else {}
except (BluMateError, ValueError):
# ValueError if req.json() can't parse the json
_LOGGER.exception("Unexpected result retrieving event listeners")
return {}
def fire_event(api, event_type, data=None):
"""Fire an event at remote API."""
try:
req = api(METHOD_POST, URL_API_EVENTS_EVENT.format(event_type), data)
if req.status_code != 200:
_LOGGER.error("Error firing event: %d - %s",
req.status_code, req.text)
except BluMateError:
_LOGGER.exception("Error firing event")
def get_state(api, entity_id):
"""Query given API for state of entity_id."""
try:
req = api(METHOD_GET, URL_API_STATES_ENTITY.format(entity_id))
# req.status_code == 422 if entity does not exist
return bm.State.from_dict(req.json()) \
if req.status_code == 200 else None
except (BluMateError, ValueError):
# ValueError if req.json() can't parse the json
_LOGGER.exception("Error fetching state")
return None
def get_states(api):
"""Query given API for all states."""
try:
req = api(METHOD_GET,
URL_API_STATES)
return [bm.State.from_dict(item) for
item in req.json()]
except (BluMateError, ValueError, AttributeError):
# ValueError if req.json() can't parse the json
_LOGGER.exception("Error fetching states")
return []
def remove_state(api, entity_id):
"""Call API to remove state for entity_id.
Return True if entity is gone (removed/never existed).
"""
try:
req = api(METHOD_DELETE, URL_API_STATES_ENTITY.format(entity_id))
if req.status_code in (200, 404):
return True
_LOGGER.error("Error removing state: %d - %s",
req.status_code, req.text)
return False
except BluMateError:
_LOGGER.exception("Error removing state")
return False
def set_state(api, entity_id, new_state, attributes=None):
"""Tell API to update state for entity_id.
Return True if success.
"""
attributes = attributes or {}
data = {'state': new_state,
'attributes': attributes}
try:
req = api(METHOD_POST,
URL_API_STATES_ENTITY.format(entity_id),
data)
if req.status_code not in (200, 201):
_LOGGER.error("Error changing state: %d - %s",
req.status_code, req.text)
return False
else:
return True
except BluMateError:
_LOGGER.exception("Error setting state")
return False
def is_state(api, entity_id, state):
"""Query API to see if entity_id is specified state."""
cur_state = get_state(api, entity_id)
return cur_state and cur_state.state == state
def get_services(api):
"""Return a list of dicts.
Each dict has a string "domain" and a list of strings "services".
"""
try:
req = api(METHOD_GET, URL_API_SERVICES)
return req.json() if req.status_code == 200 else {}
except (BluMateError, ValueError):
# ValueError if req.json() can't parse the json
_LOGGER.exception("Got unexpected services result")
return {}
def call_service(api, domain, service, service_data=None):
"""Call a service at the remote API."""
try:
req = api(METHOD_POST,
URL_API_SERVICES_SERVICE.format(domain, service),
service_data)
if req.status_code != 200:
_LOGGER.error("Error calling service: %d - %s",
req.status_code, req.text)
except BluMateError:
_LOGGER.exception("Error calling service")
# -*- coding: utf-8 -*-
"""
.. _disc-stats:
=====================
Statistical inference
=====================
Here we will briefly cover multiple concepts of inferential statistics in an
introductory manner, and demonstrate how to use some MNE statistical functions.
.. contents:: Topics
:local:
:depth: 3
"""
# Authors: Eric Larson <[email protected]>
# License: BSD (3-clause)
from functools import partial
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa, analysis:ignore
import mne
from mne.stats import (ttest_1samp_no_p, bonferroni_correction, fdr_correction,
permutation_t_test, permutation_cluster_1samp_test)
print(__doc__)
###############################################################################
# Hypothesis testing
# ------------------
# Null hypothesis
# ^^^^^^^^^^^^^^^
# From `Wikipedia <https://en.wikipedia.org/wiki/Null_hypothesis>`__:
#
# In inferential statistics, a general statement or default position that
# there is no relationship between two measured phenomena, or no
# association among groups.
#
# We typically want to reject a **null hypothesis** with
# some probability (e.g., p < 0.05). This probability is also called the
# significance level :math:`\alpha`.
# To think about what this means, let's follow the illustrative example from
# [1]_ and construct a toy dataset consisting of a 40 x 40 square with a
# "signal" present in the center with white noise added and a Gaussian
# smoothing kernel applied.
width = 40
n_subjects = 10
signal_mean = 100
signal_sd = 100
noise_sd = 0.01
gaussian_sd = 5
sigma = 1e-3 # sigma for the "hat" method
n_permutations = 'all' # run an exact test
n_src = width * width
# For each "subject", make a smoothed noisy signal with a centered peak
rng = np.random.RandomState(2)
X = noise_sd * rng.randn(n_subjects, width, width)
# Add a signal at the center
X[:, width // 2, width // 2] = signal_mean + rng.randn(n_subjects) * signal_sd
# Spatially smooth with a 2D Gaussian kernel
size = width // 2 - 1
gaussian = np.exp(-(np.arange(-size, size + 1) ** 2 / float(gaussian_sd ** 2)))
for si in range(X.shape[0]):
for ri in range(X.shape[1]):
X[si, ri, :] = np.convolve(X[si, ri, :], gaussian, 'same')
for ci in range(X.shape[2]):
X[si, :, ci] = np.convolve(X[si, :, ci], gaussian, 'same')
###############################################################################
# The data averaged over all subjects looks like this:
fig, ax = plt.subplots()
ax.imshow(X.mean(0), cmap='inferno')
ax.set(xticks=[], yticks=[], title="Data averaged over subjects")
###############################################################################
# In this case, a null hypothesis we could test for each voxel is:
#
# There is no difference between the mean value and zero
# (:math:`H_0 \colon \mu = 0`).
#
# The alternative hypothesis, then, is that the voxel has a non-zero mean
# (:math:`H_1 \colon \mu \neq 0`).
# This is a *two-tailed* test because the mean could be less than
# or greater than zero, whereas a *one-tailed* test would test only one of
# these possibilities, i.e. :math:`H_1 \colon \mu \geq 0` or
# :math:`H_1 \colon \mu \leq 0`.
#
# .. note:: Here we will refer to each spatial location as a "voxel".
# In general, though, it could be any sort of data value,
# including cortical vertex at a specific time, pixel in a
# time-frequency decomposition, etc.
#
# Parametric tests
# ^^^^^^^^^^^^^^^^
# Let's start with a **paired t-test**, which is a standard test
# for differences in paired samples. Mathematically, it is equivalent
# to a 1-sample t-test on the difference between the samples in each condition.
# The paired t-test is **parametric**
# because it assumes that the underlying sample distribution is Gaussian, and
# is only valid in this case. This happens to be satisfied by our toy dataset,
# but is not always satisfied for neuroimaging data.
#
# In the context of our toy dataset, which has many voxels
# (:math:`40 \cdot 40 = 1600`), applying the paired t-test is called a
# *mass-univariate* approach as it treats each voxel independently.
titles = ['t']
out = stats.ttest_1samp(X, 0, axis=0)
ts = [out[0]]
ps = [out[1]]
mccs = [False] # these are not multiple-comparisons corrected
def plot_t_p(t, p, title, mcc, axes=None):
if axes is None:
fig = plt.figure(figsize=(6, 3))
axes = [fig.add_subplot(121, projection='3d'), fig.add_subplot(122)]
show = True
else:
show = False
p_lims = [0.1, 0.001]
t_lims = -stats.distributions.t.ppf(p_lims, n_subjects - 1)
p_lims = [-np.log10(p) for p in p_lims]
# t plot
x, y = np.mgrid[0:width, 0:width]
surf = axes[0].plot_surface(x, y, np.reshape(t, (width, width)),
rstride=1, cstride=1, linewidth=0,
vmin=t_lims[0], vmax=t_lims[1], cmap='viridis')
axes[0].set(xticks=[], yticks=[], zticks=[],
xlim=[0, width - 1], ylim=[0, width - 1])
axes[0].view_init(30, 15)
cbar = plt.colorbar(ax=axes[0], shrink=0.75, orientation='horizontal',
fraction=0.1, pad=0.025, mappable=surf)
cbar.set_ticks(t_lims)
cbar.set_ticklabels(['%0.1f' % t_lim for t_lim in t_lims])
cbar.set_label('t-value')
cbar.ax.get_xaxis().set_label_coords(0.5, -0.3)
if not show:
axes[0].set(title=title)
if mcc:
axes[0].title.set_weight('bold')
# p plot
use_p = -np.log10(np.reshape(np.maximum(p, 1e-5), (width, width)))
img = axes[1].imshow(use_p, cmap='inferno', vmin=p_lims[0], vmax=p_lims[1],
interpolation='nearest')
axes[1].set(xticks=[], yticks=[])
cbar = plt.colorbar(ax=axes[1], shrink=0.75, orientation='horizontal',
fraction=0.1, pad=0.025, mappable=img)
cbar.set_ticks(p_lims)
cbar.set_ticklabels(['%0.1f' % p_lim for p_lim in p_lims])
cbar.set_label(r'$-\log_{10}(p)$')
cbar.ax.get_xaxis().set_label_coords(0.5, -0.3)
if show:
text = fig.suptitle(title)
if mcc:
text.set_weight('bold')
plt.subplots_adjust(0, 0.05, 1, 0.9, wspace=0, hspace=0)
mne.viz.utils.plt_show()
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# "Hat" variance adjustment
# ~~~~~~~~~~~~~~~~~~~~~~~~~
# The "hat" technique regularizes the variance values used in the t-test
# calculation [1]_ to compensate for implausibly small variances.
ts.append(ttest_1samp_no_p(X, sigma=sigma))
ps.append(stats.distributions.t.sf(np.abs(ts[-1]), len(X) - 1) * 2)
titles.append(r'$\mathrm{t_{hat}}$')
mccs.append(False)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# Non-parametric tests
# ^^^^^^^^^^^^^^^^^^^^
# Instead of assuming an underlying Gaussian distribution, we could instead
# use a **non-parametric resampling** method. In the case of a paired t-test
# between two conditions A and B, which is mathematically equivalent to a
# one-sample t-test between the difference in the conditions A-B, under the
# null hypothesis we have the principle of **exchangeability**. This means
# that, if the null is true, we can exchange conditions and not change
# the distribution of the test statistic.
#
# When using a paired t-test, exchangeability thus means that we can flip the
# signs of the difference between A and B. Therefore, we can construct the
# **null distribution** values for each voxel by taking random subsets of
# samples (subjects), flipping the sign of their difference, and recording the
# absolute value of the resulting statistic (we record the absolute value
# because we conduct a two-tailed test). The absolute value of the statistic
# evaluated on the veridical data can then be compared to this distribution,
# and the p-value is simply the proportion of null distribution values that
# are as large or larger than the observed statistic.
#
# .. warning:: In the case of a true one-sample t-test, i.e. analyzing a single
# condition rather than the difference between two conditions,
# it is not clear where/how exchangeability applies; see
# `this FieldTrip discussion <ft_exch_>`_.
#
# In the case where ``n_permutations`` is large enough (or "all") so
# that the complete set of unique resampling exchanges can be done
# (which is :math:`2^{N_{samp}}-1` for a one-tailed and
# :math:`2^{N_{samp}-1}-1` for a two-tailed test, not counting the
# veridical distribution), instead of randomly exchanging conditions
# the null is formed by using all possible exchanges. This is known
# as a permutation test (or exact test).
# Here we have to do a bit of gymnastics to get our function to do
# a permutation test without correcting for multiple comparisons:
X.shape = (n_subjects, n_src) # flatten the array for simplicity
titles.append('Permutation')
ts.append(np.zeros(width * width))
ps.append(np.zeros(width * width))
mccs.append(False)
for ii in range(n_src):
ts[-1][ii], ps[-1][ii] = permutation_t_test(X[:, [ii]], verbose=False)[:2]
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
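# As a quick illustrative check (a sketch using an arbitrary 1000 random sign
# flips), we can build the sign-flip null distribution for the center voxel
# by hand and compare the resulting p-value to the ``permutation_t_test`` one.
center_idx = (width // 2) * width + width // 2
center = X[:, center_idx]
rng_null = np.random.RandomState(0)
null_t = np.empty(1000)
for perm_idx in range(null_t.size):
    signs = rng_null.choice([-1., 1.], size=n_subjects)
    flipped = center * signs
    null_t[perm_idx] = flipped.mean() / (flipped.std(ddof=1) /
                                         np.sqrt(n_subjects))
t_center = center.mean() / (center.std(ddof=1) / np.sqrt(n_subjects))
p_center = (np.abs(null_t) >= np.abs(t_center)).mean()
print('hand-rolled sign-flip p-value at the center: %0.3f '
      '(permutation_t_test: %0.3f)' % (p_center, ps[-1][center_idx]))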
###############################################################################
# Multiple comparisons
# --------------------
# So far, we have done no correction for multiple comparisons. This is
# potentially problematic for these data because there are
# :math:`40 \cdot 40 = 1600` tests being performed. If we use a threshold
# p < 0.05 for each individual test, we would expect many voxels to be declared
# significant even if there were no true effect. In other words, we would make
# many **type I errors** (adapted from `here <errors_>`_):
#
# .. rst-class:: skinnytable
#
# +----------+--------+------------------+------------------+
# |          |        | Null hypothesis                     |
# |          |        +------------------+------------------+
# |          |        | True             | False            |
# +==========+========+==================+==================+
# |          |        | Type I error     | Correct          |
# |          | Yes    | False positive   | True positive    |
# + Reject   +--------+------------------+------------------+
# |          |        | Correct          | Type II error    |
# |          | No     | True negative    | False negative   |
# +----------+--------+------------------+------------------+
#
# To see why, consider a standard :math:`\alpha = 0.05`.
# For a single test, our probability of making a type I error is 0.05.
# The probability of making at least one type I error in
# :math:`N_{\mathrm{test}}` independent tests is then given by
# :math:`1 - (1 - \alpha)^{N_{\mathrm{test}}}`:
N = np.arange(1, 80)
alpha = 0.05
p_type_I = 1 - (1 - alpha) ** N
fig, ax = plt.subplots(figsize=(4, 3))
ax.scatter(N, p_type_I, 3)
ax.set(xlim=N[[0, -1]], ylim=[0, 1], xlabel=r'$N_{\mathrm{test}}$',
ylabel=u'Probability of at least\none type I error')
ax.grid(True)
fig.tight_layout()
fig.show()
###############################################################################
# To combat this problem, several methods exist. Typically these
# provide control over either one of the following two measures:
#
# 1. `Familywise error rate (FWER) <fwer_>`_
# The probability of making one or more type I errors:
#
# .. math::
#        \mathrm{P}(N_{\mathrm{type\ I}} \geq 1 \mid H_0)
#
# 2. `False discovery rate (FDR) <fdr_>`_
# The expected proportion of rejected null hypotheses that are
# actually true:
#
# .. math::
# \mathrm{E}(\frac{N_{\mathrm{type\ I}}}{N_{\mathrm{reject}}}
# \mid N_{\mathrm{reject}} > 0) \cdot
# \mathrm{P}(N_{\mathrm{reject}} > 0 \mid H_0)
#
# We cover some techniques that control FWER and FDR below.
#
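# As a quick numeric illustration (a sketch on made-up p-values, separate
# from our imaging data), compare the Bonferroni and Benjamini-Hochberg (FDR)
# adjustments side by side before applying them to our data below:
p_demo = np.array([0.001, 0.008, 0.039, 0.041, 0.27])
reject_bonf, p_bonf = bonferroni_correction(p_demo, alpha=0.05)
reject_fdr, p_fdr = fdr_correction(p_demo, alpha=0.05, method='indep')
print('Bonferroni:', np.round(p_bonf, 3), reject_bonf)
print('FDR:       ', np.round(p_fdr, 3), reject_fdr)

###############################################################################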
# Bonferroni correction
# ^^^^^^^^^^^^^^^^^^^^^
# Perhaps the simplest way to deal with multiple comparisons, `Bonferroni
# correction <https://en.wikipedia.org/wiki/Bonferroni_correction>`__
# conservatively multiplies the p-values by the number of comparisons to
# control the FWER.
titles.append('Bonferroni')
ts.append(ts[-1])
ps.append(bonferroni_correction(ps[0])[1])
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# False discovery rate (FDR) correction
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Typically FDR is performed with the Benjamini-Hochberg procedure, which
# is less restrictive than Bonferroni correction for large numbers of
# comparisons (fewer type II errors), but provides less strict control of type
# I errors.
titles.append('FDR')
ts.append(ts[-1])
ps.append(fdr_correction(ps[0])[1])
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# Non-parametric resampling test with a maximum statistic
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# **Non-parametric resampling tests** can also be used to correct for multiple
# comparisons. In its simplest form, we again do permutations using
# exchangeability under the null hypothesis, but this time we take the
# *maximum statistic across all voxels* in each permutation to form the
# null distribution. The p-value for each voxel from the veridical data
# is then given by the proportion of null distribution values
# that were as large or larger.
#
# This method has two important features:
#
# 1. It controls FWER.
# 2. It is non-parametric. Even though our initial test statistic
# (here a 1-sample t-test) is parametric, the null
# distribution for the null hypothesis rejection (the mean value across
# subjects is indistinguishable from zero) is obtained by permutations.
# This means that it makes no assumptions of Gaussianity
# (which do hold for this example, but do not in general for some types
# of processed neuroimaging data).
titles.append(r'$\mathbf{Perm_{max}}$')
out = permutation_t_test(X, verbose=False)[:2]
ts.append(out[0])
ps.append(out[1])
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# Clustering
# ^^^^^^^^^^
# Each of the aforementioned multiple comparisons corrections has the
# disadvantage of not fully incorporating the correlation structure of the
# data, namely that points close to one another (e.g., in space or time) tend
# to be correlated. However, by defining the connectivity/adjacency/neighbor
# structure in our data, we can use **clustering** to compensate.
#
# To use this, we need to rethink our null hypothesis. Instead
# of thinking about a null hypothesis about means per voxel (with one
# independent test per voxel), we consider a null hypothesis about sizes
# of clusters in our data, which could be stated like:
#
# The distribution of spatial cluster sizes observed in two experimental
# conditions are drawn from the same probability distribution.
#
# Here we only have a single condition and we contrast to zero, which can
# be thought of as:
#
# The distribution of spatial cluster sizes is independent of the sign
# of the data.
#
# In this case, we again do permutations with a maximum statistic, but, under
# each permutation, we:
#
# 1. Compute the test statistic for each voxel individually.
# 2. Threshold the test statistic values.
# 3. Cluster voxels that exceed this threshold (with the same sign) based on
# adjacency.
# 4. Retain the size of the largest cluster (measured, e.g., by a simple voxel
# count, or by the sum of voxel t-values within the cluster) to build the
# null distribution.
#
# After doing these permutations, the cluster sizes in our veridical data
# are compared to this null distribution. The p-value associated with each
# cluster is again given by the proportion of null distribution values that
# are as large or larger. This can then be subjected to a standard p-value
# threshold
# (e.g., p < 0.05) to reject the null hypothesis (i.e., find an effect of
# interest).
#
# This reframing to consider *cluster sizes* rather than *individual means*
# maintains the advantages of the standard non-parametric permutation
# test -- namely controlling FWER and making no assumptions of parametric
# data distribution.
# Critically, though, it also accounts for the correlation structure in the
# data -- which in this toy case is spatial but in general can be
# multidimensional (e.g., spatio-temporal) -- because the null distribution
# will be derived from data in a way that preserves these correlations.
#
# However, there is a drawback. If a cluster significantly deviates from
# the null, no further inference on the cluster (e.g., peak location) can be
# made, as the entire cluster as a whole is used to reject the null.
# Moreover, because the test statistic concerns the full data, the null
# hypothesis (and our rejection of it) refers to the structure of the full
# data. For more information, see also the comprehensive
# `FieldTrip tutorial <ft_cluster_>`_.
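#
# To make the per-permutation steps above concrete, here is a small
# illustrative sketch (positive clusters only, for brevity): threshold the
# observed t-map, label connected supra-threshold voxels with
# :mod:`scipy.ndimage`, and record the largest cluster size.
from scipy import ndimage  # noqa: E402
t_map = ts[0]  # the uncorrected t-values, shape (width, width)
t_threshold = stats.distributions.t.ppf(1 - alpha, n_subjects - 1)
supra = t_map > t_threshold
labels, n_clusters = ndimage.label(supra)
if n_clusters:
    sizes = ndimage.sum(supra, labels, index=np.arange(1, n_clusters + 1))
    print('clusters: %d, largest size: %d voxels' % (n_clusters, sizes.max()))
else:
    print('no supra-threshold voxels at this threshold')

###############################################################################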
#
# Defining the connectivity/neighbor/adjacency matrix
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# First we need to define our connectivity/neighbor/adjacency matrix.
# This is a square array (or sparse matrix) of shape ``(n_src, n_src)`` that
# contains zeros and ones to define which spatial points are connected, i.e.,
# which voxels are adjacent to each other. In our case this
# is quite simple, as our data are aligned on a rectangular grid.
#
# Let's pretend that our data were smaller -- a 3 x 3 grid. Thinking about
# each voxel as being connected to the other voxels it touches, we would
# need a 9 x 9 connectivity matrix. The first row of this matrix contains the
# voxels in the flattened data that the first voxel touches. Since it touches
# the second element in the first row and the first element in the second row
# (and is also a neighbor to itself), this would be::
#
# [1, 1, 0, 1, 0, 0, 0, 0, 0]
#
# :mod:`sklearn.feature_extraction` provides a convenient function for this:
from sklearn.feature_extraction.image import grid_to_graph # noqa: E402
mini_connectivity = grid_to_graph(3, 3).toarray()
assert mini_connectivity.shape == (9, 9)
print(mini_connectivity[0])
###############################################################################
# In general the connectivity between voxels can be more complex, such as
# those between sensors in 3D space, or time-varying activation at brain
# vertices on a cortical surface. MNE provides several convenience functions
# for computing connectivity/neighbor/adjacency matrices (see the
# :ref:`Statistics API <api_reference_statistics>`).
#
# Standard clustering
# ~~~~~~~~~~~~~~~~~~~
# Here, since our data are on a grid, we can use ``connectivity=None`` to
# trigger optimized grid-based code, and run the clustering algorithm.
titles.append('Clustering')
# Reshape data to what is equivalent to (n_samples, n_space, n_time)
X.shape = (n_subjects, width, width)
# Compute threshold from t distribution (this is also the default)
threshold = stats.distributions.t.ppf(1 - alpha, n_subjects - 1)
t_clust, clusters, p_values, H0 = permutation_cluster_1samp_test(
X, n_jobs=1, threshold=threshold, connectivity=None,
n_permutations=n_permutations)
# Put the cluster data in a viewable format
p_clust = np.ones((width, width))
for cl, p in zip(clusters, p_values):
p_clust[cl] = p
ts.append(t_clust)
ps.append(p_clust)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# "Hat" variance adjustment
# ~~~~~~~~~~~~~~~~~~~~~~~~~
# This method can also be used in this context to correct for small
# variances [1]_:
titles.append(r'$\mathbf{C_{hat}}$')
stat_fun_hat = partial(ttest_1samp_no_p, sigma=sigma)
t_hat, clusters, p_values, H0 = permutation_cluster_1samp_test(
X, n_jobs=1, threshold=threshold, connectivity=None,
n_permutations=n_permutations, stat_fun=stat_fun_hat, buffer_size=None)
p_hat = np.ones((width, width))
for cl, p in zip(clusters, p_values):
p_hat[cl] = p
ts.append(t_hat)
ps.append(p_hat)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# .. _tfce_example:
#
# Threshold-free cluster enhancement (TFCE)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# TFCE eliminates the free parameter initial ``threshold`` value that
# determines which points are included in clustering by approximating
# a continuous integration across possible threshold values with a standard
# `Riemann sum <https://en.wikipedia.org/wiki/Riemann_sum>`__ [2]_.
# This requires giving a starting threshold ``start`` and a step
# size ``step``, which in MNE is supplied as a dict.
# The smaller the ``step`` and closer to 0 the ``start`` value,
# the better the approximation, but the longer it takes.
#
# A significant advantage of TFCE is that, rather than modifying the
# statistical null hypothesis under test (from one about individual voxels
# to one about the distribution of clusters in the data), it modifies the *data
# under test* while still controlling for multiple comparisons.
# The statistical test is then done at the level of individual voxels rather
# than clusters. This allows for evaluation of each point
# independently for significance rather than only as cluster groups.
titles.append(r'$\mathbf{C_{TFCE}}$')
threshold_tfce = dict(start=0, step=0.2)
t_tfce, _, p_tfce, H0 = permutation_cluster_1samp_test(
X, n_jobs=1, threshold=threshold_tfce, connectivity=None,
n_permutations=n_permutations)
ts.append(t_tfce)
ps.append(p_tfce)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# We can also combine TFCE and the "hat" correction:
titles.append(r'$\mathbf{C_{hat,TFCE}}$')
t_tfce_hat, _, p_tfce_hat, H0 = permutation_cluster_1samp_test(
X, n_jobs=1, threshold=threshold_tfce, connectivity=None,
n_permutations=n_permutations, stat_fun=stat_fun_hat, buffer_size=None)
ts.append(t_tfce_hat)
ps.append(p_tfce_hat)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
###############################################################################
# Visualize and compare methods
# -----------------------------
# Let's take a look at these statistics. The top row shows each test statistic,
# and the bottom shows p-values for the various statistical tests; the tests
# with proper control over FWER or FDR have bold titles.
fig = plt.figure(facecolor='w', figsize=(14, 3))
assert len(ts) == len(titles) == len(ps)
for ii in range(len(ts)):
ax = [fig.add_subplot(2, 10, ii + 1, projection='3d'),
fig.add_subplot(2, 10, 11 + ii)]
plot_t_p(ts[ii], ps[ii], titles[ii], mccs[ii], ax)
fig.tight_layout(pad=0, w_pad=0.05, h_pad=0.1)
plt.show()
###############################################################################
# The first three columns show the parametric and non-parametric statistics
# that are not corrected for multiple comparisons:
#
# - Mass univariate **t-tests** result in jagged edges.
# - **"Hat" variance correction** of the t-tests produces less peaky edges,
# correcting for sharpness in the statistic driven by low-variance voxels.
# - **Non-parametric resampling tests** are very similar to t-tests. This is to
# be expected: the data are drawn from a Gaussian distribution, and thus
# satisfy parametric assumptions.
#
# The next three columns show multiple comparison corrections of the
# mass univariate tests (parametric and non-parametric). These correct for
# multiple comparisons too conservatively because neighboring voxels in our
# data are correlated:
#
# - **Bonferroni correction** eliminates any significant activity.
# - **FDR correction** is less conservative than Bonferroni.
# - A **permutation test with a maximum statistic** also eliminates any
# significant activity.
#
# The final four columns show the non-parametric cluster-based permutation
# tests with a maximum statistic:
#
# - **Standard clustering** identifies the correct region. However, the whole
# area must be declared significant, so no peak analysis can be done.
# Also, the peak is broad.
# - **Clustering with "hat" variance adjustment** tightens the estimate of
# significant activity.
# - **Clustering with TFCE** allows analyzing each significant point
# independently, but still has a broadened estimate.
# - **Clustering with TFCE and "hat" variance adjustment** tightens the area
# declared significant (again FWER corrected).
#
# Statistical functions in MNE
# ----------------------------
# The complete listing of statistical functions provided by MNE is in
# the :ref:`Statistics API list <api_reference_statistics>`, but we will give
# a brief overview here.
#
# MNE provides several convenience parametric testing functions that can be
# used in conjunction with the non-parametric clustering methods. However,
# the set of functions we provide is not meant to be exhaustive.
#
# If the univariate statistical contrast of interest is not listed here
# (e.g., interaction term in an unbalanced ANOVA), consider checking out the
# :mod:`statsmodels` package. It offers many functions for computing
# statistical contrasts, e.g., :func:`statsmodels.stats.anova.anova_lm`.
# To use these functions in clustering (a brief sketch follows this list):
#
# 1. Determine which test statistic (e.g., t-value, F-value) you would use
# in a univariate context to compute your contrast of interest. In other
# words, if there were only a single output such as reaction times, what
# test statistic might you compute on the data?
# 2. Wrap the call to that function within a function that takes an input of
# the same shape that is expected by your clustering function,
# and returns an array of the same shape without the "samples" dimension
# (e.g., :func:`mne.stats.permutation_cluster_1samp_test` takes an array
# of shape ``(n_samples, p, q)`` and returns an array of shape ``(p, q)``).
# 3. Pass this wrapped function to the ``stat_fun`` argument to the clustering
# function.
# 4. Set an appropriate ``threshold`` value (float or dict) based on the
# values your statistical contrast function returns.
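#
# A minimal sketch of such a wrapper (the name ``custom_stat_fun`` is just for
# illustration): it takes the data with the "samples" dimension first and
# drops that dimension in its return value, so it can be passed as
# ``stat_fun``. The existing t-based ``threshold`` matches the values this
# function returns, so the call below mirrors the earlier clustering calls.
def custom_stat_fun(x):
    # x has the samples dimension first; return one statistic per location
    return stats.ttest_1samp(x, 0, axis=0)[0]


t_custom, _, p_custom, _ = permutation_cluster_1samp_test(
    X, n_jobs=1, threshold=threshold, connectivity=None,
    n_permutations=n_permutations, stat_fun=custom_stat_fun, buffer_size=None)

###############################################################################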
#
# Parametric methods provided by MNE
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# - :func:`mne.stats.ttest_1samp_no_p`
# Paired t-test, optionally with hat adjustment.
# This is used by default for contrast enhancement in paired cluster tests.
#
# - :func:`mne.stats.f_oneway`
# One-way ANOVA for independent samples.
# This can be used to compute various F-contrasts. It is used by default
# for contrast enhancement in non-paired cluster tests.
#
# - :func:`mne.stats.f_mway_rm`
# M-way ANOVA for repeated measures and balanced designs.
# This returns F-statistics and p-values. The associated helper function
# :func:`mne.stats.f_threshold_mway_rm` can be used to determine the
# F-threshold at a given significance level.
#
# - :func:`mne.stats.linear_regression`
# Compute ordinary least square regressions on multiple targets, e.g.,
# sensors, time points across trials (samples).
# For each regressor it returns the beta value, t-statistic, and
# uncorrected p-value. While it can be used as a test, it is
# particularly useful to compute weighted averages or deal with
# continuous predictors.
#
# Non-parametric methods
# ^^^^^^^^^^^^^^^^^^^^^^
#
# - :func:`mne.stats.permutation_cluster_test`
# Unpaired contrasts with connectivity.
#
# - :func:`mne.stats.spatio_temporal_cluster_test`
# Unpaired contrasts with spatio-temporal connectivity.
#
# - :func:`mne.stats.permutation_t_test`
# Paired contrast with no connectivity.
#
# - :func:`mne.stats.permutation_cluster_1samp_test`
# Paired contrasts with connectivity.
#
# - :func:`mne.stats.spatio_temporal_cluster_1samp_test`
# Paired contrasts with spatio-temporal connectivity.
#
# .. warning:: In most MNE functions, data has shape
# ``(..., n_space, n_time)``, where the spatial dimension can
# be e.g. sensors or source vertices. But for our spatio-temporal
# clustering functions, the spatial dimensions need to be **last**
# for computational efficiency reasons. For example, for
# :func:`mne.stats.spatio_temporal_cluster_1samp_test`, ``X``
# needs to be of shape ``(n_samples, n_time, n_space)``. You can
# use :func:`numpy.transpose` to transpose axes if necessary.
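#
# A tiny sketch of the reordering the warning above describes (the array here
# is made up purely for illustration): move the spatial axis to the end before
# calling a spatio-temporal clustering function.
data_spacetime = rng.randn(n_subjects, 5, 7)  # (n_samples, n_space, n_time)
data_for_clust = np.transpose(data_spacetime, (0, 2, 1))
assert data_for_clust.shape == (n_subjects, 7, 5)  # (n_samples, n_time, n_space)

###############################################################################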
#
# References
# ----------
# .. [1] Ridgway et al. 2012, "The problem of low variance voxels in
# statistical parametric mapping; a new hat avoids a 'haircut'",
# NeuroImage. 2012 Feb 1;59(3):2131-41.
#
# .. [2] Smith and Nichols 2009, "Threshold-free cluster enhancement:
# addressing problems of smoothing, threshold dependence, and
# localisation in cluster inference", NeuroImage 44 (2009) 83-98.
#
# .. include:: ../../tutorial_links.inc
import os
import time
import unittest
from mock import patch
from chirp.library import audio_file_test
from chirp.library import do_delete_audio_file_from_db
from chirp.library import database
TEST_DB_NAME_PATTERN = "/tmp/chirp-library-db_test.%d.sqlite"
class DeleteFingerprintTest(unittest.TestCase):
def setUp(self):
self.name = TEST_DB_NAME_PATTERN % int(time.time() * 1000000)
self.db = database.Database(self.name)
def tearDown(self):
os.unlink(self.name)
def _add_test_audiofiles(self):
test_volume = 17
test_import_timestamp = 1230959520
# populate some dummy audiofiles into the database
all_au_files = [audio_file_test.get_test_audio_file(i)
for i in xrange(10)]
add_txn = self.db.begin_add(test_volume, test_import_timestamp)
for au_file in all_au_files:
au_file.volume = test_volume
au_file.import_timestamp = test_import_timestamp
for au_file in all_au_files:
add_txn.add(au_file)
add_txn.commit()
    def test_del_audiofiles__full_delete_single(self):
# SETUP
test_fingerprint = "0000000000000007"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
# make sure 10 records exist
self.assertEqual(len(list(self.db.get_all())), 10)
# quick confirmation that the audiofile that we want to test exists.
af = self.db.get_by_fingerprint(test_fingerprint)
self.assertEquals(af.fingerprint, test_fingerprint)
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
afm.del_audiofiles([test_fingerprint])
# RESULTS
# verify audiofile doesn't exist
af = self.db.get_by_fingerprint(test_fingerprint)
self.assertEquals(af, None)
# make sure only 9 records exist now
self.assertEqual(len(list(self.db.get_all())), 9)
def test_del_audiofiles__full_delete_multiple(self):
# SETUP
test_fingerprint_1 = "0000000000000005"
test_fingerprint_2 = "0000000000000007"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
# make sure 10 records exist
self.assertEqual(len(list(self.db.get_all())), 10)
        # quick confirmation that the audiofiles that we want to test exist.
af = self.db.get_by_fingerprint(test_fingerprint_1)
self.assertEquals(af.fingerprint, test_fingerprint_1)
af = self.db.get_by_fingerprint(test_fingerprint_2)
self.assertEquals(af.fingerprint, test_fingerprint_2)
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
afm.del_audiofiles([test_fingerprint_1, test_fingerprint_2])
# RESULTS
# verify audiofiles don't exist
af = self.db.get_by_fingerprint(test_fingerprint_1)
self.assertEquals(af, None)
af = self.db.get_by_fingerprint(test_fingerprint_2)
self.assertEquals(af, None)
# make sure only 8 records exist now
self.assertEqual(len(list(self.db.get_all())), 8)
def test_del_audiofiles__full_delete_non_existing_fingerprint(self):
# SETUP
test_fingerprint_1 = "0000000000000020"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
# make sure 10 records exist
self.assertEqual(len(list(self.db.get_all())), 10)
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
afm.del_audiofiles([test_fingerprint_1])
# RESULTS
# make sure nothing was deleted
self.assertEqual(len(list(self.db.get_all())), 10)
def test_del_audiofiles__raises_exception(self):
# SETUP
test_fingerprint_1 = "0000000000000007"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
# make sure 10 records exist
self.assertEqual(len(list(self.db.get_all())), 10)
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
def _raise_exception(*args, **kwargs):
raise Exception('Test')
with patch.object(afm, 'conn', autospec=True) as mock_conn:
mock_conn.execute.side_effect = _raise_exception
with self.assertRaises(Exception):
afm.del_audiofiles([test_fingerprint_1])
mock_conn.rollback.assert_called_with()
def test_get_audio_files__existing_record(self):
# SETUP
test_fingerprint = "0000000000000007"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
af = afm.get_audio_files(fingerprints=[test_fingerprint])
# RESULTS
self.assertSetEqual(
set(a['fingerprint'] for a in af),
set([test_fingerprint]))
def test_get_audio_files__non_existing_records(self):
# SETUP
test_fingerprint_1 = "0000000000000020"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
af = afm.get_audio_files(
fingerprints=[test_fingerprint_1])
# RESULTS
self.assertEqual(len(list(af)), 0)
def test_get_tags__existing_record(self):
# SETUP
test_fingerprint_1 = "0000000000000005"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
af = afm.get_tags(
fingerprints=[test_fingerprint_1])
# RESULTS
self.assertListEqual(
list(a['fingerprint'] for a in af),
5 * [test_fingerprint_1])
def test_get_tags__non_existing_records(self):
# SETUP
test_fingerprint_1 = "0000000000000020"
# Create db tables
self.assertTrue(self.db.create_tables())
self._add_test_audiofiles()
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name)
# TEST
af = afm.get_tags(
fingerprints=[test_fingerprint_1])
# RESULTS
self.assertEqual(len(list(af)), 0)
def test_print_rows_can_handle_non_ascii(self):
afm = do_delete_audio_file_from_db.AudioFileManager(
library_db_file=self.name
)
afm.print_rows([
[u'non-ascii string with a \xf8 character'],
])
from functools import update_wrapper
from django.apps import apps
from django.conf import settings
from django.contrib.admin import ModelAdmin, actions
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db.models.base import ModelBase
from django.http import Http404, HttpResponseRedirect
from django.template.engine import Engine
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
system_check_errors = []
class AlreadyRegistered(Exception):
pass
class NotRegistered(Exception):
pass
class AdminSite(object):
"""
An AdminSite object encapsulates an instance of the Django admin application, ready
to be hooked in to your URLconf. Models are registered with the AdminSite using the
register() method, and the get_urls() method can then be used to access Django view
functions that present a full admin interface for the collection of registered
models.
"""
# Text to put at the end of each page's <title>.
site_title = ugettext_lazy('Django site admin')
# Text to put in each page's <h1>.
site_header = ugettext_lazy('Django administration')
# Text to put at the top of the admin index page.
index_title = ugettext_lazy('Site administration')
# URL for the "View site" link at the top of each admin page.
site_url = '/'
_empty_value_display = '-'
login_form = None
index_template = None
app_index_template = None
login_template = None
logout_template = None
password_change_template = None
password_change_done_template = None
def __init__(self, name='admin'):
self._registry = {} # model_class class -> admin_class instance
self.name = name
self._actions = {'delete_selected': actions.delete_selected}
self._global_actions = self._actions.copy()
def register(self, model_or_iterable, admin_class=None, **options):
"""
Registers the given model(s) with the given admin class.
The model(s) should be Model classes, not instances.
If an admin class isn't given, it will use ModelAdmin (the default
admin options). If keyword arguments are given -- e.g., list_display --
they'll be applied as options to the admin class.
If a model is already registered, this will raise AlreadyRegistered.
If a model is abstract, this will raise ImproperlyConfigured.
"""
if not admin_class:
admin_class = ModelAdmin
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model._meta.abstract:
raise ImproperlyConfigured('The model %s is abstract, so it '
'cannot be registered with admin.' % model.__name__)
if model in self._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__name__)
# Ignore the registration if the model has been
# swapped out.
if not model._meta.swapped:
# If we got **options then dynamically construct a subclass of
# admin_class with those **options.
if options:
# For reasons I don't quite understand, without a __module__
# the created class appears to "live" in the wrong place,
# which causes issues later on.
options['__module__'] = __name__
admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
if admin_class is not ModelAdmin and settings.DEBUG:
system_check_errors.extend(admin_class.check(model))
# Instantiate the admin class to save in the registry
self._registry[model] = admin_class(model, self)
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
def is_registered(self, model):
"""
Check if a model class is registered with this `AdminSite`.
"""
return model in self._registry
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
def disable_action(self, name):
"""
Disable a globally-registered action. Raises KeyError for invalid names.
"""
del self._actions[name]
def get_action(self, name):
"""
Explicitly get a registered global action whether it's enabled or
not. Raises KeyError for invalid names.
"""
return self._global_actions[name]
@property
def actions(self):
"""
Get all the enabled actions as an iterable of (name, func).
"""
return six.iteritems(self._actions)
@property
def empty_value_display(self):
return self._empty_value_display
@empty_value_display.setter
def empty_value_display(self, empty_value_display):
self._empty_value_display = empty_value_display
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
def check_dependencies(self):
"""
Check that all things needed to run the admin have been correctly installed.
The default implementation checks that admin and contenttypes apps are
installed, as well as the auth context processor.
"""
if not apps.is_installed('django.contrib.admin'):
raise ImproperlyConfigured(
"Put 'django.contrib.admin' in your INSTALLED_APPS "
"setting in order to use the admin application.")
if not apps.is_installed('django.contrib.contenttypes'):
raise ImproperlyConfigured(
"Put 'django.contrib.contenttypes' in your INSTALLED_APPS "
"setting in order to use the admin application.")
try:
default_template_engine = Engine.get_default()
except Exception:
# Skip this non-critical check:
# 1. if the user has a non-trivial TEMPLATES setting and Django
# can't find a default template engine
# 2. if anything goes wrong while loading template engines, in
# order to avoid raising an exception from a confusing location
# Catching ImproperlyConfigured suffices for 1. but 2. requires
# catching all exceptions.
pass
else:
if ('django.contrib.auth.context_processors.auth'
not in default_template_engine.context_processors):
raise ImproperlyConfigured(
"Enable 'django.contrib.auth.context_processors.auth' "
"in your TEMPLATES setting in order to use the admin "
"application.")
def admin_view(self, view, cacheable=False):
"""
Decorator to create an admin view attached to this ``AdminSite``. This
wraps the view and provides permission checking by calling
``self.has_permission``.
You'll want to use this from within ``AdminSite.get_urls()``:
class MyAdminSite(AdminSite):
def get_urls(self):
from django.conf.urls import url
urls = super(MyAdminSite, self).get_urls()
urls += [
url(r'^my_view/$', self.admin_view(some_view))
]
return urls
By default, admin_views are marked non-cacheable using the
``never_cache`` decorator. If the view can be safely cached, set
cacheable=True.
"""
def inner(request, *args, **kwargs):
if not self.has_permission(request):
if request.path == reverse('admin:logout', current_app=self.name):
index_path = reverse('admin:index', current_app=self.name)
return HttpResponseRedirect(index_path)
# Inner import to prevent django.contrib.admin (app) from
# importing django.contrib.auth.models.User (unrelated model).
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
request.get_full_path(),
reverse('admin:login', current_app=self.name)
)
return view(request, *args, **kwargs)
if not cacheable:
inner = never_cache(inner)
# We add csrf_protect here so this function can be used as a utility
# function for any view, without having to repeat 'csrf_protect'.
if not getattr(view, 'csrf_exempt', False):
inner = csrf_protect(inner)
return update_wrapper(inner, view)
def get_urls(self):
from django.conf.urls import url, include
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level,
# and django.contrib.contenttypes.views imports ContentType.
from django.contrib.contenttypes import views as contenttype_views
if settings.DEBUG:
self.check_dependencies()
def wrap(view, cacheable=False):
def wrapper(*args, **kwargs):
return self.admin_view(view, cacheable)(*args, **kwargs)
wrapper.admin_site = self
return update_wrapper(wrapper, view)
# Admin-site-wide views.
urlpatterns = [
url(r'^$', wrap(self.index), name='index'),
url(r'^login/$', self.login, name='login'),
url(r'^logout/$', wrap(self.logout), name='logout'),
url(r'^password_change/$', wrap(self.password_change, cacheable=True), name='password_change'),
url(r'^password_change/done/$', wrap(self.password_change_done, cacheable=True),
name='password_change_done'),
url(r'^jsi18n/$', wrap(self.i18n_javascript, cacheable=True), name='jsi18n'),
url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$', wrap(contenttype_views.shortcut),
name='view_on_site'),
]
        # Add in each model's views, and create a list of valid URLs for the
        # app_index
valid_app_labels = []
for model, model_admin in self._registry.items():
urlpatterns += [
url(r'^%s/%s/' % (model._meta.app_label, model._meta.model_name), include(model_admin.urls)),
]
if model._meta.app_label not in valid_app_labels:
valid_app_labels.append(model._meta.app_label)
        # If there were ModelAdmins registered, we should have a list of app
        # labels for which we need to allow access to the app_index view.
if valid_app_labels:
regex = r'^(?P<app_label>' + '|'.join(valid_app_labels) + ')/$'
urlpatterns += [
url(regex, wrap(self.app_index), name='app_list'),
]
return urlpatterns
@property
def urls(self):
return self.get_urls(), 'admin', self.name
def each_context(self, request):
"""
Returns a dictionary of variables to put in the template context for
*every* page in the admin site.
"""
return {
'site_title': self.site_title,
'site_header': self.site_header,
'site_url': self.site_url,
'has_permission': self.has_permission(request),
'available_apps': self.get_app_list(request),
}
def password_change(self, request, extra_context=None):
"""
Handles the "change password" task -- both form display and validation.
"""
from django.contrib.admin.forms import AdminPasswordChangeForm
from django.contrib.auth.views import password_change
url = reverse('admin:password_change_done', current_app=self.name)
defaults = {
'password_change_form': AdminPasswordChangeForm,
'post_change_redirect': url,
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.password_change_template is not None:
defaults['template_name'] = self.password_change_template
request.current_app = self.name
return password_change(request, **defaults)
def password_change_done(self, request, extra_context=None):
"""
Displays the "success" page after a password change.
"""
from django.contrib.auth.views import password_change_done
defaults = {
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.password_change_done_template is not None:
defaults['template_name'] = self.password_change_done_template
request.current_app = self.name
return password_change_done(request, **defaults)
def i18n_javascript(self, request):
"""
Displays the i18n JavaScript that the Django admin requires.
This takes into account the USE_I18N setting. If it's set to False, the
generated JavaScript will be leaner and faster.
"""
if settings.USE_I18N:
from django.views.i18n import javascript_catalog
else:
from django.views.i18n import null_javascript_catalog as javascript_catalog
return javascript_catalog(request, packages=['django.conf', 'django.contrib.admin'])
@never_cache
def logout(self, request, extra_context=None):
"""
Logs out the user for the given HttpRequest.
This should *not* assume the user is already logged in.
"""
from django.contrib.auth.views import logout
defaults = {
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.logout_template is not None:
defaults['template_name'] = self.logout_template
request.current_app = self.name
return logout(request, **defaults)
@never_cache
def login(self, request, extra_context=None):
"""
Displays the login form for the given HttpRequest.
"""
if request.method == 'GET' and self.has_permission(request):
# Already logged-in, redirect to admin index
index_path = reverse('admin:index', current_app=self.name)
return HttpResponseRedirect(index_path)
from django.contrib.auth.views import login
# Since this module gets imported in the application's root package,
# it cannot import models from other applications at the module level,
# and django.contrib.admin.forms eventually imports User.
from django.contrib.admin.forms import AdminAuthenticationForm
context = dict(self.each_context(request),
title=_('Log in'),
app_path=request.get_full_path(),
)
if (REDIRECT_FIELD_NAME not in request.GET and
REDIRECT_FIELD_NAME not in request.POST):
context[REDIRECT_FIELD_NAME] = reverse('admin:index', current_app=self.name)
context.update(extra_context or {})
defaults = {
'extra_context': context,
'authentication_form': self.login_form or AdminAuthenticationForm,
'template_name': self.login_template or 'admin/login.html',
}
request.current_app = self.name
return login(request, **defaults)
def _build_app_dict(self, request, label=None):
"""
        Builds the app dictionary. Takes an optional label parameter to filter
models of a specific app.
"""
app_dict = {}
if label:
models = {
m: m_a for m, m_a in self._registry.items()
if m._meta.app_label == label
}
else:
models = self._registry
for model, model_admin in models.items():
app_label = model._meta.app_label
has_module_perms = model_admin.has_module_permission(request)
if not has_module_perms:
if label:
raise PermissionDenied
continue
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True not in perms.values():
continue
info = (app_label, model._meta.model_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'object_name': model._meta.object_name,
'perms': perms,
}
if perms.get('change'):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
except NoReverseMatch:
pass
if perms.get('add'):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
except NoReverseMatch:
pass
if app_label in app_dict:
app_dict[app_label]['models'].append(model_dict)
else:
app_dict[app_label] = {
'name': apps.get_app_config(app_label).verbose_name,
'app_label': app_label,
'app_url': reverse(
'admin:app_list',
kwargs={'app_label': app_label},
current_app=self.name,
),
'has_module_perms': has_module_perms,
'models': [model_dict],
}
if label:
return app_dict.get(label)
return app_dict
def get_app_list(self, request):
"""
Returns a sorted list of all the installed apps that have been
registered in this site.
"""
app_dict = self._build_app_dict(request)
# Sort the apps alphabetically.
app_list = sorted(app_dict.values(), key=lambda x: x['name'].lower())
# Sort the models alphabetically within each app.
for app in app_list:
app['models'].sort(key=lambda x: x['name'])
return app_list
@never_cache
def index(self, request, extra_context=None):
"""
Displays the main admin index page, which lists all of the installed
apps that have been registered in this site.
"""
app_list = self.get_app_list(request)
context = dict(
self.each_context(request),
title=self.index_title,
app_list=app_list,
)
context.update(extra_context or {})
request.current_app = self.name
return TemplateResponse(request, self.index_template or
'admin/index.html', context)
def app_index(self, request, app_label, extra_context=None):
app_dict = self._build_app_dict(request, app_label)
if not app_dict:
raise Http404('The requested admin page does not exist.')
# Sort the models alphabetically within each app.
app_dict['models'].sort(key=lambda x: x['name'])
app_name = apps.get_app_config(app_label).verbose_name
context = dict(self.each_context(request),
title=_('%(app)s administration') % {'app': app_name},
app_list=[app_dict],
app_label=app_label,
)
context.update(extra_context or {})
request.current_app = self.name
return TemplateResponse(request, self.app_index_template or [
'admin/%s/app_index.html' % app_label,
'admin/app_index.html'
], context)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site.
site = AdminSite()
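# Illustrative sketch (not part of Django itself): how a project would
# typically subclass AdminSite and expose an extra view through admin_view(),
# following the pattern documented in AdminSite.admin_view() above. The names
# "MyAdminSite" and "dashboard" and the template path are hypothetical.
class MyAdminSite(AdminSite):
    site_header = 'Custom administration'
    def get_urls(self):
        from django.conf.urls import url
        urls = super(MyAdminSite, self).get_urls()
        extra = [
            # admin_view() adds the permission check and, because cacheable
            # is left at False, the never_cache decorator.
            url(r'^dashboard/$', self.admin_view(self.dashboard), name='dashboard'),
        ]
        return extra + urls
    def dashboard(self, request):
        context = dict(self.each_context(request), title='Dashboard')
        return TemplateResponse(request, 'admin/dashboard.html', context)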
|
|
# -*- test-case-name: twistedcaldav.directory.test.test_principal -*-
##
# Copyright (c) 2006-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Implements a directory-backed principal hierarchy.
"""
__all__ = [
"DirectoryProvisioningResource",
"DirectoryPrincipalProvisioningResource",
"DirectoryPrincipalTypeProvisioningResource",
"DirectoryPrincipalUIDProvisioningResource",
"DirectoryPrincipalResource",
"DirectoryCalendarPrincipalResource",
]
from urllib import quote, unquote
from urlparse import urlparse
import uuid
from twext.python.log import Logger
from twisted.cred.credentials import UsernamePassword
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.defer import succeed
from twisted.python.modules import getModule
from twisted.web.template import XMLFile, Element, renderer
from twistedcaldav import caldavxml, customxml
from twistedcaldav.cache import DisabledCacheNotifier, PropfindCacheMixin
from twistedcaldav.config import config
from twistedcaldav.customxml import calendarserver_namespace
from twistedcaldav.directory.augment import allowedAutoScheduleModes
from twistedcaldav.directory.common import uidsResourceName
from twistedcaldav.directory.util import NotFoundResource
from twistedcaldav.directory.util import (
formatLink, formatLinks, formatPrincipals, formatList
)
from txdav.who.wiki import getWikiACL
from twistedcaldav.extensions import (
ReadOnlyResourceMixIn, DAVPrincipalResource, DAVResourceWithChildrenMixin
)
from twistedcaldav.extensions import DirectoryElement
from twistedcaldav.resource import CalendarPrincipalCollectionResource, CalendarPrincipalResource
from txdav.caldav.datastore.scheduling.utils import normalizeCUAddr
from txdav.who.directory import CalendarDirectoryRecordMixin
from txdav.xml import element as davxml
from txweb2 import responsecode
from txweb2.auth.digest import DigestedCredentials
from txweb2.auth.tls import TLSCredentials
from txweb2.auth.wrapper import UnauthorizedResponse
from txweb2.dav.noneprops import NonePropertyStore
from txweb2.dav.util import joinURL
from txweb2.http import HTTPError, RedirectResponse
try:
from twistedcaldav.authkerb import NegotiateCredentials
NegotiateCredentials # sigh, pyflakes
except ImportError:
NegotiateCredentials = None
thisModule = getModule(__name__)
log = Logger()
class PermissionsMixIn (ReadOnlyResourceMixIn):
def defaultAccessControlList(self):
return succeed(authReadACL)
@inlineCallbacks
def accessControlList(self, request, inheritance=True, expanding=False, inherited_aces=None):
try:
wikiACL = (yield getWikiACL(self, request))
except HTTPError:
wikiACL = None
if wikiACL is not None:
# ACL depends on wiki server...
log.debug("Wiki ACL: %s" % (wikiACL.toxml(),))
returnValue(wikiACL)
else:
# ...otherwise permissions are fixed, and are not subject to
# inheritance rules, etc.
returnValue((yield self.defaultAccessControlList()))
# Converter methods for recordsMatchingFields()
#
# A DAV property can be associated with one of these converter methods,
# which take the string being matched and return the appropriate record
# field name to match against, as well as a new match string which has been
# converted to the appropriate form.
def cuTypeConverter(cuType):
""" Converts calendar user types to OD type names """
return "recordType", CalendarDirectoryRecordMixin.fromCUType(cuType)
def cuAddressConverter(origCUAddr):
""" Converts calendar user addresses to OD-compatible form """
cua = normalizeCUAddr(origCUAddr)
if cua.startswith("urn:x-uid:"):
return "uid", cua[10:]
elif cua.startswith("urn:uuid:"):
return "guid", uuid.UUID(cua[9:])
elif cua.startswith("mailto:"):
return "emailAddresses", cua[7:]
elif cua.startswith("/") or cua.startswith("http"):
ignored, collection, id = cua.rsplit("/", 2)
if collection == "__uids__":
return "uid", id
else:
return "recordName", id
else:
raise ValueError(
"Invalid calendar user address format: %s" %
(origCUAddr,)
)
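# Illustrative examples (not from the original source) of the mappings that
# cuAddressConverter() produces for the address forms handled above; exact
# results also depend on normalizeCUAddr(). The addresses are hypothetical:
#   "urn:x-uid:ABC123"            -> ("uid", "ABC123")
#   "mailto:[email protected]"   -> ("emailAddresses", "[email protected]")
#   "/principals/__uids__/ABC123" -> ("uid", "ABC123")
#   "/principals/users/wsanchez"  -> ("recordName", "wsanchez")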
class DirectoryProvisioningResource (
PermissionsMixIn,
CalendarPrincipalCollectionResource,
):
def __init__(self, url, directory):
"""
@param url: the canonical URL for the resource.
@param directory: an L{IDirectoryService} to provision principals from.
"""
assert url.endswith("/"), "Collection URL must end in '/'"
CalendarPrincipalCollectionResource.__init__(self, url)
DAVResourceWithChildrenMixin.__init__(self)
# MOVE2WHO
# self.directory = IDirectoryService(directory)
self.directory = directory
def __repr__(self):
return "<%s: %s %s>" % (self.__class__.__name__, self.directory, self._url)
@inlineCallbacks
def locateChild(self, req, segments):
child = (yield self.getChild(segments[0]))
if child is not None:
returnValue((child, segments[1:]))
returnValue((NotFoundResource(principalCollections=self.principalCollections()), ()))
def deadProperties(self):
if not hasattr(self, "_dead_properties"):
self._dead_properties = NonePropertyStore(self)
return self._dead_properties
def etag(self):
return succeed(None)
@inlineCallbacks
def principalForShortName(self, recordType, name):
record = (yield self.directory.recordWithShortName(recordType, name))
returnValue((yield self.principalForRecord(record)))
def principalForUser(self, user):
return self.principalForShortName(self.directory.recordType.lookupByName("user"), user)
@inlineCallbacks
def principalForAuthID(self, user):
# Basic/Digest creds -> just lookup user name
if isinstance(user, UsernamePassword) or isinstance(user, DigestedCredentials):
returnValue((yield self.principalForUser(user.username)))
elif NegotiateCredentials is not None and isinstance(user, NegotiateCredentials):
authID = "Kerberos:%s" % (user.principal,)
principal = yield self.principalForRecord((yield self.directory.recordWithAuthID(authID)))
if principal:
returnValue(principal)
elif user.username:
returnValue((yield self.principalForUser(user.username)))
elif isinstance(user, TLSCredentials):
# FIXME: for now we use the local part of the emailAddress in the certs Subject, but we may need
# to lookup some other attribute
returnValue((yield self.principalForUser(user.username)))
returnValue(None)
def principalForUID(self, uid):
raise NotImplementedError("Subclass must implement principalForUID()")
def principalForCalendarUserAddress(self, address):
raise NotImplementedError("Subclass must implement principalForCalendarUserAddress()")
def principalForRecord(self, record):
if record is None or not record.enabled:
return succeed(None)
return self.principalForUID(record.uid)
##
# DAV-property-to-record-field mapping
##
_cs_ns = "http://calendarserver.org/ns/"
_fieldMap = {
("DAV:" , "displayname") :
("fullNames", None, "Display Name", davxml.DisplayName),
("urn:ietf:params:xml:ns:caldav" , "calendar-user-type") :
("", cuTypeConverter, "Calendar User Type", caldavxml.CalendarUserType),
("urn:ietf:params:xml:ns:caldav" , "calendar-user-address-set") :
("", cuAddressConverter, "Calendar User Address Set", caldavxml.CalendarUserAddressSet),
(_cs_ns, "email-address-set") :
("emailAddresses", None, "Email Addresses", customxml.EmailAddressSet),
}
_fieldList = [v for _ignore_k, v in sorted(_fieldMap.iteritems(), key=lambda x:x[0])]
def propertyToField(self, property, match):
"""
If property is a DAV property that maps to a directory field, return
that field's name, otherwise return None
"""
field, converter, _ignore_description, _ignore_xmlClass = self._fieldMap.get(
property.qname(), (None, None, None, None))
if field is None:
return (None, None)
elif converter is not None:
field, match = converter(match)
return (field, match)
def principalSearchPropertySet(self):
props = []
for _ignore_field, _ignore_converter, description, xmlClass in self._fieldList:
props.append(
davxml.PrincipalSearchProperty(
davxml.PropertyContainer(
xmlClass()
),
davxml.Description(
davxml.PCDATAElement(description),
**{"xml:lang": "en"}
),
)
)
return davxml.PrincipalSearchPropertySet(*props)
class DirectoryPrincipalProvisioningResource (DirectoryProvisioningResource):
"""
Collection resource which provisions directory principals as its children.
"""
def __init__(self, url, directory):
DirectoryProvisioningResource.__init__(self, url, directory)
# FIXME: Smells like a hack
self.directory.setPrincipalCollection(self)
# Used to hook in the data collection root resources
self.calendarCollection = None
self.addressBookCollection = None
#
# Create children
#
self.supportedChildTypes = [
self.directory.recordType.user,
self.directory.recordType.group,
self.directory.recordType.location,
self.directory.recordType.resource,
self.directory.recordType.address,
]
if config.Authentication.Wiki.Enabled:
self.supportedChildTypes.append(
self.directory.recordType.macOSXServerWiki
)
for name, recordType in [
(self.directory.recordTypeToOldName(r), r)
for r in self.supportedChildTypes
]:
self.putChild(
name,
DirectoryPrincipalTypeProvisioningResource(
self, name, recordType
)
)
self.putChild(uidsResourceName, DirectoryPrincipalUIDProvisioningResource(self))
@inlineCallbacks
def principalForUID(self, uid):
child = (yield self.getChild(uidsResourceName))
returnValue((yield child.getChild(uid)))
@inlineCallbacks
def _principalForURI(self, uri):
scheme, netloc, path, _ignore_params, _ignore_query, _ignore_fragment = urlparse(uri)
if scheme == "":
pass
elif scheme in ("http", "https"):
# Get rid of possible user/password nonsense
netloc = netloc.split("@", 1)[-1]
# Get host/port
netloc = netloc.split(":", 1)
host = netloc[0]
if len(netloc) == 1 or netloc[1] == "":
port = 80
else:
port = int(netloc[1])
if (
host != config.ServerHostName and
host not in config.Scheduling.Options.PrincipalHostAliases
):
returnValue(None)
if port != {
"http" : config.HTTPPort,
"https": config.SSLPort,
}[scheme]:
returnValue(None)
elif scheme == "urn":
if path.startswith("uuid:"):
returnValue((yield self.principalForUID(path[5:])))
else:
returnValue(None)
else:
returnValue(None)
if not path.startswith(self._url):
returnValue(None)
path = path[len(self._url) - 1:]
segments = [unquote(s) for s in path.rstrip("/").split("/")]
if segments[0] == "" and len(segments) == 3:
typeResource = yield self.getChild(segments[1])
if typeResource is not None:
principalResource = yield typeResource.getChild(segments[2])
if principalResource:
returnValue(principalResource)
returnValue(None)
@inlineCallbacks
def principalForCalendarUserAddress(self, address):
# First see if the address is a principal URI
principal = yield self._principalForURI(address)
if principal:
if (
isinstance(principal, DirectoryCalendarPrincipalResource) and
principal.record.hasCalendars
):
returnValue(principal)
else:
# Next try looking it up in the directory
record = yield self.directory.recordWithCalendarUserAddress(address)
if record is not None and record.hasCalendars:
returnValue((yield self.principalForRecord(record)))
log.debug("No principal for calendar user address: %r" % (address,))
returnValue(None)
@inlineCallbacks
def principalForRecord(self, record):
child = (yield self.getChild(uidsResourceName))
returnValue((yield child.principalForRecord(record)))
##
# Static
##
def createSimilarFile(self, path):
log.error("Attempt to create clone %r of resource %r" % (path, self))
raise HTTPError(responsecode.NOT_FOUND)
def getChild(self, name):
if name == "":
return succeed(self)
else:
return succeed(self.putChildren.get(name, None))
def listChildren(self):
return [
self.directory.recordTypeToOldName(r) for r in
self.supportedChildTypes
]
##
# ACL
##
def principalCollections(self):
return (self,)
class DirectoryPrincipalTypeProvisioningResource (DirectoryProvisioningResource):
"""
Collection resource which provisions directory principals of a
specific type as its children, indexed by short name.
"""
def __init__(self, parent, name, recordType):
"""
        @param parent: the parent L{DirectoryPrincipalProvisioningResource}.
        @param name: the old-style record type name, used as this collection's
            child name and URL segment.
        @param recordType: the directory record type to provision.
"""
DirectoryProvisioningResource.__init__(
self,
joinURL(parent.principalCollectionURL(), name) + "/",
parent.directory
)
self.recordType = recordType
self.parent = parent
def principalForUID(self, uid):
return self.parent.principalForUID(uid)
def principalForCalendarUserAddress(self, address):
return self.parent.principalForCalendarUserAddress(address)
def principalForRecord(self, record):
return self.parent.principalForRecord(record)
##
# Static
##
def createSimilarFile(self, path):
log.error("Attempt to create clone %r of resource %r" % (path, self))
raise HTTPError(responsecode.NOT_FOUND)
def getChild(self, name):
if name == "":
return succeed(self)
else:
return self.principalForShortName(self.recordType, name)
@inlineCallbacks
def listChildren(self):
children = []
if config.EnablePrincipalListings:
try:
for record in (
yield self.directory.recordsWithRecordType(self.recordType)
):
for shortName in getattr(record, "shortNames", []):
children.append(shortName)
except AttributeError:
log.warn("Cannot list children of record type {rt}",
rt=self.recordType.name)
returnValue(children)
else:
# Not a listable collection
raise HTTPError(responsecode.FORBIDDEN)
##
# ACL
##
def principalCollections(self):
return self.parent.principalCollections()
class DirectoryPrincipalUIDProvisioningResource (DirectoryProvisioningResource):
"""
Collection resource which provisions directory principals indexed
by UID.
"""
def __init__(self, parent):
"""
        @param parent: the parent L{DirectoryPrincipalProvisioningResource}.
"""
DirectoryProvisioningResource.__init__(
self,
joinURL(parent.principalCollectionURL(), uidsResourceName) + "/",
parent.directory
)
self.parent = parent
def principalForUID(self, uid):
return self.parent.principalForUID(uid)
def principalForCalendarUserAddress(self, address):
return self.parent.principalForCalendarUserAddress(address)
def principalForRecord(self, record):
if record is None:
return succeed(None)
if record.hasCalendars or record.hasContacts:
# XXX these are different features and one should not automatically
# imply the other...
principal = DirectoryCalendarPrincipalResource(self, record)
else:
principal = DirectoryPrincipalResource(self, record)
return succeed(principal)
##
# Static
##
def createSimilarFile(self, path):
log.error("Attempt to create clone %r of resource %r" % (path, self))
raise HTTPError(responsecode.NOT_FOUND)
@inlineCallbacks
def getChild(self, name):
if name == "":
returnValue(self)
if "#" in name:
# This UID belongs to a sub-principal
primaryUID, subType = name.split("#")
else:
primaryUID = name
subType = None
record = (yield self.directory.recordWithUID(primaryUID))
primaryPrincipal = (yield self.principalForRecord(record))
if primaryPrincipal is None:
log.info("No principal found for UID: %s" % (name,))
returnValue(None)
if subType is None:
returnValue(primaryPrincipal)
else:
returnValue((yield primaryPrincipal.getChild(subType)))
def listChildren(self):
# Not a listable collection
raise HTTPError(responsecode.FORBIDDEN)
##
# ACL
##
def principalCollections(self):
return self.parent.principalCollections()
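# Illustrative sketch (not part of the original module): principals are looked
# up through the UID collection above, and the "#" convention in getChild()
# addresses sub-principals such as proxy groups. The UID is hypothetical, and
# getChild() returns a Deferred, so this would normally run inside an
# @inlineCallbacks function:
#   principal = yield uidProvisioning.getChild("A0B1C2D3")
#   writeProxy = yield uidProvisioning.getChild("A0B1C2D3#calendar-proxy-write")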
class DirectoryPrincipalDetailElement(Element):
"""
Element that can render the details of a
L{CalendarUserDirectoryPrincipalResource}.
"""
loader = XMLFile(thisModule.filePath.sibling(
"directory-principal-resource.html")
)
def __init__(self, resource):
super(DirectoryPrincipalDetailElement, self).__init__()
self.resource = resource
@renderer
def serversEnabled(self, request, tag):
"""
Renderer for when servers are enabled.
"""
if not config.Servers.Enabled:
return ""
record = self.resource.record
return tag.fillSlots(
hostedAt=str(record.serverURI()),
)
@renderer
def principal(self, request, tag):
"""
Top-level renderer in the template.
"""
record = self.resource.record
try:
if isinstance(record.guid, uuid.UUID):
guid = str(record.guid).upper()
else:
guid = record.guid
except AttributeError:
guid = ""
try:
emailAddresses = record.emailAddresses
except AttributeError:
emailAddresses = []
try:
shortNames = record.shortNames
except AttributeError:
shortNames = []
return tag.fillSlots(
directoryGUID=str(record.service.guid),
realm=record.service.realmName.encode("utf-8"),
principalGUID=guid,
recordType=record.service.recordTypeToOldName(record.recordType),
shortNames=",".join([n.encode("utf-8") for n in shortNames]),
fullName=record.displayName.encode("utf-8"),
emailAddresses=formatList(emailAddresses),
principalUID=str(self.resource.principalUID()),
principalURL=formatLink(self.resource.principalURL()),
alternateURIs=formatLinks(self.resource.alternateURIs()),
groupMembers=self.resource.groupMembers().addCallback(
formatPrincipals
),
groupMemberships=self.resource.groupMemberships().addCallback(
formatPrincipals
),
readWriteProxyFor=self.resource.proxyFor(True).addCallback(
formatPrincipals
),
readOnlyProxyFor=self.resource.proxyFor(False).addCallback(
formatPrincipals
),
)
@renderer
def extra(self, request, tag):
"""
No-op; implemented in subclass.
"""
return ''
@renderer
def enabledForCalendaring(self, request, tag):
"""
No-op; implemented in subclass.
"""
return ''
@renderer
def enabledForAddressBooks(self, request, tag):
"""
No-op; implemented in subclass.
"""
return ''
class DirectoryPrincipalElement(DirectoryElement):
"""
L{DirectoryPrincipalElement} is a renderer for directory details.
"""
@renderer
def resourceDetail(self, request, tag):
"""
Render the directory principal's details.
"""
return DirectoryPrincipalDetailElement(self.resource)
class DirectoryCalendarPrincipalDetailElement(DirectoryPrincipalDetailElement):
@renderer
def extra(self, request, tag):
"""
Renderer for extra directory body items for calendar/addressbook
principals.
"""
return tag
@renderer
def enabledForCalendaring(self, request, tag):
"""
Renderer which returns its tag when the wrapped record is enabled for
calendaring.
"""
resource = self.resource
record = resource.record
if record.hasCalendars:
return tag.fillSlots(
calendarUserAddresses=formatLinks(
sorted(resource.calendarUserAddresses())
),
calendarHomes=formatLinks(resource.calendarHomeURLs())
)
return ''
@renderer
def enabledForAddressBooks(self, request, tag):
"""
        Renderer which returns its tag when the wrapped record is enabled for
addressbooks.
"""
resource = self.resource
record = resource.record
if record.hasContacts:
return tag.fillSlots(
addressBookHomes=formatLinks(resource.addressBookHomeURLs())
)
return ''
class DirectoryCalendarPrincipalElement(DirectoryPrincipalElement):
"""
L{DirectoryPrincipalElement} is a renderer for directory details, with
calendaring additions.
"""
@renderer
def resourceDetail(self, request, tag):
"""
Render the directory calendar principal's details.
"""
return DirectoryCalendarPrincipalDetailElement(self.resource)
class DirectoryPrincipalResource (
PropfindCacheMixin, PermissionsMixIn, DAVPrincipalResource):
"""
Directory principal resource.
"""
def liveProperties(self):
return super(DirectoryPrincipalResource, self).liveProperties() + (
(calendarserver_namespace, "email-address-set"),
davxml.ResourceID.qname(),
)
cacheNotifierFactory = DisabledCacheNotifier
def __init__(self, parent, record):
"""
@param parent: the parent of this resource.
@param record: the L{IDirectoryRecord} that this resource represents.
"""
super(DirectoryPrincipalResource, self).__init__()
self.cacheNotifier = self.cacheNotifierFactory(self, cacheHandle="PrincipalToken")
if self.isCollection():
slash = "/"
else:
slash = ""
assert record is not None, "Principal must have a directory record"
self.record = record
self.parent = parent
url = joinURL(parent.principalCollectionURL(), self.principalUID()) + slash
self._url = url
self._alternate_urls = tuple([
joinURL(
parent.parent.principalCollectionURL(),
record.service.recordTypeToOldName(record.recordType),
quote(shortName.encode("utf-8"))
) + slash
for shortName in getattr(record, "shortNames", [])
])
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, str(self))
def __str__(self):
return "({}){}".format(self.record.recordType, self.record.uid)
def __eq__(self, other):
"""
Principals are the same if their principalURLs are the same.
"""
if isinstance(other, DirectoryPrincipalResource):
return (self.principalURL() == other.principalURL())
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.principalUID())
@inlineCallbacks
def readProperty(self, property, request):
if type(property) is tuple:
qname = property
else:
qname = property.qname()
namespace, name = qname
if qname == davxml.ResourceID.qname():
returnValue(davxml.ResourceID(davxml.HRef.fromString("urn:x-uid:%s" % (self.record.uid,))))
elif namespace == calendarserver_namespace:
if name == "email-address-set":
try:
emails = self.record.emailAddresses
except AttributeError:
emails = []
returnValue(customxml.EmailAddressSet(
*[customxml.EmailAddressProperty(addr) for addr in sorted(emails)]
))
result = (yield super(DirectoryPrincipalResource, self).readProperty(property, request))
returnValue(result)
def deadProperties(self):
if not hasattr(self, "_dead_properties"):
self._dead_properties = NonePropertyStore(self)
return self._dead_properties
def etag(self):
return succeed(None)
##
# HTTP
##
def htmlElement(self):
"""
Customize HTML rendering for directory principals.
"""
return DirectoryPrincipalElement(self)
##
# DAV
##
def isCollection(self):
return True
@inlineCallbacks
def handleMissingTrailingSlash(self, request):
try:
_ignore_authnUser, authzUser = yield self.authenticate(request)
except Exception:
authzUser = None
# Turn 301 into 401
if authzUser is None:
response = (yield UnauthorizedResponse.makeResponse(
request.credentialFactories,
request.remoteAddr
))
returnValue(response)
else:
response = RedirectResponse(
request.unparseURL(
path=quote(
unquote(request.path),
safe=':/') + '/'
)
)
returnValue(response)
def displayName(self):
return self.record.displayName
##
# ACL
##
def _calendar_user_proxy_index(self):
"""
Return the SQL database for calendar user proxies.
@return: the L{ProxyDB} for the principal collection.
"""
# The db is located in the principal collection root
from twistedcaldav.directory.calendaruserproxy import ProxyDBService
return ProxyDBService
def alternateURIs(self):
# FIXME: Add API to IDirectoryRecord for getting a record URI?
return self._alternate_urls
def principalURL(self):
return self._url
def url(self):
return self.principalURL()
def notifierID(self):
return self.principalURL()
def isProxyFor(self, principal):
"""
Determine whether this principal is a read-only or read-write proxy for the
specified principal.
@param principal: principal resource for the possible user proxying to this principal
@type principal: L{DirectoryPrincipalResource}
"""
return self.record.isProxyFor(principal.record)
def proxyMode(self, principal):
"""
        Determine which proxy mode this principal has in relation to the one specified.
@param principal: principal resource for the possible user proxying to this principal
@type principal: L{DirectoryPrincipalResource}
"""
return self.record.proxyMode(principal.record)
@inlineCallbacks
def proxyFor(self, readWrite, ignoreDisabled=True):
"""
Returns the set of principals currently delegating to this principal
with the access indicated by the readWrite argument. If readWrite is
True, then write-access delegators are returned, otherwise the read-
only-access delegators are returned.
@param readWrite: Whether to look up read-write delegators, or
read-only delegators
@type readWrite: L{bool}
@param ignoreDisabled: If L{True} disabled delegators are not returned
@type ignoreDisabled: L{bool}
@return: A Deferred firing with a set of principals
"""
proxyFors = set()
if config.EnableProxyPrincipals:
proxyForRecords = yield self.record.proxyFor(readWrite, ignoreDisabled)
for record in proxyForRecords:
principal = yield self.parent.principalForRecord(record)
if principal is not None:
proxyFors.add(principal)
returnValue(proxyFors)
@inlineCallbacks
def _getRelatives(self, method, record=None, relatives=None, records=None, proxy=None, infinity=False):
if record is None:
record = self.record
if relatives is None:
relatives = set()
if records is None:
records = set()
if record not in records:
records.add(record)
for relative in (yield getattr(record, method)()):
if relative not in records:
found = (yield self.parent.principalForRecord(relative))
if found is None:
log.error("No principal found for directory record: %r" % (relative,))
else:
if proxy:
if proxy == "read-write":
found = (yield found.getChild("calendar-proxy-write"))
else:
found = (yield found.getChild("calendar-proxy-read"))
relatives.add(found)
if infinity:
yield self._getRelatives(
method, relative, relatives, records,
infinity=infinity)
returnValue(relatives)
def groupMembers(self):
return self._getRelatives("members")
def expandedGroupMembers(self):
return self._getRelatives("members", infinity=True)
@inlineCallbacks
def groupMemberships(self, infinity=False):
groups = yield self._getRelatives("groups", infinity=infinity)
if config.EnableProxyPrincipals:
for readWrite, proxyType in (
(True, "calendar-proxy-write"),
(False, "calendar-proxy-read")
):
proxyFors = yield self.proxyFor(readWrite)
for proxyFor in proxyFors:
subPrincipal = yield self.parent.principalForUID(
"{}#{}".format(proxyFor.record.uid, proxyType)
)
groups.add(subPrincipal)
returnValue(groups)
def expandedGroupMemberships(self):
return self.groupMemberships(infinity=True)
def groupsChanged(self):
"""
A callback indicating the directory group membership for this principal
has changed. Update the cache token for this principal so the PROPFIND
response cache is invalidated.
"""
return self.cacheNotifier.changed()
def principalCollections(self):
return self.parent.principalCollections()
def principalUID(self):
return self.record.uid
def serverURI(self):
return self.record.serverURI()
def server(self):
return self.record.server()
def thisServer(self):
return self.record.thisServer()
def canAutoSchedule(self, organizer=None):
"""
Determine the auto-schedule state based on record state, type and config settings.
@param organizer: the CUA of the organizer trying to schedule this principal
@type organizer: C{str}
@return: C{Deferred} firing a C{bool}
"""
return self.record.canAutoSchedule(organizer)
@inlineCallbacks
def setAutoScheduleMode(self, autoScheduleMode):
self.record.autoScheduleMode = autoScheduleMode if autoScheduleMode in allowedAutoScheduleModes else "default"
augmentRecord = (yield self.record.service.augmentService.getAugmentRecord(self.record.guid, self.record.recordType))
augmentRecord.autoScheduleMode = autoScheduleMode
(yield self.record.service.augmentService.addAugmentRecords([augmentRecord]))
def getAutoScheduleMode(self, organizer=None):
"""
Return the auto schedule mode value for the principal. If the optional
organizer is provided, and that organizer is a member of the principal's
auto-accept group, return "automatic" instead; this allows specifying a
        privileged group whose scheduling requests are automatically accepted or
declined, regardless of whether the principal is normally managed by a
delegate.
@param organizer: the CUA of the organizer scheduling this principal
@type organizer: C{str}
@return: auto schedule mode
@rtype: C{Deferred} firing L{AutoScheduleMode}
"""
return self.record.getAutoScheduleMode(organizer)
@inlineCallbacks
def setAutoAcceptGroup(self, autoAcceptGroup):
"""
Sets the group whose members can automatically schedule with this principal
even if this principal's auto-schedule is False (assuming no conflicts).
@param autoAcceptGroup: GUID of the group
@type autoAcceptGroup: C{str}
"""
self.record.autoAcceptGroup = autoAcceptGroup
augmentRecord = (yield self.record.service.augmentService.getAugmentRecord(self.record.guid, self.record.recordType))
augmentRecord.autoAcceptGroup = autoAcceptGroup
(yield self.record.service.augmentService.addAugmentRecords([augmentRecord]))
def getAutoAcceptGroup(self):
"""
Returns the GUID of the auto accept group assigned to this principal, or empty
string if not assigned
"""
return self.record.autoAcceptGroup
def autoAcceptFromOrganizer(self, organizer):
"""
Is the organizer a member of this principal's autoAcceptGroup?
@param organizer: CUA of the organizer
@type organizer: C{str}
@return: True if the autoAcceptGroup is assigned, and the organizer is a member
of that group. False otherwise.
@rtype: C{Deferred} firing C{bool}
"""
return self.record.autoAcceptFromOrganizer()
def getCUType(self):
return self.record.getCUType()
##
# Static
##
def createSimilarFile(self, path):
log.error("Attempt to create clone %r of resource %r" % (path, self))
raise HTTPError(responsecode.NOT_FOUND)
@inlineCallbacks
def locateChild(self, req, segments):
child = (yield self.getChild(segments[0]))
if child is not None:
returnValue((child, segments[1:]))
returnValue((None, ()))
def getChild(self, name):
if name == "":
return succeed(self)
return succeed(None)
def listChildren(self):
return ()
class DirectoryCalendarPrincipalResource(DirectoryPrincipalResource,
CalendarPrincipalResource):
"""
Directory calendar principal resource.
"""
def liveProperties(self):
return DirectoryPrincipalResource.liveProperties(self) + CalendarPrincipalResource.liveProperties(self)
def calendarsEnabled(self):
return self.record.calendarsEnabled()
def addressBooksEnabled(self):
return config.EnableCardDAV and self.record.hasContacts
@inlineCallbacks
def readProperty(self, property, request):
# Ouch, multiple inheritance.
result = (yield DirectoryPrincipalResource.readProperty(self, property, request))
if not result:
result = (yield CalendarPrincipalResource.readProperty(self, property, request))
returnValue(result)
##
# CalDAV
##
def calendarUserAddresses(self):
return self.record.calendarUserAddresses
def htmlElement(self):
"""
Customize HTML generation for calendar principals.
"""
return DirectoryCalendarPrincipalElement(self)
def canonicalCalendarUserAddress(self):
"""
Return a CUA for this principal
"""
return self.record.canonicalCalendarUserAddress()
def enabledAsOrganizer(self):
return self.record.enabledAsOrganizer()
@inlineCallbacks
def scheduleInbox(self, request):
home = yield self.calendarHome(request)
if home is None:
returnValue(None)
inbox = yield home.getChild("inbox")
if inbox is None:
returnValue(None)
returnValue(inbox)
@inlineCallbacks
def notificationCollection(self, request):
notification = None
if config.Sharing.Enabled:
home = yield self.calendarHome(request)
if home is not None:
notification = yield home.getChild("notification")
returnValue(notification)
def calendarHomeURLs(self):
if self.record.hasCalendars:
homeURL = self._homeChildURL(None)
else:
homeURL = ""
return (homeURL,) if homeURL else ()
def scheduleInboxURL(self):
return self._homeChildURL("inbox/")
def scheduleOutboxURL(self):
return self._homeChildURL("outbox/")
def dropboxURL(self):
if config.EnableDropBox or config.EnableManagedAttachments:
return self._homeChildURL("dropbox/")
else:
return None
def notificationURL(self):
if config.Sharing.Enabled:
return self._homeChildURL("notification/")
else:
return None
def addressBookHomeURLs(self):
if self.record.hasContacts:
homeURL = self._addressBookHomeChildURL(None)
else:
homeURL = ""
return (homeURL,) if homeURL else ()
def _homeChildURL(self, name):
if not hasattr(self, "calendarHomeURL"):
if self.parent.parent.calendarCollection is None:
return None
self.calendarHomeURL = joinURL(
self.parent.parent.calendarCollection.url(),
uidsResourceName,
self.record.uid
) + "/"
# Prefix with other server if needed
if not self.thisServer():
self.calendarHomeURL = joinURL(self.serverURI(), self.calendarHomeURL)
url = self.calendarHomeURL
if url is None:
return None
else:
return joinURL(url, name) if name else url
def calendarHome(self, request):
if self.parent.parent.calendarCollection is not None:
return self.parent.parent.calendarCollection.homeForDirectoryRecord(self.record, request)
else:
return succeed(None)
def _addressBookHomeChildURL(self, name):
if not hasattr(self, "addressBookHomeURL"):
if self.parent.parent.addressBookCollection is None:
return None
self.addressBookHomeURL = joinURL(
self.parent.parent.addressBookCollection.url(),
uidsResourceName,
self.record.uid
) + "/"
# Prefix with other server if needed
if not self.thisServer():
self.addressBookHomeURL = joinURL(self.serverURI(), self.addressBookHomeURL)
url = self.addressBookHomeURL
if url is None:
return None
else:
return joinURL(url, name) if name else url
def addressBookHome(self, request):
if self.parent.parent.addressBookCollection is not None:
return self.parent.parent.addressBookCollection.homeForDirectoryRecord(self.record, request)
else:
return succeed(None)
##
# Static
##
def getChild(self, name):
if name == "":
return succeed(self)
if config.EnableProxyPrincipals and name in (
"calendar-proxy-read", "calendar-proxy-write",
"calendar-proxy-read-for", "calendar-proxy-write-for",
):
# name is required to be str
from twistedcaldav.directory.calendaruserproxy import (
CalendarUserProxyPrincipalResource
)
return succeed(CalendarUserProxyPrincipalResource(self, str(name)))
else:
return succeed(None)
def listChildren(self):
if config.EnableProxyPrincipals:
return (
"calendar-proxy-read", "calendar-proxy-write",
)
else:
return ()
##
# Utilities
##
authReadACL = davxml.ACL(
# Read access for authenticated users.
davxml.ACE(
davxml.Principal(davxml.Authenticated()),
davxml.Grant(davxml.Privilege(davxml.Read())),
davxml.Protected(),
),
)
|
|
"""Functional interface to graph methods and assorted utilities.
"""
# Copyright (C) 2004-2012 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
#
import networkx as nx
import itertools
__author__ = """\n""".join(['Aric Hagberg ([email protected])',
'Pieter Swart ([email protected])',
'Dan Schult([email protected])'])
__all__ = ['nodes', 'edges', 'degree', 'degree_histogram', 'neighbors',
'number_of_nodes', 'number_of_edges', 'density',
'nodes_iter', 'edges_iter', 'is_directed','info',
'freeze','is_frozen','subgraph','create_empty_copy',
'set_node_attributes','get_node_attributes',
'set_edge_attributes','get_edge_attributes',
'all_neighbors','non_neighbors']
def nodes(G):
"""Return a copy of the graph nodes in a list."""
return G.nodes()
def nodes_iter(G):
"""Return an iterator over the graph nodes."""
return G.nodes_iter()
def edges(G,nbunch=None):
"""Return list of edges adjacent to nodes in nbunch.
Return all edges if nbunch is unspecified or nbunch=None.
For digraphs, edges=out_edges
"""
return G.edges(nbunch)
def edges_iter(G,nbunch=None):
"""Return iterator over edges adjacent to nodes in nbunch.
Return all edges if nbunch is unspecified or nbunch=None.
For digraphs, edges=out_edges
"""
return G.edges_iter(nbunch)
def degree(G,nbunch=None,weight=None):
"""Return degree of single node or of nbunch of nodes.
    If nbunch is omitted, then return degrees of *all* nodes.
"""
return G.degree(nbunch,weight)
def neighbors(G,n):
"""Return a list of nodes connected to node n. """
return G.neighbors(n)
def number_of_nodes(G):
"""Return the number of nodes in the graph."""
return G.number_of_nodes()
def number_of_edges(G):
"""Return the number of edges in the graph. """
return G.number_of_edges()
def density(G):
r"""Return the density of a graph.
The density for undirected graphs is
.. math::
d = \frac{2m}{n(n-1)},
and for directed graphs is
.. math::
d = \frac{m}{n(n-1)},
where `n` is the number of nodes and `m` is the number of edges in `G`.
Notes
-----
The density is 0 for a graph without edges and 1 for a complete graph.
The density of multigraphs can be higher than 1.
Self loops are counted in the total number of edges so graphs with self
loops can have density higher than 1.
"""
n=number_of_nodes(G)
m=number_of_edges(G)
if m==0 or n <= 1:
d=0.0
else:
if G.is_directed():
d=m/float(n*(n-1))
else:
d= m*2.0/float(n*(n-1))
return d
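# Worked example (illustrative, not from the original docstring): an undirected
# path graph on 4 nodes has n = 4 and m = 3, so its density is
# 2*3 / (4*3) = 0.5, i.e. density(nx.path_graph(4)) == 0.5.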
def degree_histogram(G):
"""Return a list of the frequency of each degree value.
Parameters
----------
G : Networkx graph
A graph
Returns
-------
hist : list
A list of frequencies of degrees.
The degree values are the index in the list.
Notes
-----
Note: the bins are width one, hence len(list) can be large
(Order(number_of_edges))
"""
degseq=list(G.degree().values())
dmax=max(degseq)+1
freq= [ 0 for d in range(dmax) ]
for d in degseq:
freq[d] += 1
return freq
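# Worked example (illustrative): nx.path_graph(4) has degree sequence
# [1, 2, 2, 1], so degree_histogram() returns [0, 2, 2] -- no nodes of
# degree 0, two of degree 1 and two of degree 2.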
def is_directed(G):
""" Return True if graph is directed."""
return G.is_directed()
def freeze(G):
"""Modify graph to prevent further change by adding or removing
nodes or edges.
Node and edge data can still be modified.
Parameters
-----------
G : graph
A NetworkX graph
Examples
--------
>>> G=nx.Graph()
>>> G.add_path([0,1,2,3])
>>> G=nx.freeze(G)
>>> try:
... G.add_edge(4,5)
... except nx.NetworkXError as e:
... print(str(e))
Frozen graph can't be modified
Notes
-----
To "unfreeze" a graph you must make a copy by creating a new graph object:
>>> graph = nx.path_graph(4)
>>> frozen_graph = nx.freeze(graph)
>>> unfrozen_graph = nx.Graph(frozen_graph)
>>> nx.is_frozen(unfrozen_graph)
False
See Also
--------
is_frozen
"""
def frozen(*args):
raise nx.NetworkXError("Frozen graph can't be modified")
G.add_node=frozen
G.add_nodes_from=frozen
G.remove_node=frozen
G.remove_nodes_from=frozen
G.add_edge=frozen
G.add_edges_from=frozen
G.remove_edge=frozen
G.remove_edges_from=frozen
G.clear=frozen
G.frozen=True
return G
def is_frozen(G):
"""Return True if graph is frozen.
Parameters
-----------
G : graph
A NetworkX graph
See Also
--------
freeze
"""
try:
return G.frozen
except AttributeError:
return False
def subgraph(G, nbunch):
"""Return the subgraph induced on nodes in nbunch.
Parameters
----------
G : graph
A NetworkX graph
nbunch : list, iterable
A container of nodes that will be iterated through once (thus
it should be an iterator or be iterable). Each element of the
container should be a valid node type: any hashable type except
       None. If nbunch is None, all nodes in the graph are used.
Nodes in nbunch that are not in the graph will be (quietly)
ignored.
Notes
-----
subgraph(G) calls G.subgraph()
"""
return G.subgraph(nbunch)
def create_empty_copy(G,with_nodes=True):
"""Return a copy of the graph G with all of the edges removed.
Parameters
----------
G : graph
A NetworkX graph
with_nodes : bool (default=True)
Include nodes.
Notes
-----
Graph, node, and edge data is not propagated to the new graph.
"""
H=G.__class__()
if with_nodes:
H.add_nodes_from(G)
return H
def info(G, n=None):
"""Print short summary of information for the graph G or the node n.
Parameters
----------
G : Networkx graph
A graph
n : node (any hashable)
A node in the graph G
"""
info='' # append this all to a string
if n is None:
info+="Name: %s\n"%G.name
type_name = [type(G).__name__]
info+="Type: %s\n"%",".join(type_name)
info+="Number of nodes: %d\n"%G.number_of_nodes()
info+="Number of edges: %d\n"%G.number_of_edges()
nnodes=G.number_of_nodes()
if len(G) > 0:
if G.is_directed():
info+="Average in degree: %8.4f\n"%\
(sum(G.in_degree().values())/float(nnodes))
info+="Average out degree: %8.4f"%\
(sum(G.out_degree().values())/float(nnodes))
else:
s=sum(G.degree().values())
info+="Average degree: %8.4f"%\
(float(s)/float(nnodes))
else:
if n not in G:
raise nx.NetworkXError("node %s not in graph"%(n,))
info+="Node % s has the following properties:\n"%n
info+="Degree: %d\n"%G.degree(n)
info+="Neighbors: "
info+=' '.join(str(nbr) for nbr in G.neighbors(n))
return info
def set_node_attributes(G,name,attributes):
"""Set node attributes from dictionary of nodes and values
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
attributes: dict
Dictionary of attributes keyed by node.
Examples
--------
>>> G=nx.path_graph(3)
>>> bb=nx.betweenness_centrality(G)
>>> nx.set_node_attributes(G,'betweenness',bb)
>>> G.node[1]['betweenness']
1.0
"""
for node,value in attributes.items():
G.node[node][name]=value
def get_node_attributes(G,name):
"""Get node attributes from graph
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
Returns
-------
Dictionary of attributes keyed by node.
Examples
--------
>>> G=nx.Graph()
>>> G.add_nodes_from([1,2,3],color='red')
>>> color=nx.get_node_attributes(G,'color')
>>> color[1]
'red'
"""
return dict( (n,d[name]) for n,d in G.node.items() if name in d)
def set_edge_attributes(G,name,attributes):
"""Set edge attributes from dictionary of edge tuples and values
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
attributes: dict
Dictionary of attributes keyed by edge (tuple).
Examples
--------
>>> G=nx.path_graph(3)
>>> bb=nx.edge_betweenness_centrality(G, normalized=False)
>>> nx.set_edge_attributes(G,'betweenness',bb)
>>> G[1][2]['betweenness']
2.0
"""
for (u,v),value in attributes.items():
G[u][v][name]=value
def get_edge_attributes(G,name):
"""Get edge attributes from graph
Parameters
----------
G : NetworkX Graph
name : string
Attribute name
Returns
-------
    Dictionary of attributes keyed by edge (tuple).
Examples
--------
>>> G=nx.Graph()
>>> G.add_path([1,2,3],color='red')
>>> color=nx.get_edge_attributes(G,'color')
>>> color[(1,2)]
'red'
"""
return dict( ((u,v),d[name]) for u,v,d in G.edges(data=True) if name in d)
def all_neighbors(graph, node):
""" Returns all of the neighbors of a node in the graph.
If the graph is directed returns predecessors as well as successors.
Parameters
----------
graph : NetworkX graph
Graph to find neighbors.
node : node
The node whose neighbors will be returned.
Returns
-------
neighbors : iterator
Iterator of neighbors
"""
if graph.is_directed():
values = itertools.chain.from_iterable([graph.predecessors_iter(node),
graph.successors_iter(node)])
else:
values = graph.neighbors_iter(node)
return values
def non_neighbors(graph, node):
"""Returns the non-neighbors of the node in the graph.
Parameters
----------
graph : NetworkX graph
Graph to find neighbors.
node : node
        The node whose non-neighbors will be returned.
Returns
-------
non_neighbors : iterator
Iterator of nodes in the graph that are not neighbors of the node.
"""
nbors = set(neighbors(graph, node)) | set([node])
return (nnode for nnode in graph if nnode not in nbors)
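if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): exercises a few
    # of the helpers defined above on a small path graph.
    G = nx.path_graph(4)
    print(info(G))
    print("density: %s" % density(G))
    print("degree histogram: %s" % degree_histogram(G))
    set_node_attributes(G, 'color', dict((n, 'red') for n in nodes(G)))
    print("node colors: %s" % get_node_attributes(G, 'color'))
    frozen = freeze(G)
    print("frozen: %s" % is_frozen(frozen))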
|
|
import os
import stat
import shutil
import tempfile
from colorama import init, Fore, Back, Style
from textwrap import dedent
import yaml
import jsonschema
import subprocess
from trackhub import Track, default_hub, CompositeTrack, ViewTrack
from trackhub.upload import upload_hub, upload_track, upload_file
from hubward import utils, liftover
from hubward.log import log
class Data(object):
def __init__(self, obj, reldir):
"""
Represents a single track destined for upload to UCSC as part of
a track hub.
Parameters
----------
obj : dict
One entry from the `tracks` list in the metadata.yaml file
reldir : str
The directory name of the metadata file. All paths within the
metadata file are assumed to be relative to `reldir`.
"""
self.obj = obj
self.reldir = reldir
self.original = os.path.join(reldir, obj['original'])
self.source_url = obj['source']['url']
self.source_fn = os.path.join(reldir, 'raw-data', obj['source']['fn'])
self.processed = os.path.join(reldir, obj['processed'])
self.description = obj.get('description', "")
self.label = obj['short_label']
self.obj.setdefault('long_label', self.label)
self.type_ = obj['type']
self.genome = obj['genome']
self.script = os.path.join(reldir, obj['script'])
self.trackinfo = obj.get('trackinfo', {})
def __str__(self):
return yaml.dump(self.obj)
def _needs_download(self):
if not os.path.exists(self.original):
return True
def _was_lifted_over(self):
if os.path.exists(os.path.join(self.reldir, 'ORIGINAL-STUDY')):
return True
def _download(self):
"""
Downloads and unpacks the source to `raw-data`.
After doing so, if self.original still does not exist, then raises
a ValueError.
"""
log(
"Downloading '%s' -> '%s'" %
(self.source_url, self.source_fn), indent=4)
if not os.path.exists(os.path.dirname(self.source_fn)):
os.makedirs(os.path.dirname(self.source_fn))
utils.download(self.source_url, self.source_fn)
utils.unpack(self.source_fn, os.path.dirname(self.source_fn))
if self._needs_download():
raise ValueError(
"Downloading and unpacking '%s' did not result in '%s'"
                % (self.source_url, self.original))
def _needs_update(self):
"""
Decides if we need to update the processed file.
"""
do_update = False
if self._was_lifted_over():
log(
"This file appears to have been lifted over from another "
"study, in which case we assume it does not need updating",
style=Fore.YELLOW
)
return False
if self._needs_download():
log("{0.original} does not exist; downloading"
                .format(self), indent=4)
self._download()
do_update = True
if not os.path.exists(self.processed):
log("{0.processed} does not exist".format(self), indent=4)
do_update = True
# if processed is a link, then check the LINK time
if (
os.path.exists(self.processed) and
utils.link_is_newer(self.script, self.processed)
):
log("{0.script} is newer than {0.processed}, need to re-run"
.format(self), indent=4)
do_update = True
# but for the original data, we want to FOLLOW the link
if (
os.path.exists(self.original) and
os.path.exists(self.processed) and
utils.is_newer(self.original, self.processed)
):
log("{0.original} is newer than {0.processed}, need to re-run"
.format(self), indent=4)
do_update = True
if not do_update:
log("{0.processed} is up to date"
.format(self), indent=4, style=Style.DIM)
return do_update
def process(self):
"""
Run the conversion script if the output needs updating.
"""
# Note: _needs_update() does the logging.
if not self._needs_update():
return
if not os.path.exists(self.script):
raise ValueError(
"Processing script {0.script} does not exist".format(self))
if not (stat.S_IXUSR & os.stat(self.script)[stat.ST_MODE]):
raise ValueError(
Fore.RED +
"Processing script {0.script} not executable".format(self) +
Fore.RESET)
utils.makedirs(os.path.dirname(self.processed))
cmds = [
self.script,
self.original,
self.processed
]
retval = subprocess.check_call(cmds)
if self._needs_update():
raise ValueError(
Fore.RED + 'The following command did not update '
'{1}:\n\n{0}\n'.format(' \\\n'.join(cmds), self.processed) +
Fore.RESET
)
def _needs_liftover(self, from_assembly, to_assembly, newfile):
"""
        Checks to see if liftover is needed, based on the presence of the
        sentinel file and on whether the processed file is newer than the
        lifted-over target.
"""
# Sentinel file encodes assembly conversion;
sentinel = self._liftover_sentinel(from_assembly, to_assembly, newfile)
if not os.path.exists(sentinel):
return True
elif utils.is_newer(self.processed, newfile):
return True
return False
def _liftover_sentinel(self, from_assembly, to_assembly, newfile):
"""
Returns the name of a hidden, empty file used to indicate that
a liftover has been performed.
"""
return os.path.join(
os.path.dirname(newfile),
'.{0}-to-{1}.' +
os.path.basename(newfile)
).format(from_assembly, to_assembly)
def liftover(self, from_assembly, to_assembly, newfile):
"""
Lifts over the processed file to a new file, but only if needed.
Uses a hidden sentinel file to indicate whether it's been lifted over.
Parameters
----------
from_assembly : str
Existing data are in this assembly's coordinates
to_assembly : str
Lift over existing data to this assembly's coordinates
newfile : str
Target filename of the lifted-over data
"""
        if from_assembly != self.genome:
log(
"{0} not from assembly {1}. Skipping liftover from {1} to {2} "
"and simply copying the file as-is to {3}"
.format(self.label, from_assembly, to_assembly, newfile)
)
            shutil.copy(self.processed, newfile)
            return
if not self._needs_liftover(from_assembly, to_assembly, newfile):
log("{0} is already lifted over and up-to-date. Skipping."
.format(newfile))
return
tmp = tempfile.NamedTemporaryFile(delete=False).name
log("Lift over {0} to {1}".format(self.processed, tmp))
liftover.liftover(
from_assembly, to_assembly, self.processed, tmp, self.type_)
utils.makedirs(os.path.dirname(newfile))
log("Moving {0} to {1}".format(tmp, newfile))
shutil.move(tmp, newfile)
if self.type_.lower() == 'bam':
shutil.move(tmp + '.bai', newfile + '.bai')
# CrossMap.py seems to `chmod go-rw` on lifted-over file. So we copy
# permissions from the original one.
shutil.copymode(self.processed, newfile)
# Write the sentinel file to indicate genome we lifted over to.
sentinel = self._liftover_sentinel(from_assembly, to_assembly, newfile)
with open(sentinel, 'w') as fout:
pass
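# Illustrative sketch (not from the original project documentation): the keys
# that Data.__init__ above reads from one entry of the `tracks` list in
# metadata.yaml. Paths and labels are hypothetical; 'description',
# 'long_label' and 'trackinfo' are optional.
#   - original: raw-data/signal.bedgraph
#     source:
#       url: http://example.com/study/signal.tar.gz
#       fn: signal.tar.gz
#     processed: processed-data/signal.bigwig
#     script: src/convert-signal.py
#     short_label: signal
#     type: bigwig
#     genome: hg19
#     trackinfo:
#       visibility: full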
class Study(object):
def __init__(self, dirname):
"""
Represents a single metadata.yaml file.
Parameters
----------
        dirname : str
            Directory containing the study's metadata.yaml (and, optionally,
            metadata-builder.py) file.
"""
self.dirname = dirname
self._build_metadata()
fn = os.path.join(self.dirname, 'metadata.yaml')
if not os.path.exists(fn):
raise ValueError("Can't find {0}".format(fn))
self.metadata = yaml.load(open(fn))
schema = yaml.load(utils.get_resource('metadata_schema.yaml'))
jsonschema.validate(self.metadata, schema)
self.study = self.metadata['study']
self.label = self.metadata['study']['label']
self.study.setdefault('short_label', self.label)
self.study.setdefault('long_label', self.study['short_label'])
self.study['PMID'] = str(self.study.get('PMID', ''))
self.tracks = [Data(d, self.dirname) for d in self.metadata['tracks']]
# If description is blank or missing, fill in the contents of the
# README.
if not self.study.get('description', ''):
readme = self._find_readme()
if readme:
self.study['description'] = open(readme).read()
def _find_readme(self):
contents = os.listdir(self.dirname)
valid_readmes = [
'README.rst',
'README',
'readme.rst',
'readme']
if 'ORIGINAL-STUDY' in contents:
prefix = os.path.join(self.dirname, 'ORIGINAL-STUDY')
else:
prefix = self.dirname
for filename in contents:
if filename in valid_readmes:
return os.path.join(prefix, filename)
def __str__(self):
return yaml.dump(self.metadata)
def _was_lifted_over(self):
if os.path.exists(os.path.join(self.dirname, 'ORIGINAL-STUDY')):
return True
def _build_metadata(self):
"""
If metadata-builder.py exists, always run it.
"""
builder = os.path.join(self.dirname, 'metadata-builder.py')
if not os.path.exists(builder):
return
log("{0} exists. Running it...".format(builder))
metadata = os.path.join(self.dirname, 'metadata.yaml')
if os.path.exists(metadata):
backup = os.path.join(self.dirname, 'metadata.yaml.bak')
shutil.copy(metadata, backup)
log("Existing {0} backed up to {1}"
.format(metadata, backup))
if not (stat.S_IXUSR & os.stat(builder)[stat.ST_MODE]):
raise ValueError(
Fore.RED +
"{0} not executable".format(builder) +
Fore.RESET)
cmds = ['./metadata-builder.py']
retval = subprocess.check_call(cmds, cwd=self.dirname)
if not os.path.exists(metadata):
raise ValueError("Expected {0} to be created by {1}, but it was not"
.format(metadata, builder))
def process(self, force=False):
log('Study: {0.study[label]}, in "{0.dirname}"'.format(self),
style=Fore.BLUE)
for d in self.tracks:
d.process()
def reference_section(self):
"""
Creates a ReST-formatted reference section to be appended to the end of
the documentation for the composite track config page.
If no configured reference or PMID, then return an empty string.
"""
reference = self.study.get('reference', "")
# Allow "0001111", "PMID:0001111", "PMID: 0001111"
pmid = self.study.get('PMID', "").split(':')[-1].strip()
if not (reference or pmid):
return ""
if pmid:
pmid = 'http://www.ncbi.nlm.nih.gov/pubmed/{0}'.format(pmid)
return dedent(
"""
Reference
---------
{0}
{1}
""").format(reference, pmid)
def composite_track(self):
"""
Create a composite track ready to be added to a trackhub.TrackDb
instance.
"""
bigwigs = [i for i in self.tracks if i.type_ == 'bigwig']
bigbeds = [i for i in self.tracks if i.type_ == 'bigbed']
bams = [i for i in self.tracks if i.type_ == 'bam']
# Build the HTML docs
last_section = self.reference_section()
html_string = utils.reST_to_html(
self.metadata['study'].get('description', '') + '\n' + last_section)
sanitized_label = utils.sanitize(self.label, strict=True)
# Composite track to hold all subtracks for the study
composite = CompositeTrack(
name=sanitized_label,
short_label=self.study['short_label'],
long_label=self.study['long_label'],
tracktype='bigBed',
# Add all the documentation
html_string=html_string)
# If there are any bigWigs defined for this study, make a new "signal"
# subtrack in the composite and then add the bigWigs to it.
#
# Uses the sanitized label for the study to ensure uniqueness among
# tracks.
#
def _add_tracks(data_list, view, default_tracktype):
for data_obj in data_list:
kwargs = data_obj.obj.get('trackinfo', {})
kwargs = dict((k, str(v)) for k, v in kwargs.items())
kwargs.setdefault('tracktype', default_tracktype)
view.add_tracks(
Track(
name=sanitized_label + utils.sanitize(data_obj.label),
short_label=data_obj.label,
long_label=data_obj.obj['long_label'],
local_fn=data_obj.processed,
**kwargs))
if len(bigwigs) > 0:
signal_view = ViewTrack(
name=sanitized_label + 'signalviewtrack',
view=sanitized_label + 'signalview',
short_label=self.label + ' signal view',
long_label=self.label + ' signal view',
visibility='full',
maxHeightPixels='100:25:8',
autoScale='off',
tracktype='bigWig',
)
composite.add_view(signal_view)
_add_tracks(bigwigs, signal_view, 'bigWig')
# Same thing with bigBeds
if len(bigbeds) > 0:
bed_view = ViewTrack(
name=sanitized_label + 'bedviewtrack',
view=sanitized_label + 'bed_view',
short_label=self.label + ' bed view',
long_label=self.label + ' bed view',
visibility='dense',
)
composite.add_view(bed_view)
_add_tracks(bigbeds, bed_view, 'bigBed 9')
# and bams
if len(bams) > 0:
bam_view = ViewTrack(
name=sanitized_label + 'bamviewtrack',
view=sanitized_label + 'bam_view',
short_label=self.label + ' bam view',
long_label=self.label + ' bam view',
visibility='dense',
)
composite.add_view(bam_view)
_add_tracks(bams, bam_view, 'bam')
return composite
class Group(object):
def __init__(self, fn):
self.group = yaml.load(open(fn))
self.filename = fn
self.dirname = os.path.dirname(fn)
self.group.setdefault('short_label', self.group['name'])
self.group.setdefault('long_label', self.group['name'])
schema = yaml.load(utils.get_resource('group_schema.yaml'))
jsonschema.validate(self.group, schema)
self.studies = [
Study(os.path.join(self.dirname, s))
for s in self.group['studies']
]
def process(self):
hub, genomes_file, genome_, trackdb = default_hub(
hub_name=self.group['name'],
genome=self.group['genome'],
short_label=self.group['short_label'],
long_label=self.group['long_label'],
email=self.group['email'],
)
hub.url = self.group['hub_url']
# Process each study, and have it generate its own composite track to
# be added to the trackdb.
for study in self.studies:
study.process()
composite = study.composite_track()
trackdb.add_tracks(composite)
self.hub = hub
self.genomes_file = genomes_file
self.genome_ = genome_
self.trackdb = trackdb
def upload(self, hub_only=False, host=None, user=None, rsync_options=None,
hub_remote=None):
self.process()
if 'server' in self.group:
host = host or self.group['server'].get('host')
user = user or self.group['server'].get('user')
rsync_options = rsync_options or self.group['server'].get('rsync_options')
hub_remote = hub_remote or self.group['server'].get('hub_remote')
self.hub.remote_fn = hub_remote
self.hub.remote_dir = os.path.dirname(hub_remote)
self.hub.render()
if user == '$USER':
user = os.environ.get('USER')
kwargs = dict(host=host, user=user, rsync_options=rsync_options)
upload_hub(hub=self.hub, **kwargs)
if not hub_only:
for track, level in self.hub.leaves(Track):
upload_track(track=track, **kwargs)
log("Hub can now be accessed via {0}"
.format(self.hub.url), style=Fore.BLUE)
|
|
from weakref import ref
from decimal import Decimal
import re
import datetime
import traceback, sys
from kivy.app import App
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.compat import string_types
from kivy.properties import (ObjectProperty, DictProperty, NumericProperty,
ListProperty, StringProperty)
from kivy.uix.label import Label
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.utils import platform
from electrum_vtc.util import profiler, parse_URI, format_time, InvalidPassword, NotEnoughFunds
from electrum_vtc import bitcoin
from electrum_vtc.util import timestamp_to_datetime
from electrum_vtc.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from context_menu import ContextMenu
from electrum_vtc_gui.kivy.i18n import _
class EmptyLabel(Factory.Label):
pass
class CScreen(Factory.Screen):
__events__ = ('on_activate', 'on_deactivate', 'on_enter', 'on_leave')
action_view = ObjectProperty(None)
loaded = False
kvname = None
context_menu = None
menu_actions = []
app = App.get_running_app()
def _change_action_view(self):
app = App.get_running_app()
action_bar = app.root.manager.current_screen.ids.action_bar
_action_view = self.action_view
if (not _action_view) or _action_view.parent:
return
action_bar.clear_widgets()
action_bar.add_widget(_action_view)
def on_enter(self):
# FIXME: use a proper event instead of relying on the screen's
# animation time
Clock.schedule_once(lambda dt: self.dispatch('on_activate'), .25)
def update(self):
pass
@profiler
def load_screen(self):
self.screen = Builder.load_file('gui/kivy/uix/ui_screens/' + self.kvname + '.kv')
self.add_widget(self.screen)
self.loaded = True
self.update()
setattr(self.app, self.kvname + '_screen', self)
def on_activate(self):
if self.kvname and not self.loaded:
self.load_screen()
#Clock.schedule_once(lambda dt: self._change_action_view())
def on_leave(self):
self.dispatch('on_deactivate')
def on_deactivate(self):
self.hide_menu()
def hide_menu(self):
if self.context_menu is not None:
self.remove_widget(self.context_menu)
self.context_menu = None
def show_menu(self, obj):
self.hide_menu()
self.context_menu = ContextMenu(obj, self.menu_actions)
self.add_widget(self.context_menu)
TX_ICONS = [
"close",
"close",
"close",
"unconfirmed",
"close",
"clock1",
"clock2",
"clock3",
"clock4",
"clock5",
"confirmed",
]
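# Illustrative example: the integer status returned by wallet.get_tx_status()
# in get_card() below is used directly as an index into TX_ICONS:
#
#     >>> TX_ICONS[3]
#     'unconfirmed'
#     >>> TX_ICONS[10]
#     'confirmed'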
class HistoryScreen(CScreen):
tab = ObjectProperty(None)
kvname = 'history'
cards = {}
def __init__(self, **kwargs):
self.ra_dialog = None
super(HistoryScreen, self).__init__(**kwargs)
self.menu_actions = [ ('Label', self.label_dialog), ('Details', self.show_tx)]
def show_tx(self, obj):
tx_hash = obj.tx_hash
tx = self.app.wallet.transactions.get(tx_hash)
if not tx:
return
self.app.tx_dialog(tx)
def label_dialog(self, obj):
from dialogs.label_dialog import LabelDialog
key = obj.tx_hash
text = self.app.wallet.get_label(key)
def callback(text):
self.app.wallet.set_label(key, text)
self.update()
d = LabelDialog(_('Enter Transaction Label'), text, callback)
d.open()
def get_card(self, tx_hash, height, conf, timestamp, value, balance):
status, status_str = self.app.wallet.get_tx_status(tx_hash, height, conf, timestamp)
icon = "atlas://gui/kivy/theming/light/" + TX_ICONS[status]
label = self.app.wallet.get_label(tx_hash) if tx_hash else _('Pruned transaction outputs')
date = timestamp_to_datetime(timestamp)
ri = self.cards.get(tx_hash)
if ri is None:
ri = Factory.HistoryItem()
ri.screen = self
ri.tx_hash = tx_hash
self.cards[tx_hash] = ri
ri.icon = icon
ri.date = status_str
ri.message = label
ri.value = value or 0
ri.amount = self.app.format_amount(value, True) if value is not None else '--'
ri.confirmations = conf
if self.app.fiat_unit and date:
rate = self.app.fx.history_rate(date)
if rate:
s = self.app.fx.value_str(value, rate)
ri.quote_text = '' if s is None else s + ' ' + self.app.fiat_unit
return ri
def update(self, see_all=False):
if self.app.wallet is None:
return
history = reversed(self.app.wallet.get_history())
history_card = self.screen.ids.history_container
history_card.clear_widgets()
count = 0
for item in history:
ri = self.get_card(*item)
count += 1
history_card.add_widget(ri)
if count == 0:
msg = _('This screen shows your list of transactions. It is currently empty.')
history_card.add_widget(EmptyLabel(text=msg))
class SendScreen(CScreen):
kvname = 'send'
payment_request = None
def set_URI(self, text):
import electrum_vtc as electrum
try:
uri = electrum.util.parse_URI(text, self.app.on_pr)
except:
self.app.show_info(_("Not a Litecoin URI"))
return
amount = uri.get('amount')
self.screen.address = uri.get('address', '')
self.screen.message = uri.get('message', '')
self.screen.amount = self.app.format_amount_and_units(amount) if amount else ''
self.payment_request = None
self.screen.is_pr = False
def update(self):
pass
def do_clear(self):
self.screen.amount = ''
self.screen.message = ''
self.screen.address = ''
self.payment_request = None
self.screen.is_pr = False
def set_request(self, pr):
self.screen.address = pr.get_requestor()
amount = pr.get_amount()
self.screen.amount = self.app.format_amount_and_units(amount) if amount else ''
self.screen.message = pr.get_memo()
if pr.is_pr():
self.screen.is_pr = True
self.payment_request = pr
else:
self.screen.is_pr = False
self.payment_request = None
def do_save(self):
if not self.screen.address:
return
if self.screen.is_pr:
# it should already be saved
return
# save address as invoice
from electrum_vtc.paymentrequest import make_unsigned_request, PaymentRequest
req = {'address':self.screen.address, 'memo':self.screen.message}
amount = self.app.get_amount(self.screen.amount) if self.screen.amount else 0
req['amount'] = amount
pr = make_unsigned_request(req).SerializeToString()
pr = PaymentRequest(pr)
self.app.wallet.invoices.add(pr)
self.app.update_tab('invoices')
self.app.show_info(_("Invoice saved"))
if pr.is_pr():
self.screen.is_pr = True
self.payment_request = pr
else:
self.screen.is_pr = False
self.payment_request = None
def do_paste(self):
contents = unicode(self.app._clipboard.paste())
if not contents:
self.app.show_info(_("Clipboard is empty"))
return
self.set_URI(contents)
def do_send(self):
if self.screen.is_pr:
if self.payment_request.has_expired():
self.app.show_error(_('Payment request has expired'))
return
outputs = self.payment_request.get_outputs()
else:
address = str(self.screen.address)
if not address:
self.app.show_error(_('Recipient not specified.') + ' ' + _('Please scan a Litecoin address or a payment request'))
return
if not bitcoin.is_address(address):
self.app.show_error(_('Invalid Litecoin Address') + ':\n' + address)
return
try:
amount = self.app.get_amount(self.screen.amount)
except:
self.app.show_error(_('Invalid amount') + ':\n' + self.screen.amount)
return
outputs = [(bitcoin.TYPE_ADDRESS, address, amount)]
message = unicode(self.screen.message)
amount = sum(map(lambda x:x[2], outputs))
if self.app.electrum_config.get('use_rbf'):
from dialogs.question import Question
d = Question(_('Should this transaction be replaceable?'), lambda b: self._do_send(amount, message, outputs, b))
d.open()
else:
self._do_send(amount, message, outputs, False)
def _do_send(self, amount, message, outputs, rbf):
# make unsigned transaction
config = self.app.electrum_config
coins = self.app.wallet.get_spendable_coins(None, config)
try:
tx = self.app.wallet.make_unsigned_transaction(coins, outputs, config, None)
except NotEnoughFunds:
self.app.show_error(_("Not enough funds"))
return
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.app.show_error(str(e))
return
if rbf:
tx.set_rbf(True)
fee = tx.get_fee()
msg = [
_("Amount to be sent") + ": " + self.app.format_amount_and_units(amount),
_("Mining fee") + ": " + self.app.format_amount_and_units(fee),
]
if fee >= config.get('confirm_fee', 1000000):
msg.append(_('Warning')+ ': ' + _("The fee for this transaction seems unusually high."))
msg.append(_("Enter your PIN code to proceed"))
self.app.protected('\n'.join(msg), self.send_tx, (tx, message))
def send_tx(self, tx, message, password):
def on_success(tx):
if tx.is_complete():
self.app.broadcast(tx, self.payment_request)
self.app.wallet.set_label(tx.hash(), message)
else:
self.app.tx_dialog(tx)
def on_failure(error):
self.app.show_error(error)
if self.app.wallet.can_sign(tx):
self.app.show_info("Signing...")
self.app.sign_tx(tx, password, on_success, on_failure)
else:
self.app.tx_dialog(tx)
class ReceiveScreen(CScreen):
kvname = 'receive'
def update(self):
if not self.screen.address:
self.get_new_address()
else:
status = self.app.wallet.get_request_status(self.screen.address)
self.screen.status = _('Payment received') if status == PR_PAID else ''
def clear(self):
self.screen.address = ''
self.screen.amount = ''
self.screen.message = ''
def get_new_address(self):
if not self.app.wallet:
return False
self.clear()
addr = self.app.wallet.get_unused_address()
if addr is None:
addr = self.app.wallet.get_receiving_address() or ''
b = False
else:
b = True
self.screen.address = addr
return b
def on_address(self, addr):
req = self.app.wallet.get_payment_request(addr, self.app.electrum_config)
self.screen.status = ''
if req:
self.screen.message = unicode(req.get('memo', ''))
amount = req.get('amount')
self.screen.amount = self.app.format_amount_and_units(amount) if amount else ''
status = req.get('status', PR_UNKNOWN)
self.screen.status = _('Payment received') if status == PR_PAID else ''
Clock.schedule_once(lambda dt: self.update_qr())
def get_URI(self):
from electrum_vtc.util import create_URI
amount = self.screen.amount
if amount:
a, u = self.screen.amount.split()
assert u == self.app.base_unit
amount = Decimal(a) * pow(10, self.app.decimal_point())
return create_URI(self.screen.address, amount, self.screen.message)
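# Illustrative example (assuming base_unit == 'VTC' and decimal_point() == 8):
# an on-screen amount of '1.5 VTC' is split into ('1.5', 'VTC') and converted
# to base units before being passed to create_URI:
#
#     >>> from decimal import Decimal
#     >>> Decimal('1.5') * pow(10, 8)
#     Decimal('150000000.0')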
@profiler
def update_qr(self):
uri = self.get_URI()
qr = self.screen.ids.qr
qr.set_data(uri)
def do_share(self):
uri = self.get_URI()
self.app.do_share(uri, _("Share Litecoin Request"))
def do_copy(self):
uri = self.get_URI()
self.app._clipboard.copy(uri)
self.app.show_info(_('Request copied to clipboard'))
def save_request(self):
addr = str(self.screen.address)
amount = str(self.screen.amount)
message = unicode(self.screen.message)
amount = self.app.get_amount(amount) if amount else 0
req = self.app.wallet.make_payment_request(addr, amount, message, None)
self.app.wallet.add_payment_request(req, self.app.electrum_config)
self.app.update_tab('requests')
def on_amount_or_message(self):
self.save_request()
Clock.schedule_once(lambda dt: self.update_qr())
def do_new(self):
addr = self.get_new_address()
if not addr:
self.app.show_info(_('Please use the existing requests first.'))
else:
self.save_request()
self.app.show_info(_('New request added to your list.'))
invoice_text = {
PR_UNPAID:_('Pending'),
PR_UNKNOWN:_('Unknown'),
PR_PAID:_('Paid'),
PR_EXPIRED:_('Expired')
}
request_text = {
PR_UNPAID: _('Pending'),
PR_UNKNOWN: _('Unknown'),
PR_PAID: _('Received'),
PR_EXPIRED: _('Expired')
}
pr_icon = {
PR_UNPAID: 'atlas://gui/kivy/theming/light/important',
PR_UNKNOWN: 'atlas://gui/kivy/theming/light/important',
PR_PAID: 'atlas://gui/kivy/theming/light/confirmed',
PR_EXPIRED: 'atlas://gui/kivy/theming/light/close'
}
class InvoicesScreen(CScreen):
kvname = 'invoices'
cards = {}
def get_card(self, pr):
key = pr.get_id()
ci = self.cards.get(key)
if ci is None:
ci = Factory.InvoiceItem()
ci.key = key
ci.screen = self
self.cards[key] = ci
ci.requestor = pr.get_requestor()
ci.memo = pr.get_memo()
amount = pr.get_amount()
if amount:
ci.amount = self.app.format_amount_and_units(amount)
status = self.app.wallet.invoices.get_status(ci.key)
ci.status = invoice_text[status]
ci.icon = pr_icon[status]
else:
ci.amount = _('No Amount')
ci.status = ''
exp = pr.get_expiration_date()
ci.date = format_time(exp) if exp else _('Never')
return ci
def update(self):
self.menu_actions = [('Pay', self.do_pay), ('Details', self.do_view), ('Delete', self.do_delete)]
invoices_list = self.screen.ids.invoices_container
invoices_list.clear_widgets()
_list = self.app.wallet.invoices.sorted_list()
for pr in _list:
ci = self.get_card(pr)
invoices_list.add_widget(ci)
if not _list:
msg = _('This screen shows the list of payment requests that have been sent to you. You may also use it to store contact addresses.')
invoices_list.add_widget(EmptyLabel(text=msg))
def do_pay(self, obj):
pr = self.app.wallet.invoices.get(obj.key)
self.app.on_pr(pr)
def do_view(self, obj):
pr = self.app.wallet.invoices.get(obj.key)
pr.verify(self.app.wallet.contacts)
self.app.show_pr_details(pr.get_dict(), obj.status, True)
def do_delete(self, obj):
from dialogs.question import Question
def cb(result):
if result:
self.app.wallet.invoices.remove(obj.key)
self.app.update_tab('invoices')
d = Question(_('Delete invoice?'), cb)
d.open()
class RequestsScreen(CScreen):
kvname = 'requests'
cards = {}
def get_card(self, req):
address = req['address']
timestamp = req.get('time', 0)
amount = req.get('amount')
expiration = req.get('exp', None)
status = req.get('status')
signature = req.get('sig')
ci = self.cards.get(address)
if ci is None:
ci = Factory.RequestItem()
ci.screen = self
ci.address = address
self.cards[address] = ci
ci.memo = self.app.wallet.get_label(address)
if amount:
status = req.get('status')
ci.status = request_text[status]
else:
received = self.app.wallet.get_addr_received(address)
ci.status = self.app.format_amount_and_units(received)
ci.icon = pr_icon[status]
ci.amount = self.app.format_amount_and_units(amount) if amount else _('No Amount')
ci.date = format_time(timestamp)
return ci
def update(self):
self.menu_actions = [('Show', self.do_show), ('Details', self.do_view), ('Delete', self.do_delete)]
requests_list = self.screen.ids.requests_container
requests_list.clear_widgets()
_list = self.app.wallet.get_sorted_requests(self.app.electrum_config) if self.app.wallet else []
for req in _list:
ci = self.get_card(req)
requests_list.add_widget(ci)
if not _list:
msg = _('This screen shows the list of payment requests you made.')
requests_list.add_widget(EmptyLabel(text=msg))
def do_show(self, obj):
self.app.show_request(obj.address)
def do_view(self, obj):
req = self.app.wallet.get_payment_request(obj.address, self.app.electrum_config)
status = req.get('status')
amount = req.get('amount')
address = req['address']
if amount:
status = req.get('status')
status = request_text[status]
else:
received_amount = self.app.wallet.get_addr_received(address)
status = self.app.format_amount_and_units(received_amount)
self.app.show_pr_details(req, status, False)
def do_delete(self, obj):
from dialogs.question import Question
def cb(result):
if result:
self.app.wallet.remove_payment_request(obj.address, self.app.electrum_config)
self.update()
d = Question(_('Delete request?'), cb)
d.open()
class TabbedCarousel(Factory.TabbedPanel):
'''Custom TabbedPanel backed by a Carousel, used in the main screen.
'''
carousel = ObjectProperty(None)
def animate_tab_to_center(self, value):
scrlv = self._tab_strip.parent
if not scrlv:
return
idx = self.tab_list.index(value)
n = len(self.tab_list)
if idx in [0, 1]:
scroll_x = 1
elif idx in [n-1, n-2]:
scroll_x = 0
else:
scroll_x = 1. * (n - idx - 1) / (n - 1)
mation = Factory.Animation(scroll_x=scroll_x, d=.25)
mation.cancel_all(scrlv)
mation.start(scrlv)
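# Worked example of the scroll_x formula above: with n == 5 tabs, the first two
# tabs pin the strip to scroll_x == 1, the last two to scroll_x == 0, and a
# middle tab scrolls proportionally, e.g. idx == 2 gives
# 1. * (5 - 2 - 1) / (5 - 1) == 0.5.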
def on_current_tab(self, instance, value):
self.animate_tab_to_center(value)
def on_index(self, instance, value):
current_slide = instance.current_slide
if not hasattr(current_slide, 'tab'):
return
tab = current_slide.tab
ct = self.current_tab
try:
if ct.text != tab.text:
carousel = self.carousel
carousel.slides[ct.slide].dispatch('on_leave')
self.switch_to(tab)
carousel.slides[tab.slide].dispatch('on_enter')
except AttributeError:
current_slide.dispatch('on_enter')
def switch_to(self, header):
# we have to replace the functionality of the original switch_to
if not header:
return
if not hasattr(header, 'slide'):
header.content = self.carousel
super(TabbedCarousel, self).switch_to(header)
try:
tab = self.tab_list[-1]
except IndexError:
return
self._current_tab = tab
tab.state = 'down'
return
carousel = self.carousel
self.current_tab.state = "normal"
header.state = 'down'
self._current_tab = header
# set the carousel to load the appropriate slide
# saved in the screen attribute of the tab head
slide = carousel.slides[header.slide]
if carousel.current_slide != slide:
carousel.current_slide.dispatch('on_leave')
carousel.load_slide(slide)
slide.dispatch('on_enter')
def add_widget(self, widget, index=0):
if isinstance(widget, Factory.CScreen):
self.carousel.add_widget(widget)
return
super(TabbedCarousel, self).add_widget(widget, index=index)
|
|
from pandac import PandaModules as PM
from direct.directnotify import DirectNotifyGlobal
from direct.showbase.PythonUtil import list2dict, uniqueElements
import string
import LevelConstants
import types
if __dev__:
import os
class LevelSpec:
notify = DirectNotifyGlobal.directNotify.newCategory('LevelSpec')
SystemEntIds = (LevelConstants.UberZoneEntId, LevelConstants.LevelMgrEntId, LevelConstants.EditMgrEntId)
def __init__(self, spec = None, scenario = 0):
newSpec = 0
if type(spec) is types.ModuleType:
if __dev__:
reload(spec)
self.specDict = spec.levelSpec
if __dev__:
self.setFilename(spec.__file__)
elif type(spec) is types.DictType:
self.specDict = spec
elif spec is None:
if __dev__:
newSpec = 1
self.specDict = {'globalEntities': {},
'scenarios': [{}]}
self.entId2specDict = {}
self.entId2specDict.update(list2dict(self.getGlobalEntIds(), value=self.privGetGlobalEntityDict()))
for i in range(self.getNumScenarios()):
self.entId2specDict.update(list2dict(self.getScenarioEntIds(i), value=self.privGetScenarioEntityDict(i)))
self.setScenario(scenario)
if __dev__:
if newSpec:
import EntityTypes
import EntityTypeRegistry
etr = EntityTypeRegistry.EntityTypeRegistry(EntityTypes)
self.setEntityTypeReg(etr)
entId = LevelConstants.UberZoneEntId
self.insertEntity(entId, 'zone')
self.doSetAttrib(entId, 'name', 'UberZone')
entId = LevelConstants.LevelMgrEntId
self.insertEntity(entId, 'levelMgr')
self.doSetAttrib(entId, 'name', 'LevelMgr')
entId = LevelConstants.EditMgrEntId
self.insertEntity(entId, 'editMgr')
self.doSetAttrib(entId, 'name', 'EditMgr')
return
def destroy(self):
del self.specDict
del self.entId2specDict
del self.scenario
if hasattr(self, 'level'):
del self.level
if hasattr(self, 'entTypeReg'):
del self.entTypeReg
def getNumScenarios(self):
return len(self.specDict['scenarios'])
def setScenario(self, scenario):
self.scenario = scenario
def getScenario(self):
return self.scenario
def getGlobalEntIds(self):
return self.privGetGlobalEntityDict().keys()
def getScenarioEntIds(self, scenario = None):
if scenario is None:
scenario = self.scenario
return self.privGetScenarioEntityDict(scenario).keys()
def getAllEntIds(self):
return self.getGlobalEntIds() + self.getScenarioEntIds()
def getAllEntIdsFromAllScenarios(self):
entIds = self.getGlobalEntIds()
for scenario in xrange(self.getNumScenarios()):
entIds.extend(self.getScenarioEntIds(scenario))
return entIds
def getEntitySpec(self, entId):
specDict = self.entId2specDict[entId]
return specDict[entId]
def getCopyOfSpec(self, spec):
specCopy = {}
if not isClient():
print 'EXECWARNING LevelSpec exec: %s' % self.getSpecImportsModuleName()
printStack()
exec 'from %s import *' % self.getSpecImportsModuleName()
for key in spec.keys():
specCopy[key] = eval(repr(spec[key]))
return specCopy
def getEntitySpecCopy(self, entId):
specDict = self.entId2specDict[entId]
return self.getCopyOfSpec(specDict[entId])
def getEntityType(self, entId):
return self.getEntitySpec(entId)['type']
def getEntityZoneEntId(self, entId):
spec = self.getEntitySpec(entId)
type = spec['type']
if type == 'zone':
return entId
return self.getEntityZoneEntId(spec['parentEntId'])
def getEntType2ids(self, entIds):
entType2ids = {}
for entId in entIds:
type = self.getEntityType(entId)
entType2ids.setdefault(type, [])
entType2ids[type].append(entId)
return entType2ids
def privGetGlobalEntityDict(self):
return self.specDict['globalEntities']
def privGetScenarioEntityDict(self, scenario):
return self.specDict['scenarios'][scenario]
def printZones(self):
allIds = self.getAllEntIds()
type2id = self.getEntType2ids(allIds)
zoneIds = type2id['zone']
if 0 in zoneIds:
zoneIds.remove(0)
zoneIds.sort()
for zoneNum in zoneIds:
spec = self.getEntitySpec(zoneNum)
print 'zone %s: %s' % (zoneNum, spec['name'])
if __dev__:
def setLevel(self, level):
self.level = level
def hasLevel(self):
return hasattr(self, 'level')
def setEntityTypeReg(self, entTypeReg):
self.entTypeReg = entTypeReg
for entId in self.getAllEntIds():
spec = self.getEntitySpec(entId)
type = self.getEntityType(entId)
typeDesc = self.entTypeReg.getTypeDesc(type)
attribDescDict = typeDesc.getAttribDescDict()
for attribName, desc in attribDescDict.iteritems():
if attribName not in spec:
spec[attribName] = desc.getDefaultValue()
self.checkSpecIntegrity()
def hasEntityTypeReg(self):
return hasattr(self, 'entTypeReg')
def setFilename(self, filename):
self.filename = filename
def doSetAttrib(self, entId, attrib, value):
specDict = self.entId2specDict[entId]
specDict[entId][attrib] = value
def setAttribChange(self, entId, attrib, value, username):
LevelSpec.notify.info('setAttribChange(%s): %s, %s = %s' % (username,
entId,
attrib,
repr(value)))
self.doSetAttrib(entId, attrib, value)
if self.hasLevel():
self.level.handleAttribChange(entId, attrib, value, username)
def insertEntity(self, entId, entType, parentEntId = 'unspecified'):
LevelSpec.notify.info('inserting entity %s (%s)' % (entId, entType))
globalEnts = self.privGetGlobalEntityDict()
self.entId2specDict[entId] = globalEnts
globalEnts[entId] = {}
spec = globalEnts[entId]
attribDescs = self.entTypeReg.getTypeDesc(entType).getAttribDescDict()
for name, desc in attribDescs.items():
spec[name] = desc.getDefaultValue()
spec['type'] = entType
if parentEntId != 'unspecified':
spec['parentEntId'] = parentEntId
if self.hasLevel():
self.level.handleEntityInsert(entId)
else:
LevelSpec.notify.warning('no level to be notified of insertion')
def removeEntity(self, entId):
LevelSpec.notify.info('removing entity %s' % entId)
if self.hasLevel():
self.level.handleEntityRemove(entId)
else:
LevelSpec.notify.warning('no level to be notified of removal')
dict = self.entId2specDict[entId]
del dict[entId]
del self.entId2specDict[entId]
def removeZoneReferences(self, removedZoneNums):
type2ids = self.getEntType2ids(self.getAllEntIdsFromAllScenarios())
for type in type2ids:
typeDesc = self.entTypeReg.getTypeDesc(type)
visZoneListAttribs = typeDesc.getAttribsOfType('visZoneList')
if len(visZoneListAttribs) > 0:
for entId in type2ids[type]:
spec = self.getEntitySpec(entId)
for attribName in visZoneListAttribs:
for zoneNum in removedZoneNums:
while zoneNum in spec[attribName]:
spec[attribName].remove(zoneNum)
def getSpecImportsModuleName(self):
return 'toontown.coghq.SpecImports'
def getFilename(self):
return self.filename
def privGetBackupFilename(self, filename):
return '%s.bak' % filename
def saveToDisk(self, filename = None, makeBackup = 1):
if filename is None:
filename = self.filename
if filename.endswith('.pyc'):
filename = filename.replace('.pyc', '.py')
if makeBackup and self.privFileExists(filename):
try:
backupFilename = self.privGetBackupFilename(filename)
self.privRemoveFile(backupFilename)
os.rename(filename, backupFilename)
except OSError, e:
LevelSpec.notify.warning('error during backup: %s' % str(e))
LevelSpec.notify.info("writing to '%s'" % filename)
self.privRemoveFile(filename)
self.privSaveToDisk(filename)
return
def privSaveToDisk(self, filename):
retval = 1
f = file(filename, 'wb')
try:
f.write(self.getPrettyString())
except IOError:
retval = 0
f.close()
return retval
def privFileExists(self, filename):
try:
os.stat(filename)
return 1
except OSError:
return 0
def privRemoveFile(self, filename):
try:
os.remove(filename)
return 1
except OSError:
return 0
def getPrettyString(self):
import pprint
tabWidth = 4
tab = ' ' * tabWidth
globalEntitiesName = 'GlobalEntities'
scenarioEntitiesName = 'Scenario%s'
topLevelName = 'levelSpec'
def getPrettyEntityDictStr(name, dict, tabs = 0):
def t(n):
return (tabs + n) * tab
def sortList(lst, firstElements = []):
elements = list(lst)
result = []
for el in firstElements:
if el in elements:
result.append(el)
elements.remove(el)
elements.sort()
result.extend(elements)
return result
firstTypes = ('levelMgr', 'editMgr', 'zone')
firstAttribs = ('type', 'name', 'comment', 'parentEntId', 'pos', 'x', 'y', 'z', 'hpr', 'h', 'p', 'r', 'scale', 'sx', 'sy', 'sz', 'color', 'model')
str = t(0) + '%s = {\n' % name
entIds = dict.keys()
entType2ids = self.getEntType2ids(entIds)
types = sortList(entType2ids.keys(), firstTypes)
for type in types:
str += t(1) + '# %s\n' % string.upper(type)
entIds = entType2ids[type]
entIds.sort()
for entId in entIds:
str += t(1) + '%s: {\n' % entId
spec = dict[entId]
attribs = sortList(spec.keys(), firstAttribs)
for attrib in attribs:
str += t(2) + "'%s': %s,\n" % (attrib, repr(spec[attrib]))
str += t(2) + '}, # end entity %s\n' % entId
str += t(1) + '}\n'
return str
def getPrettyTopLevelDictStr(tabs = 0):
def t(n):
return (tabs + n) * tab
str = t(0) + '%s = {\n' % topLevelName
str += t(1) + "'globalEntities': %s,\n" % globalEntitiesName
str += t(1) + "'scenarios': [\n"
for i in range(self.getNumScenarios()):
str += t(2) + '%s,\n' % (scenarioEntitiesName % i)
str += t(2) + '],\n'
str += t(1) + '}\n'
return str
str = 'from %s import *\n' % self.getSpecImportsModuleName()
str += '\n'
str += getPrettyEntityDictStr('GlobalEntities', self.privGetGlobalEntityDict())
str += '\n'
numScenarios = self.getNumScenarios()
for i in range(numScenarios):
str += getPrettyEntityDictStr('Scenario%s' % i, self.privGetScenarioEntityDict(i))
str += '\n'
str += getPrettyTopLevelDictStr()
self.testPrettyString(prettyString=str)
return str
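# Illustrative sketch of the string built above (entity ids and attribs are
# hypothetical; only the overall shape is shown):
#
#     from toontown.coghq.SpecImports import *
#
#     GlobalEntities = {
#         # ZONE
#         10000: {
#             'type': 'zone',
#             'name': 'UberZone',
#             }, # end entity 10000
#         }
#
#     Scenario0 = {
#         }
#
#     levelSpec = {
#         'globalEntities': GlobalEntities,
#         'scenarios': [
#             Scenario0,
#             ],
#         }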
def _recurKeyTest(self, dict1, dict2):
s = ''
errorCount = 0
if set(dict1.keys()) != set(dict2.keys()):
return 0
for key in dict1:
if type(dict1[key]) == type({}) and type(dict2[key]) == type({}):
if not self._recurKeyTest(dict1[key], dict2[key]):
return 0
else:
strd1 = repr(dict1[key])
strd2 = repr(dict2[key])
if strd1 != strd2:
s += '\nBAD VALUE(%s): %s != %s\n' % (key, strd1, strd2)
errorCount += 1
print s
if errorCount == 0:
return 1
else:
return 0
def testPrettyString(self, prettyString = None):
if prettyString is None:
prettyString = self.getPrettyString()
if not isClient():
print 'EXECWARNING LevelSpec exec 2: %s' % prettyString
printStack()
exec prettyString
if self._recurKeyTest(levelSpec, self.specDict):
return 1
return
def checkSpecIntegrity(self):
entIds = self.getGlobalEntIds()
entIds = list2dict(entIds)
for i in range(self.getNumScenarios()):
for id in self.getScenarioEntIds(i):
entIds[id] = None
if self.entTypeReg is not None:
allEntIds = entIds
for entId in allEntIds:
spec = self.getEntitySpec(entId)
entType = spec['type']
typeDesc = self.entTypeReg.getTypeDesc(entType)
attribNames = typeDesc.getAttribNames()
attribDescs = typeDesc.getAttribDescDict()
for attrib in spec.keys():
if attrib not in attribNames:
LevelSpec.notify.warning("entId %s (%s): unknown attrib '%s', omitting" % (entId, spec['type'], attrib))
del spec[attrib]
for attribName in attribNames:
if not spec.has_key(attribName):
LevelSpec.notify.warning("entId %s (%s): missing attrib '%s'" % (entId, spec['type'], attribName))
return
def stringHash(self):
h = PM.HashVal()
h.hashString(repr(self))
return h.asHex()
def __hash__(self):
return hash(repr(self))
def __str__(self):
return 'LevelSpec'
def __repr__(self):
return 'LevelSpec(%s, scenario=%s)' % (repeatableRepr(self.specDict), repeatableRepr(self.scenario))
|
|
import time
import struct
from openbci.utils.constants import Constants as k
class ParseRaw(object):
def __init__(self,
board_type=k.BOARD_CYTON,
gains=None,
log=False,
micro_volts=False,
scaled_output=True):
self.board_type = board_type
self.gains = gains
self.log = log
self.micro_volts = micro_volts
self.scale_factors = []
self.scaled_output = scaled_output
if gains is not None:
self.scale_factors = self.get_ads1299_scale_factors(self.gains, self.micro_volts)
self.raw_data_to_sample = RawDataToSample(gains=gains,
scale=scaled_output,
scale_factors=self.scale_factors,
verbose=log)
def is_stop_byte(self, byte):
"""
Used to check whether a byte adheres to the stop byte structure of 0xCx,
where x is any hex digit 0-F (0-15 in decimal).
:param byte: {int} - The number to test
:return: {boolean} - True if `byte` follows the correct form
"""
return (byte & 0xF0) == k.RAW_BYTE_STOP
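# Illustrative check (assuming k.RAW_BYTE_STOP == 0xC0, the usual OpenBCI value):
#
#     >>> parser = ParseRaw()
#     >>> parser.is_stop_byte(0xC0), parser.is_stop_byte(0xC5), parser.is_stop_byte(0x41)
#     (True, True, False)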
def get_ads1299_scale_factors(self, gains, micro_volts=None):
out = []
for gain in gains:
scale_factor = k.ADS1299_VREF / float((pow(2, 23) - 1)) / float(gain)
if micro_volts is None:
if self.micro_volts:
scale_factor *= 1000000.
else:
if micro_volts:
scale_factor *= 1000000.
out.append(scale_factor)
return out
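# Worked example (assuming k.ADS1299_VREF == 4.5, the OpenBCI reference voltage):
# with a channel gain of 24, 4.5 / (2**23 - 1) / 24 is roughly 2.235e-08 volts
# per count, or about 0.02235 microvolts per count after the 1e6 multiplier:
#
#     >>> parser = ParseRaw(gains=[24] * 8, micro_volts=True)
#     >>> round(parser.scale_factors[0], 5)
#     0.02235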
def get_channel_data_array(self, raw_data_to_sample):
"""
:param raw_data_to_sample: RawDataToSample
:return:
"""
channel_data = []
number_of_channels = len(raw_data_to_sample.scale_factors)
daisy = number_of_channels == k.NUMBER_OF_CHANNELS_DAISY
channels_in_packet = k.NUMBER_OF_CHANNELS_CYTON
if not daisy:
channels_in_packet = number_of_channels
# Channel data arrays are always 8 long
for i in range(channels_in_packet):
counts = self.interpret_24_bit_as_int_32(
raw_data_to_sample.raw_data_packet[
(i * 3) +
k.RAW_PACKET_POSITION_CHANNEL_DATA_START:(i * 3) +
k.RAW_PACKET_POSITION_CHANNEL_DATA_START + 3
]
)
channel_data.append(
raw_data_to_sample.scale_factors[i] *
counts if raw_data_to_sample.scale else counts
)
return channel_data
def get_data_array_accel(self, raw_data_to_sample):
accel_data = []
for i in range(k.RAW_PACKET_ACCEL_NUMBER_AXIS):
counts = self.interpret_16_bit_as_int_32(
raw_data_to_sample.raw_data_packet[
k.RAW_PACKET_POSITION_START_AUX +
(i * 2): k.RAW_PACKET_POSITION_START_AUX + (i * 2) + 2])
accel_data.append(k.CYTON_ACCEL_SCALE_FACTOR_GAIN *
counts if raw_data_to_sample.scale else counts)
return accel_data
def get_raw_packet_type(self, stop_byte):
return stop_byte & 0xF
def interpret_16_bit_as_int_32(self, two_byte_buffer):
return struct.unpack('>h', two_byte_buffer)[0]
def interpret_24_bit_as_int_32(self, three_byte_buffer):
# 3 byte ints
unpacked = struct.unpack('3B', three_byte_buffer)
# 3-byte int in two's complement
if unpacked[0] > 127:
pre_fix = bytes(bytearray.fromhex('FF'))
else:
pre_fix = bytes(bytearray.fromhex('00'))
three_byte_buffer = pre_fix + three_byte_buffer
# unpack big-endian (>) signed integer (i) (makes unpacking platform independent)
return struct.unpack('>i', three_byte_buffer)[0]
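# Illustrative sign-extension check: the most significant bit of the 24-bit
# value decides whether 0xFF or 0x00 is prepended, so:
#
#     >>> parser = ParseRaw()
#     >>> parser.interpret_24_bit_as_int_32(b'\x7f\xff\xff')
#     8388607
#     >>> parser.interpret_24_bit_as_int_32(b'\xff\xff\xff')
#     -1
#     >>> parser.interpret_24_bit_as_int_32(b'\x80\x00\x00')
#     -8388608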
def parse_packet_standard_accel(self, raw_data_to_sample):
"""
:param raw_data_to_sample: RawDataToSample
:return:
"""
# Check to make sure data is not null.
if raw_data_to_sample is None:
raise RuntimeError(k.ERROR_UNDEFINED_OR_NULL_INPUT)
if raw_data_to_sample.raw_data_packet is None:
raise RuntimeError(k.ERROR_UNDEFINED_OR_NULL_INPUT)
# Check to make sure the buffer is the right size.
if len(raw_data_to_sample.raw_data_packet) != k.RAW_PACKET_SIZE:
raise RuntimeError(k.ERROR_INVALID_BYTE_LENGTH)
# Verify the correct stop byte.
if raw_data_to_sample.raw_data_packet[0] != k.RAW_BYTE_START:
raise RuntimeError(k.ERROR_INVALID_BYTE_START)
sample_object = OpenBCISample()
sample_object.accel_data = self.get_data_array_accel(raw_data_to_sample)
sample_object.channel_data = self.get_channel_data_array(raw_data_to_sample)
sample_object.sample_number = raw_data_to_sample.raw_data_packet[
k.RAW_PACKET_POSITION_SAMPLE_NUMBER
]
sample_object.start_byte = raw_data_to_sample.raw_data_packet[
k.RAW_PACKET_POSITION_START_BYTE
]
sample_object.stop_byte = raw_data_to_sample.raw_data_packet[
k.RAW_PACKET_POSITION_STOP_BYTE
]
sample_object.valid = True
now_ms = int(round(time.time() * 1000))
sample_object.timestamp = now_ms
sample_object.boardTime = 0
return sample_object
def parse_packet_standard_raw_aux(self, raw_data_to_sample):
pass
def parse_packet_time_synced_accel(self, raw_data_to_sample):
pass
def parse_packet_time_synced_raw_aux(self, raw_data_to_sample):
pass
def set_ads1299_scale_factors(self, gains, micro_volts=None):
self.scale_factors = self.get_ads1299_scale_factors(gains, micro_volts=micro_volts)
def transform_raw_data_packet_to_sample(self, raw_data):
"""
Used to transform a raw data packet into a fully qualified sample.
:param raw_data:
:return:
"""
try:
self.raw_data_to_sample.raw_data_packet = raw_data
packet_type = self.get_raw_packet_type(raw_data[k.RAW_PACKET_POSITION_STOP_BYTE])
if packet_type == k.RAW_PACKET_TYPE_STANDARD_ACCEL:
sample = self.parse_packet_standard_accel(self.raw_data_to_sample)
elif packet_type == k.RAW_PACKET_TYPE_STANDARD_RAW_AUX:
sample = self.parse_packet_standard_raw_aux(self.raw_data_to_sample)
elif packet_type == k.RAW_PACKET_TYPE_ACCEL_TIME_SYNC_SET or \
packet_type == k.RAW_PACKET_TYPE_ACCEL_TIME_SYNCED:
sample = self.parse_packet_time_synced_accel(self.raw_data_to_sample)
elif packet_type == k.RAW_PACKET_TYPE_RAW_AUX_TIME_SYNC_SET or \
packet_type == k.RAW_PACKET_TYPE_RAW_AUX_TIME_SYNCED:
sample = self.parse_packet_time_synced_raw_aux(self.raw_data_to_sample)
else:
sample = OpenBCISample()
sample.error = 'This module does not support packet type %d' % packet_type
sample.valid = False
sample.packet_type = packet_type
except BaseException as e:
sample = OpenBCISample()
if hasattr(e, 'message'):
sample.error = e.message
else:
sample.error = e
sample.valid = False
return sample
def make_daisy_sample_object_wifi(self, lower_sample_object, upper_sample_object):
"""
/**
* @description Used to make one sample object from two sample
* objects. The sample number of the new daisy sample will be the
* upperSampleObject's sample number divided by 2. This allows us
* to preserve consecutive sample numbers that flip over at 127
* instead of 255 for an 8 channel. The daisySampleObject will
* also have one `channelData` array with 16 elements inside it,
* with the lowerSampleObject in the lower indices and the
* upperSampleObject in the upper set of indices. The auxData from
* both channels shall be captured in an object called `auxData`
* which contains two arrays referenced by keys `lower` and
* `upper` for the `lowerSampleObject` and `upperSampleObject`,
* respectively. The timestamps shall be averaged and moved into
* an object called `timestamp`. Further, the un-averaged
* timestamps from the `lowerSampleObject` and `upperSampleObject`
* shall be placed into an object called `_timestamps` which shall
* contain two keys `lower` and `upper` which contain the original
* timestamps for their respective sampleObjects.
* @param lowerSampleObject {Object} - Lower 8 channels with odd sample number
* @param upperSampleObject {Object} - Upper 8 channels with even sample number
* @returns {Object} - The new merged daisy sample object
*/
"""
daisy_sample_object = OpenBCISample()
if lower_sample_object.channel_data is not None:
daisy_sample_object.channel_data = lower_sample_object.channel_data + \
upper_sample_object.channel_data
daisy_sample_object.sample_number = upper_sample_object.sample_number
daisy_sample_object.id = daisy_sample_object.sample_number
daisy_sample_object.aux_data = {
'lower': lower_sample_object.aux_data,
'upper': upper_sample_object.aux_data
}
if lower_sample_object.timestamp:
daisy_sample_object.timestamp = lower_sample_object.timestamp
daisy_sample_object.stop_byte = lower_sample_object.stop_byte
daisy_sample_object._timestamps = {
'lower': lower_sample_object.timestamp,
'upper': upper_sample_object.timestamp
}
if lower_sample_object.accel_data:
if lower_sample_object.accel_data[0] > 0 or lower_sample_object.accel_data[1] > 0 or \
lower_sample_object.accel_data[2] > 0:
daisy_sample_object.accel_data = lower_sample_object.accel_data
else:
daisy_sample_object.accel_data = upper_sample_object.accel_data
daisy_sample_object.valid = True
return daisy_sample_object
"""
/**
* @description Used transform raw data packets into fully qualified packets
* @param o {RawDataToSample} - Used to hold data and configuration settings
* @return {Array} samples An array of {Sample}
* @author AJ Keller (@aj-ptw)
*/
function transformRawDataPacketsToSample (o) {
let samples = [];
for (let i = 0; i < o.rawDataPackets.length; i++) {
o.rawDataPacket = o.rawDataPackets[i];
const sample = transformRawDataPacketToSample(o);
samples.push(sample);
if (sample.hasOwnProperty('sampleNumber')) {
o['lastSampleNumber'] = sample.sampleNumber;
} else if (!sample.hasOwnProperty('impedanceValue')) {
o['lastSampleNumber'] = o.rawDataPacket[k.OBCIPacketPositionSampleNumber];
}
}
return samples;
}
"""
def transform_raw_data_packets_to_sample(self, raw_data_packets):
samples = []
for raw_data_packet in raw_data_packets:
sample = self.transform_raw_data_packet_to_sample(raw_data_packet)
samples.append(sample)
self.raw_data_to_sample.last_sample_number = sample.sample_number
return samples
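# Minimal usage sketch (raw_packet is a hypothetical bytearray of
# k.RAW_PACKET_SIZE bytes as read from a Cyton board):
#
#     parser = ParseRaw(gains=[24] * 8, micro_volts=True)
#     samples = parser.transform_raw_data_packets_to_sample([raw_packet])
#     for sample in samples:
#         print(sample.sample_number, sample.channel_data)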
class RawDataToSample(object):
"""Object encapulsating a parsing object."""
def __init__(self,
accel_data=None,
gains=None,
last_sample_number=0,
raw_data_packets=None,
raw_data_packet=None,
scale=True,
scale_factors=None,
time_offset=0,
verbose=False):
"""
RawDataToSample
:param accel_data: list
The channel settings array
:param gains: list
The gains of each channel, this is used to derive number of channels
:param last_sample_number: int
:param raw_data_packets: list
list of raw_data_packets
:param raw_data_packet: bytearray
A single raw data packet
:param scale: boolean
Default `True`. Whether counts are scaled using the channel scale
factors; a gain of 24 is assumed for Cyton and 51 for Ganglion by default.
:param scale_factors: list
Calculated scale factors
:param time_offset: int
For non time stamp use cases i.e. 0xC0 or 0xC1 (default and raw aux)
:param verbose:
"""
self.accel_data = accel_data if accel_data is not None else []
self.gains = gains if gains is not None else []
self.time_offset = time_offset
self.last_sample_number = last_sample_number
self.raw_data_packets = raw_data_packets if raw_data_packets is not None else []
self.raw_data_packet = raw_data_packet
self.scale = scale
self.scale_factors = scale_factors if scale_factors is not None else []
self.verbose = verbose
class OpenBCISample(object):
"""Object encapulsating a single sample from the OpenBCI board."""
def __init__(self,
aux_data=None,
board_time=0,
channel_data=None,
error=None,
imp_data=None,
packet_type=k.RAW_PACKET_TYPE_STANDARD_ACCEL,
protocol=k.PROTOCOL_WIFI,
sample_number=0,
start_byte=0,
stop_byte=0,
valid=True,
accel_data=None):
self.aux_data = aux_data if aux_data is not None else []
self.board_time = board_time
self.channel_data = channel_data if channel_data is not None else []
self.error = error
self.id = sample_number
self.imp_data = imp_data if imp_data is not None else []
self.packet_type = packet_type
self.protocol = protocol
self.sample_number = sample_number
self.start_byte = start_byte
self.stop_byte = stop_byte
self.timestamp = 0
self._timestamps = {}
self.valid = valid
self.accel_data = accel_data if accel_data is not None else []
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import mock
from neutron.db import rbac_db_models
from neutron.objects import base as obj_base
from neutron.objects import network
from neutron.objects.qos import binding
from neutron.objects.qos import policy
from neutron.tests.unit.objects import test_base as obj_test_base
from neutron.tests.unit import testlib_api
class _NetworkRBACBase(object):
def get_random_object_fields(self, obj_cls=None):
fields = (super(_NetworkRBACBase, self).
get_random_object_fields(obj_cls))
rnd_actions = self._test_class.db_model.get_valid_actions()
idx = random.randint(0, len(rnd_actions) - 1)
fields['action'] = rnd_actions[idx]
return fields
class NetworkRBACDbObjectTestCase(_NetworkRBACBase,
obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = network.NetworkRBAC
def setUp(self):
self._mock_get_valid_actions = mock.patch.object(
rbac_db_models.NetworkRBAC, 'get_valid_actions',
return_value=(rbac_db_models.ACCESS_EXTERNAL,
rbac_db_models.ACCESS_SHARED))
self.mock_get_valid_actions = self._mock_get_valid_actions.start()
super(NetworkRBACDbObjectTestCase, self).setUp()
for obj in self.db_objs:
net_obj = network.Network(self.context, id=obj['object_id'])
net_obj.create()
def _create_test_network_rbac(self):
self.objs[0].create()
return self.objs[0]
def test_object_version_degradation_1_1_to_1_0_no_id_no_project_id(self):
network_rbac_obj = self._create_test_network_rbac()
network_rbac_obj = network_rbac_obj.obj_to_primitive('1.0')
self.assertNotIn('project_id',
network_rbac_obj['versioned_object.data'])
self.assertNotIn('id', network_rbac_obj['versioned_object.data'])
class NetworkRBACIfaceOjectTestCase(_NetworkRBACBase,
obj_test_base.BaseObjectIfaceTestCase):
_test_class = network.NetworkRBAC
def setUp(self):
self._mock_get_valid_actions = mock.patch.object(
rbac_db_models.NetworkRBAC, 'get_valid_actions',
return_value=(rbac_db_models.ACCESS_EXTERNAL,
rbac_db_models.ACCESS_SHARED))
self.mock_get_valid_actions = self._mock_get_valid_actions.start()
super(NetworkRBACIfaceOjectTestCase, self).setUp()
class NetworkDhcpAgentBindingObjectIfaceTestCase(
obj_test_base.BaseObjectIfaceTestCase):
_test_class = network.NetworkDhcpAgentBinding
class NetworkDhcpAgentBindingDbObjectTestCase(
obj_test_base.BaseDbObjectTestCase, testlib_api.SqlTestCase):
_test_class = network.NetworkDhcpAgentBinding
def setUp(self):
super(NetworkDhcpAgentBindingDbObjectTestCase, self).setUp()
self._network = self._create_test_network()
self.update_obj_fields(
{'network_id': self._network.id,
'dhcp_agent_id': lambda: self._create_test_agent_id()})
class NetworkPortSecurityIfaceObjTestCase(
obj_test_base.BaseObjectIfaceTestCase):
_test_class = network.NetworkPortSecurity
class NetworkPortSecurityDbObjTestCase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = network.NetworkPortSecurity
def setUp(self):
super(NetworkPortSecurityDbObjTestCase, self).setUp()
self.update_obj_fields({'id': lambda: self._create_test_network_id()})
class NetworkSegmentIfaceObjTestCase(obj_test_base.BaseObjectIfaceTestCase):
_test_class = network.NetworkSegment
def setUp(self):
super(NetworkSegmentIfaceObjTestCase, self).setUp()
# TODO(ihrachys): we should not need to duplicate that in every single
# place, instead we should move the default pager into the base class
# attribute and pull it from there for testing matters. Leaving it for
# a follow up.
self.pager_map[self._test_class.obj_name()] = (
obj_base.Pager(
sorts=[('network_id', True), ('segment_index', True)]))
class NetworkSegmentDbObjTestCase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = network.NetworkSegment
def setUp(self):
super(NetworkSegmentDbObjTestCase, self).setUp()
self.update_obj_fields(
{'network_id': lambda: self._create_test_network_id()})
def test_hosts(self):
hosts = ['host1', 'host2']
obj = self._make_object(self.obj_fields[0])
obj.hosts = hosts
obj.create()
obj = network.NetworkSegment.get_object(self.context, id=obj.id)
self.assertEqual(hosts, obj.hosts)
obj.hosts = ['host3']
obj.update()
obj = network.NetworkSegment.get_object(self.context, id=obj.id)
self.assertEqual(['host3'], obj.hosts)
obj.hosts = None
obj.update()
obj = network.NetworkSegment.get_object(self.context, id=obj.id)
self.assertFalse(obj.hosts)
class NetworkObjectIfaceTestCase(obj_test_base.BaseObjectIfaceTestCase):
_test_class = network.Network
def setUp(self):
super(NetworkObjectIfaceTestCase, self).setUp()
self.pager_map[network.NetworkSegment.obj_name()] = (
obj_base.Pager(
sorts=[('network_id', True), ('segment_index', True)]))
class NetworkDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = network.Network
@mock.patch.object(policy.QosPolicy, 'unset_default')
def test_qos_policy_id(self, *mocks):
policy_obj = policy.QosPolicy(self.context)
policy_obj.create()
obj = self._make_object(self.obj_fields[0])
obj.qos_policy_id = policy_obj.id
obj.create()
obj = network.Network.get_object(self.context, id=obj.id)
self.assertEqual(policy_obj.id, obj.qos_policy_id)
policy_obj2 = policy.QosPolicy(self.context)
policy_obj2.create()
obj.qos_policy_id = policy_obj2.id
obj.update()
obj = network.Network.get_object(self.context, id=obj.id)
self.assertEqual(policy_obj2.id, obj.qos_policy_id)
obj.qos_policy_id = None
obj.update()
obj = network.Network.get_object(self.context, id=obj.id)
self.assertIsNone(obj.qos_policy_id)
@mock.patch.object(policy.QosPolicy, 'unset_default')
def test__attach_qos_policy(self, *mocks):
obj = self._make_object(self.obj_fields[0])
obj.create()
policy_obj = policy.QosPolicy(self.context)
policy_obj.create()
obj._attach_qos_policy(policy_obj.id)
obj = network.Network.get_object(self.context, id=obj.id)
self.assertEqual(policy_obj.id, obj.qos_policy_id)
qos_binding_obj = binding.QosPolicyNetworkBinding.get_object(
self.context, network_id=obj.id)
self.assertEqual(qos_binding_obj.policy_id, obj.qos_policy_id)
old_policy_id = policy_obj.id
policy_obj2 = policy.QosPolicy(self.context)
policy_obj2.create()
obj._attach_qos_policy(policy_obj2.id)
obj = network.Network.get_object(self.context, id=obj.id)
self.assertEqual(policy_obj2.id, obj.qos_policy_id)
qos_binding_obj2 = binding.QosPolicyNetworkBinding.get_object(
self.context, network_id=obj.id)
self.assertEqual(qos_binding_obj2.policy_id, obj.qos_policy_id)
qos_binding_obj = binding.QosPolicyNetworkBinding.get_objects(
self.context, policy_id=old_policy_id)
self.assertEqual(0, len(qos_binding_obj))
def test_dns_domain(self):
obj = self._make_object(self.obj_fields[0])
obj.dns_domain = 'foo.com'
obj.create()
obj = network.Network.get_object(self.context, id=obj.id)
self.assertEqual('foo.com', obj.dns_domain)
obj.dns_domain = 'bar.com'
obj.update()
obj = network.Network.get_object(self.context, id=obj.id)
self.assertEqual('bar.com', obj.dns_domain)
obj.dns_domain = None
obj.update()
obj = network.Network.get_object(self.context, id=obj.id)
self.assertIsNone(obj.dns_domain)
def test__set_dns_domain(self):
obj = self._make_object(self.obj_fields[0])
obj.create()
obj._set_dns_domain('foo.com')
obj = network.Network.get_object(self.context, id=obj.id)
self.assertEqual('foo.com', obj.dns_domain)
obj._set_dns_domain('bar.com')
obj = network.Network.get_object(self.context, id=obj.id)
self.assertEqual('bar.com', obj.dns_domain)
class SegmentHostMappingIfaceObjectTestCase(
obj_test_base.BaseObjectIfaceTestCase):
_test_class = network.SegmentHostMapping
class SegmentHostMappingDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = network.SegmentHostMapping
def setUp(self):
super(SegmentHostMappingDbObjectTestCase, self).setUp()
self.update_obj_fields(
{'segment_id': lambda: self._create_test_segment_id()})
class NetworkDNSDomainIfaceObjectTestcase(
obj_test_base.BaseObjectIfaceTestCase):
_test_class = network.NetworkDNSDomain
class NetworkDNSDomainDbObjectTestcase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = network.NetworkDNSDomain
def setUp(self):
super(NetworkDNSDomainDbObjectTestcase, self).setUp()
self.update_obj_fields(
{'network_id': lambda: self._create_test_network_id()})
class ExternalNetworkIfaceObjectTestCase(
obj_test_base.BaseObjectIfaceTestCase):
_test_class = network.ExternalNetwork
class ExternalNetworkDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = network.ExternalNetwork
def setUp(self):
super(ExternalNetworkDbObjectTestCase, self).setUp()
self.update_obj_fields(
{'network_id': lambda: self._create_test_network_id()})
|
|
# Dual annealing unit tests implementation.
# Copyright (c) 2018 Sylvain Gubian <[email protected]>,
# Yang Xiang <[email protected]>
# Author: Sylvain Gubian, PMP S.A.
"""
Unit tests for the dual annealing global optimizer
"""
from scipy.optimize import dual_annealing
from scipy.optimize._dual_annealing import EnergyState
from scipy.optimize._dual_annealing import LocalSearchWrapper
from scipy.optimize._dual_annealing import ObjectiveFunWrapper
from scipy.optimize._dual_annealing import StrategyChain
from scipy.optimize._dual_annealing import VisitingDistribution
from scipy.optimize import rosen, rosen_der
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose, assert_array_less
from pytest import raises as assert_raises
from scipy._lib._util import check_random_state
from scipy._lib._pep440 import Version
class TestDualAnnealing:
def setup_method(self):
# A function that always returns infinity, for initialization tests
self.weirdfunc = lambda x: np.inf
# 2-D bounds for testing function
self.ld_bounds = [(-5.12, 5.12)] * 2
# 4-D bounds for testing function
self.hd_bounds = self.ld_bounds * 4
# Number of values to be generated for testing visit function
self.nbtestvalues = 5000
self.high_temperature = 5230
self.low_temperature = 0.1
self.qv = 2.62
self.seed = 1234
self.rs = check_random_state(self.seed)
self.nb_fun_call = 0
self.ngev = 0
def callback(self, x, f, context):
# For testing the callback mechanism. Should stop for f <= 1 as
# the callback function returns True
if f <= 1.0:
return True
def func(self, x, args=()):
# Using Rastrigin function for performing tests
if args:
shift = args
else:
shift = 0
y = np.sum((x - shift) ** 2 - 10 * np.cos(2 * np.pi * (
x - shift))) + 10 * np.size(x) + shift
self.nb_fun_call += 1
return y
def rosen_der_wrapper(self, x, args=()):
self.ngev += 1
return rosen_der(x, *args)
# FIXME: there are some discontinuities in behaviour as a function of `qv`,
# this needs investigating - see gh-12384
@pytest.mark.parametrize('qv', [1.1, 1.41, 2, 2.62, 2.9])
def test_visiting_stepping(self, qv):
lu = list(zip(*self.ld_bounds))
lower = np.array(lu[0])
upper = np.array(lu[1])
dim = lower.size
vd = VisitingDistribution(lower, upper, qv, self.rs)
values = np.zeros(dim)
x_step_low = vd.visiting(values, 0, self.high_temperature)
# Make sure that only the first component is changed
assert_equal(np.not_equal(x_step_low, 0), True)
values = np.zeros(dim)
x_step_high = vd.visiting(values, dim, self.high_temperature)
# Make sure that component other than at dim has changed
assert_equal(np.not_equal(x_step_high[0], 0), True)
@pytest.mark.parametrize('qv', [2.25, 2.62, 2.9])
def test_visiting_dist_high_temperature(self, qv):
lu = list(zip(*self.ld_bounds))
lower = np.array(lu[0])
upper = np.array(lu[1])
vd = VisitingDistribution(lower, upper, qv, self.rs)
# values = np.zeros(self.nbtestvalues)
# for i in np.arange(self.nbtestvalues):
# values[i] = vd.visit_fn(self.high_temperature)
values = vd.visit_fn(self.high_temperature, self.nbtestvalues)
# Visiting distribution is a distorted version of Cauchy-Lorentz
# distribution, and has no 1st or higher moments (no mean defined,
# no variance defined).
# Check that big tails values are generated
assert_array_less(np.min(values), 1e-10)
assert_array_less(1e+10, np.max(values))
def test_reset(self):
owf = ObjectiveFunWrapper(self.weirdfunc)
lu = list(zip(*self.ld_bounds))
lower = np.array(lu[0])
upper = np.array(lu[1])
es = EnergyState(lower, upper)
assert_raises(ValueError, es.reset, owf, check_random_state(None))
def test_low_dim(self):
ret = dual_annealing(
self.func, self.ld_bounds, seed=self.seed)
assert_allclose(ret.fun, 0., atol=1e-12)
assert ret.success
def test_high_dim(self):
ret = dual_annealing(self.func, self.hd_bounds, seed=self.seed)
assert_allclose(ret.fun, 0., atol=1e-12)
assert ret.success
def test_low_dim_no_ls(self):
ret = dual_annealing(self.func, self.ld_bounds,
no_local_search=True, seed=self.seed)
assert_allclose(ret.fun, 0., atol=1e-4)
def test_high_dim_no_ls(self):
ret = dual_annealing(self.func, self.hd_bounds,
no_local_search=True, seed=self.seed)
assert_allclose(ret.fun, 0., atol=1e-4)
def test_nb_fun_call(self):
ret = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
assert_equal(self.nb_fun_call, ret.nfev)
def test_nb_fun_call_no_ls(self):
ret = dual_annealing(self.func, self.ld_bounds,
no_local_search=True, seed=self.seed)
assert_equal(self.nb_fun_call, ret.nfev)
def test_max_reinit(self):
assert_raises(ValueError, dual_annealing, self.weirdfunc,
self.ld_bounds)
def test_reproduce(self):
res1 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
res2 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
res3 = dual_annealing(self.func, self.ld_bounds, seed=self.seed)
# If the results are reproducible, the x components found have to
# be exactly the same, which is not the case without seeding
assert_equal(res1.x, res2.x)
assert_equal(res1.x, res3.x)
@pytest.mark.skipif(Version(np.__version__) < Version('1.17'),
reason='Generator not available for numpy < 1.17')
def test_rand_gen(self):
# check that np.random.Generator can be used (numpy >= 1.17)
# obtain a np.random.Generator object
rng = np.random.default_rng(1)
res1 = dual_annealing(self.func, self.ld_bounds, seed=rng)
# seed again
rng = np.random.default_rng(1)
res2 = dual_annealing(self.func, self.ld_bounds, seed=rng)
# If the results are reproducible, the x components found have to
# be exactly the same, which is not the case without seeding
assert_equal(res1.x, res2.x)
def test_bounds_integrity(self):
wrong_bounds = [(-5.12, 5.12), (1, 0), (5.12, 5.12)]
assert_raises(ValueError, dual_annealing, self.func,
wrong_bounds)
def test_bound_validity(self):
invalid_bounds = [(-5, 5), (-np.inf, 0), (-5, 5)]
assert_raises(ValueError, dual_annealing, self.func,
invalid_bounds)
invalid_bounds = [(-5, 5), (0, np.inf), (-5, 5)]
assert_raises(ValueError, dual_annealing, self.func,
invalid_bounds)
invalid_bounds = [(-5, 5), (0, np.nan), (-5, 5)]
assert_raises(ValueError, dual_annealing, self.func,
invalid_bounds)
def test_deprecated_local_search_options_bounds(self):
func = lambda x: np.sum((x-5) * (x-1))
bounds = list(zip([-6, -5], [6, 5]))
# Test bounds can be passed (see gh-10831)
with pytest.warns(DeprecationWarning, match=r"dual_annealing argument "):
dual_annealing(
func,
bounds=bounds,
local_search_options={"method": "SLSQP", "bounds": bounds})
with pytest.warns(RuntimeWarning, match=r"Method CG cannot handle "):
dual_annealing(
func,
bounds=bounds,
minimizer_kwargs={"method": "CG", "bounds": bounds})
def test_minimizer_kwargs_bounds(self):
func = lambda x: np.sum((x-5) * (x-1))
bounds = list(zip([-6, -5], [6, 5]))
# Test bounds can be passed (see gh-10831)
dual_annealing(
func,
bounds=bounds,
minimizer_kwargs={"method": "SLSQP", "bounds": bounds})
with pytest.warns(RuntimeWarning, match=r"Method CG cannot handle "):
dual_annealing(
func,
bounds=bounds,
minimizer_kwargs={"method": "CG", "bounds": bounds})
def test_max_fun_ls(self):
ret = dual_annealing(self.func, self.ld_bounds, maxfun=100,
seed=self.seed)
ls_max_iter = min(max(
len(self.ld_bounds) * LocalSearchWrapper.LS_MAXITER_RATIO,
LocalSearchWrapper.LS_MAXITER_MIN),
LocalSearchWrapper.LS_MAXITER_MAX)
assert ret.nfev <= 100 + ls_max_iter
assert not ret.success
def test_max_fun_no_ls(self):
ret = dual_annealing(self.func, self.ld_bounds,
no_local_search=True, maxfun=500, seed=self.seed)
assert ret.nfev <= 500
assert not ret.success
def test_maxiter(self):
ret = dual_annealing(self.func, self.ld_bounds, maxiter=700,
seed=self.seed)
assert ret.nit <= 700
# Testing that args are passed correctly for dual_annealing
def test_fun_args_ls(self):
ret = dual_annealing(self.func, self.ld_bounds,
args=((3.14159,)), seed=self.seed)
assert_allclose(ret.fun, 3.14159, atol=1e-6)
# Testing that args are passed correctly for pure simulated annealing
def test_fun_args_no_ls(self):
ret = dual_annealing(self.func, self.ld_bounds,
args=((3.14159, )), no_local_search=True,
seed=self.seed)
assert_allclose(ret.fun, 3.14159, atol=1e-4)
def test_callback_stop(self):
# Testing that the callback makes the algorithm stop for
# fun value <= 1.0 (see callback method)
ret = dual_annealing(self.func, self.ld_bounds,
callback=self.callback, seed=self.seed)
assert ret.fun <= 1.0
assert 'stop early' in ret.message[0]
assert not ret.success
@pytest.mark.parametrize('method, atol', [
('Nelder-Mead', 2e-5),
('COBYLA', 1e-5),
('Powell', 1e-8),
('CG', 1e-8),
('BFGS', 1e-8),
('TNC', 1e-8),
('SLSQP', 2e-7),
])
def test_multi_ls_minimizer(self, method, atol):
ret = dual_annealing(self.func, self.ld_bounds,
minimizer_kwargs=dict(method=method),
seed=self.seed)
assert_allclose(ret.fun, 0., atol=atol)
def test_wrong_restart_temp(self):
assert_raises(ValueError, dual_annealing, self.func,
self.ld_bounds, restart_temp_ratio=1)
assert_raises(ValueError, dual_annealing, self.func,
self.ld_bounds, restart_temp_ratio=0)
def test_gradient_gnev(self):
minimizer_opts = {
'jac': self.rosen_der_wrapper,
}
ret = dual_annealing(rosen, self.ld_bounds,
minimizer_kwargs=minimizer_opts,
seed=self.seed)
assert ret.njev == self.ngev
def test_from_docstring(self):
func = lambda x: np.sum(x * x - 10 * np.cos(2 * np.pi * x)) + 10 * np.size(x)
lw = [-5.12] * 10
up = [5.12] * 10
ret = dual_annealing(func, bounds=list(zip(lw, up)), seed=1234)
assert_allclose(ret.x,
[-4.26437714e-09, -3.91699361e-09, -1.86149218e-09,
-3.97165720e-09, -6.29151648e-09, -6.53145322e-09,
-3.93616815e-09, -6.55623025e-09, -6.05775280e-09,
-5.00668935e-09], atol=4e-8)
assert_allclose(ret.fun, 0.000000, atol=5e-13)
@pytest.mark.parametrize('new_e, temp_step, accepted, accept_rate', [
(0, 100, 1000, 1.0097587941791923),
(0, 2, 1000, 1.2599210498948732),
(10, 100, 878, 0.8786035869128718),
(10, 60, 695, 0.6812920690579612),
(2, 100, 990, 0.9897404249173424),
])
def test_accept_reject_probabilistic(
self, new_e, temp_step, accepted, accept_rate):
# Test accepts unconditionally with e < current_energy and
# probabilistically with e > current_energy
rs = check_random_state(123)
count_accepted = 0
iterations = 1000
accept_param = -5
current_energy = 1
for _ in range(iterations):
energy_state = EnergyState(lower=None, upper=None)
# Set energy state with current_energy, any location.
energy_state.update_current(current_energy, [0])
chain = StrategyChain(
accept_param, None, None, None, rs, energy_state)
# Normally this is set in run()
chain.temperature_step = temp_step
# Check if update is accepted.
chain.accept_reject(j=1, e=new_e, x_visit=[2])
if energy_state.current_energy == new_e:
count_accepted += 1
assert count_accepted == accepted
# Check accept rate
pqv = 1 - (1 - accept_param) * (new_e - current_energy) / temp_step
rate = 0 if pqv <= 0 else np.exp(np.log(pqv) / (1 - accept_param))
assert_allclose(rate, accept_rate)
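# Illustrative helper, not part of the scipy test suite: a sketch of the GSA
# acceptance probability encoded by the parametrized expectations above, using
# the same formula as the rate check in test_accept_reject_probabilistic.
# The helper name and the default accept_param=-5 are assumptions of this sketch.
def _gsa_acceptance_probability(new_e, current_energy, temp_step, accept_param=-5):
    # A candidate with energy below the current one gives pqv > 1, i.e. it is
    # always accepted; otherwise acceptance decays with the energy gap.
    pqv = 1 - (1 - accept_param) * (new_e - current_energy) / temp_step
    return 0.0 if pqv <= 0 else np.exp(np.log(pqv) / (1 - accept_param))
# For instance, _gsa_acceptance_probability(10, 1, 100) is about 0.8786, which
# matches the 878/1000 accepted moves expected in the parametrization above.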
|
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
from constants import US_STATES, ACTIONS
# control models
class ControlModel(models.Model):
"""an abstract model that only contains a name - serves as a control for selection options"""
class Meta:
abstract = True
ordering = ('name', )
name = models.CharField(max_length=255, unique=True, db_index=True)
def __str__(self):
return self.name
class Attribute(ControlModel):
def get_absolute_url(self):
return reverse('app:attribute__view', args=[str(self.pk)])
class Brand(ControlModel):
def get_absolute_url(self):
return reverse('app:brand__view', args=[str(self.pk)])
class Category(ControlModel):
def get_absolute_url(self):
return reverse('app:category__view', args=[str(self.pk)])
class ContactLabel(ControlModel):
def get_absolute_url(self):
return reverse('app:contact_label__view', args=[str(self.pk)])
class CostAdjustmentReason(ControlModel):
def get_absolute_url(self):
return reverse('app:cost_adjustment_reason__view', args=[str(self.pk)])
class QuantityAdjustmentReason(ControlModel):
def get_absolute_url(self):
return reverse('app:quantity_adjustment_reason__view', args=[str(self.pk)])
class Supplier(ControlModel):
def get_absolute_url(self):
return reverse('app:supplier__view', args=[str(self.pk)])
# full models
class Sku(models.Model):
class Meta:
ordering = ('id', )
# id
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=255, db_index=True)
upc = models.CharField(max_length=255, blank=True, null=True, db_index=True, default=None)
# categorization
brand = models.ForeignKey(Brand)
categories = models.ManyToManyField(Category)
# inventory
quantity_on_hand = models.IntegerField(default=0, db_index=True)
location = models.CharField(max_length=255, blank=True, null=True, db_index=True, default=None)
last_location = models.CharField(max_length=255, blank=True, null=True, db_index=True, default=None)
# dat team
owner = models.ForeignKey(User)
supplier = models.ForeignKey(Supplier)
lead_time = models.IntegerField(blank=True, null=True, default=None)
minimum_quantity = models.IntegerField(default=0)
notify_at_threshold = models.BooleanField(default=False)
cost = models.FloatField(blank=True, null=True, default=0)
supplier_sku = models.CharField(max_length=255, blank=True, null=True, default=None)
case_quantity = models.IntegerField(blank=True, null=True, default=None)
in_live_deal = models.BooleanField(default=False, db_index=True)
is_subscription = models.BooleanField(default=False, db_index=True)
notes = models.CharField(max_length=255, blank=True, null=True, db_index=True, default=None)
action = models.CharField(max_length=255, choices=ACTIONS, blank=True, null=True, db_index=True, default=None)
action_date = models.CharField(max_length=255, blank=True, null=True, default=None)
# stamp
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
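# Note on save() below: quantity_on_hand and cost are never meant to be edited
# directly. Unless one of the qty_change/cost_change/gdocs flags is passed (as
# QuantityAdjustment and CostAdjustment do), the asserts reject any change to
# those two fields. save() also records the previous location in last_location
# and assigns the next integer id to new rows.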
def save(self, *args, **kwargs):
if not self.id: # new sku
self.id = Sku.objects.all().aggregate(models.Max('id'))['id__max'] + 1
super(Sku, self).save(*args, **kwargs)
else: # existing sku
try:
old = Sku.objects.get(pk=self.pk)
if old.location != self.location:
self.last_location = old.location
except Sku.DoesNotExist:
pass
if 'qty_change' in kwargs:
del kwargs['qty_change']
super(Sku, self).save(*args, **kwargs)
elif 'cost_change' in kwargs:
del kwargs['cost_change']
super(Sku, self).save(*args, **kwargs)
elif 'gdocs' in kwargs:
del kwargs['gdocs']
super(Sku, self).save(*args, **kwargs)
else:
old = Sku.objects.get(pk=self.pk)
assert old.quantity_on_hand == self.quantity_on_hand
assert old.cost == self.cost
super(Sku, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('app:sku__view', args=[str(self.pk)])
def _attributes(self):
attrs = []
# qs = SkuAttribute.objects.filter(sku=self)
# for obj in qs:
# if obj.attribute.name.lower().endswith('bulk'):
# attrs.append('Bulk')
# elif obj.attribute.name.lower().endswith('date'):
# attrs.append('(%s) %s' % (obj.attribute.name.split()[0], obj.value))
# else:
# attrs.append(obj.value)
for attr in self.skuattribute_set.all():
attrs.append((attr.attribute.name, attr.value))
return attrs
attributes = property(_attributes)
def _description(self):
attrs = self.attributes
if len(attrs):
# return '[%d] %s %s : %s' % (self.id, self.brand, self.name, ', '.join(attrs))
attrs = ', '.join(['(%s) %s' % (attr[0], attr[1]) for attr in attrs])
return '[%d] %s %s : %s' % (self.id, self.brand, self.name, attrs)
else:
return '[%d] %s %s' % (self.id, self.brand, self.name)
description = property(_description)
def __str__(self):
return ''.join([c for c in self.description if ord(c) < 128])
class SkuAttribute(models.Model):
class Meta:
ordering = ('sku__id', 'attribute__name')
unique_together = (
('sku', 'attribute'),
)
sku = models.ForeignKey(Sku)
attribute = models.ForeignKey(Attribute)
value = models.CharField(max_length=255, db_index=True)
def get_absolute_url(self):
return reverse('app:sku_attribute__view', args=[str(self.pk)])
def __str__(self):
return '%s@%s : %s' % (self.sku.id, self.attribute.name, self.value)
class CostAdjustment(models.Model):
class Meta:
ordering = ('-created', )
sku = models.ForeignKey(Sku)
old = models.FloatField()
new = models.FloatField()
who = models.ForeignKey(User)
reason = models.ForeignKey(CostAdjustmentReason)
detail = models.TextField(blank=True, null=True, default=None)
created = models.DateTimeField(auto_now_add=True)
def save(self, *args, **kwargs):
self.old = self.sku.cost
super(CostAdjustment, self).save(*args, **kwargs)
self.sku.cost = self.new
self.sku.save(cost_change=True)
def get_absolute_url(self):
return reverse('app:cost_adjustment__view', args=[str(self.pk)])
def __str__(self):
return '[%s] %s to %s' % (self.sku.id, self.old, self.new)
class QuantityAdjustment(models.Model):
class Meta:
ordering = ('-created', )
sku = models.ForeignKey(Sku)
old = models.IntegerField()
new = models.IntegerField()
who = models.ForeignKey(User)
reason = models.ForeignKey(QuantityAdjustmentReason)
detail = models.TextField(blank=True, null=True, default=None)
created = models.DateTimeField(auto_now_add=True)
def save(self, *args, **kwargs):
self.old = self.sku.quantity_on_hand
super(QuantityAdjustment, self).save(*args, **kwargs)
self.sku.quantity_on_hand = self.new
self.sku.save(qty_change=True)
def get_absolute_url(self):
return reverse('app:quantity_adjustment__view', args=[str(self.pk)])
def __str__(self):
return '[%s] %s to %s on %s by %s because %s' % (self.sku.id, self.old, self.new, self.created, self.who, self.reason)
class Contact(models.Model):
class Meta:
ordering = ('name', 'represents')
unique_together = (
('name', 'represents'),
)
name = models.CharField(max_length=255)
email = models.EmailField(max_length=255)
work_phone = models.CharField(max_length=255)
cell_phone = models.CharField(max_length=255, blank=True, null=True, default=None)
fax = models.CharField(max_length=255, blank=True, null=True, default=None)
address1 = models.CharField(max_length=255, blank=True, null=True, default=None)
address2 = models.CharField(max_length=255, blank=True, null=True, default=None)
address3 = models.CharField(max_length=255, blank=True, null=True, default=None)
city = models.CharField(max_length=255, blank=True, null=True, default=None)
state = models.CharField(max_length=255, choices=US_STATES, blank=True, null=True, default=None)
zipcode = models.CharField(max_length=255, blank=True, null=True, default=None)
country = models.CharField(max_length=255, default='United States', blank=True, null=True)
represents = models.ForeignKey(Supplier)
label = models.ManyToManyField(ContactLabel)
def get_absolute_url(self):
return reverse('app:contact__view', args=[str(self.pk)])
def __str__(self):
return '%s @ %s' % (self.name, self.represents.name)
class Receiver(models.Model):
class Meta:
ordering = ('name', )
name = models.CharField(max_length=255, unique=True)
address1 = models.CharField(max_length=255, blank=True, null=True, default=None)
address2 = models.CharField(max_length=255, blank=True, null=True, default=None)
address3 = models.CharField(max_length=255, blank=True, null=True, default=None)
city = models.CharField(max_length=255, blank=True, null=True, default=None)
state = models.CharField(max_length=255, choices=US_STATES, blank=True, null=True, default=None)
zipcode = models.CharField(max_length=255, blank=True, null=True, default=None)
country = models.CharField(max_length=255, default='United States', blank=True, null=True)
def get_absolute_url(self):
return reverse('app:receiver__view', args=[str(self.pk)])
def __str__(self):
return self.name
class PurchaseOrder(models.Model):
class Meta:
ordering = ('-created', )
creator = models.ForeignKey(User)
supplier = models.ForeignKey(Supplier)
contact = models.ForeignKey(Contact)
receiver = models.ForeignKey(Receiver)
deal = models.CharField(max_length=255)
note = models.TextField(blank=True, null=True, default=None)
created = models.DateTimeField(auto_now_add=True)
terms = models.CharField(max_length=255)
tracking_url = models.CharField(max_length=512, blank=True, null=True, default=None)
shipping_cost = models.FloatField(default=0.0)
sales_tax = models.FloatField(default=0.0)
def get_absolute_url(self):
return reverse('app:purchase_order__view', args=[str(self.pk)])
def is_fully_received(self):
po_li = dict(
(li.sku.id, li.quantity_ordered) for li
in PurchaseOrderLineItem.objects.filter(purchase_order=self)
)
qs = ShipmentLineItem.objects.filter(shipment__purchase_order=self)
ship_li = {}
for li in qs:
ship_li.setdefault(li.sku.id, 0)
ship_li[li.sku.id] += li.quantity_received
for sku, qty in po_li.items():
if sku in ship_li:
if qty > ship_li[sku]:
return False
else:
return False
return True
def _total_cost(self):
return sum([li.total_cost for li in self.purchaseorderlineitem_set.all()]) + self.shipping_cost + self.sales_tax
total_cost = property(_total_cost)
def __str__(self):
return '%s-%s' % (self.id, self.creator.username)
class PurchaseOrderLineItem(models.Model):
class Meta:
ordering = ('-purchase_order__id', 'sku__id')
purchase_order = models.ForeignKey(PurchaseOrder)
sku = models.ForeignKey(Sku)
quantity_ordered = models.IntegerField()
unit_cost = models.FloatField()
discount_percent = models.FloatField(blank=True, null=True)
discount_dollar = models.FloatField(blank=True, null=True)
def save(self, *args, **kwargs):
super(PurchaseOrderLineItem, self).save(*args, **kwargs)
if self.unit_cost != self.sku.cost:
ca = CostAdjustment(
sku=self.sku,
new=self.unit_cost,
who=self.purchase_order.creator,
reason=CostAdjustmentReason.objects.get(name='Supplier Adjustment'),
detail='adjustment made during creation of <a href="%s">%s</a>' % (
self.purchase_order.get_absolute_url(), str(self.purchase_order))
)
ca.save()
def get_absolute_url(self):
return reverse('app:purchase_order_line_item__view', args=[str(self.pk)])
def _adjusted_unit_cost(self):
dp = self.discount_percent or 0
dd = self.discount_dollar or 0
dp /= 100.0
return (self.unit_cost - (self.unit_cost * dp)) - dd
adjusted_unit_cost = property(_adjusted_unit_cost)
def _total_cost(self):
return self.adjusted_unit_cost * self.quantity_ordered
total_cost = property(_total_cost)
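# Worked example (hypothetical numbers): unit_cost=10.00, discount_percent=5 and
# discount_dollar=0.50 give adjusted_unit_cost = (10.00 - 10.00 * 0.05) - 0.50 = 9.00,
# so with quantity_ordered=12 the total_cost is 108.00.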
def __str__(self):
return '[%s] %s' % (self.sku.id, self.quantity_ordered)
class Shipment(models.Model):
class Meta:
ordering = ('-created', )
creator = models.ForeignKey(User)
purchase_order = models.ForeignKey(PurchaseOrder)
note = models.TextField(blank=True, null=True, default=None)
created = models.DateTimeField(auto_now_add=True)
def get_absolute_url(self):
return reverse('app:shipment__view', args=[str(self.pk)])
def __str__(self):
return '%s-%s' % (self.id, self.creator.username)
class ShipmentLineItem(models.Model):
class Meta:
ordering = ('-shipment__id', 'sku__id')
shipment = models.ForeignKey(Shipment)
sku = models.ForeignKey(Sku)
quantity_received = models.IntegerField()
def save(self, *args, **kwargs):
adj = QuantityAdjustment()
adj.sku = self.sku
adj.new = self.sku.quantity_on_hand + self.quantity_received
adj.reason = QuantityAdjustmentReason.objects.get(name='Received Shipment')
adj.detail = 'received %s units on %s in shipment <a href="%s">%s</a>' % (
self.quantity_received, self.shipment.created.strftime('%x'), self.shipment.get_absolute_url(),
str(self.shipment)
)
adj.who = self.shipment.creator
adj.save()
super(ShipmentLineItem, self).save(*args, **kwargs)
def get_absolute_url(self):
return reverse('app:shipment_line_item__view', args=[str(self.pk)])
def __str__(self):
return '[%s] %s' % (self.sku.id, self.quantity_received)
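# Illustrative sketch only, not used by the application: how receiving stock
# flows through the models above. Saving a ShipmentLineItem creates a
# QuantityAdjustment with the 'Received Shipment' reason (that
# QuantityAdjustmentReason row must already exist), and the adjustment in turn
# bumps the SKU's quantity_on_hand through Sku.save(qty_change=True).
# The helper name below is hypothetical.
def _example_receive_stock(shipment, sku, quantity):
    ShipmentLineItem(shipment=shipment, sku=sku,
                     quantity_received=quantity).save()
    # Re-read the SKU: quantity_on_hand has been increased by `quantity`.
    return Sku.objects.get(pk=sku.pk).quantity_on_hand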
|
|
# -*- coding: utf-8 -*-
'''
Management of iptables
======================
This is an iptables-specific module designed to manage Linux firewalls. It is
expected that this state module, and other system-specific firewall states, may
at some point be deprecated in favor of a more generic `firewall` state.
.. code-block:: yaml
httpd:
iptables.append:
- table: filter
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
httpd:
iptables.insert:
- position: 1
- table: filter
- chain: INPUT
- jump: ACCEPT
- match: state
- connstate: NEW
- dport: 80
- proto: tcp
- sport: 1025:65535
- save: True
'''
# Import salt libs
from salt.state import STATE_INTERNAL_KEYWORDS as _STATE_INTERNAL_KEYWORDS
def __virtual__():
'''
Only load if the iptables module is available in __salt__
'''
return 'iptables' if 'iptables.version' in __salt__ else False
def chain_present(name, table='filter'):
'''
Verify that the chain exists.
name
A user-defined chain name.
table
The table to own the chain.
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': '',
'state_stdout': ''}
chain_check = __salt__['iptables.check_chain'](table, name, state_ret=ret)
if chain_check is True:
ret['result'] = True
ret['comment'] = ('iptables {0} chain already exists in {1} table'
.format(name, table))
return ret
command = __salt__['iptables.new_chain'](table, name, state_ret=ret)
if command is True:
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = ('iptables {0} chain in {1} table created successfully'
.format(name, table))
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to create {0} chain in {1} table: {2}'.format(
name,
table,
command.strip(),
)
return ret
def chain_absent(name, table='filter'):
'''
Verify the chain is absent.
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': '',
'state_stdout': ''}
chain_check = __salt__['iptables.check_chain'](table, name, state_ret=ret)
if not chain_check:
ret['result'] = True
ret['comment'] = ('iptables {0} chain is already absent in {1} table'
.format(name, table))
return ret
flush_chain = __salt__['iptables.flush'](table, name, state_ret=ret)
if not flush_chain:
command = __salt__['iptables.delete_chain'](table, name, state_ret=ret)
if command is True:
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = ('iptables {0} chain in {1} table deleted successfully'
.format(name, table))
else:
ret['result'] = False
ret['comment'] = ('Failed to delete {0} chain in {1} table: {2}'
.format(name, table, command.strip()))
else:
ret['result'] = False
ret['comment'] = 'Failed to flush {0} chain in {1} table: {2}'.format(
name,
table,
flush_chain.strip(),
)
return ret
def append(name, **kwargs):
'''
Append a rule to a chain
name
A user-defined name to call this rule by in another part of a state or
formula. This should not be an actual rule.
All other arguments are passed in with the same name as the long option
that would normally be used for iptables, with one exception: `--state` is
specified as `connstate` instead of `state` (not to be confused with
`ctstate`).
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': '',
'state_stdout': ''}
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in kwargs:
del kwargs[ignore]
rule = __salt__['iptables.build_rule'](**kwargs)
command = __salt__['iptables.build_rule'](full=True, command='A', **kwargs)
if __salt__['iptables.check'](kwargs['table'],
kwargs['chain'],
rule, state_ret=ret) is True:
ret['result'] = True
ret['comment'] = 'iptables rule for {0} already set ({1})'.format(
name,
command.strip())
return ret
if __opts__['test']:
ret['comment'] = 'iptables rule for {0} needs to be set ({1})'.format(
name,
command.strip())
return ret
if not __salt__['iptables.append'](kwargs['table'], kwargs['chain'], rule, state_ret=ret):
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = 'Set iptables rule for {0} to: {1}'.format(
name,
command.strip())
if 'save' in kwargs:
if kwargs['save']:
__salt__['iptables.save'](filename=None, state_ret=ret)
ret['comment'] = ('Set and Saved iptables rule for {0} to: '
'{1}'.format(name, command.strip()))
return ret
else:
ret['result'] = False
ret['comment'] = ('Failed to set iptables rule for {0}.\n'
'Attempted rule was {1}').format(
name,
command.strip())
return ret
def insert(name, **kwargs):
'''
Insert a rule into a chain
name
A user-defined name to call this rule by in another part of a state or
formula. This should not be an actual rule.
All other arguments are passed in with the same name as the long option
that would normally be used for iptables, with one exception: `--state` is
specified as `connstate` instead of `state` (not to be confused with
`ctstate`).
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': '',
'state_stdout': ''}
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in kwargs:
del kwargs[ignore]
rule = __salt__['iptables.build_rule'](**kwargs)
command = __salt__['iptables.build_rule'](full=True, command='I', **kwargs)
if __salt__['iptables.check'](kwargs['table'],
kwargs['chain'],
rule, state_ret=ret) is True:
ret['result'] = True
ret['comment'] = 'iptables rule for {0} already set ({1})'.format(
name,
command.strip())
return ret
if __opts__['test']:
ret['comment'] = 'iptables rule for {0} needs to be set ({1})'.format(
name,
command.strip())
return ret
if not __salt__['iptables.insert'](kwargs['table'], kwargs['chain'], kwargs['position'], rule, state_ret=ret):
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = 'Set iptables rule for {0} to: {1}'.format(
name,
command.strip())
if 'save' in kwargs:
if kwargs['save']:
__salt__['iptables.save'](filename=None, state_ret=ret)
ret['comment'] = ('Set and Saved iptables rule for {0} to: '
'{1}'.format(name, command.strip()))
return ret
else:
ret['result'] = False
ret['comment'] = ('Failed to set iptables rule for {0}.\n'
'Attempted rule was {1}').format(
name,
command.strip())
return ret
def set_policy(name, **kwargs):
'''
Sets the default policy for iptables firewall tables
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': '',
'state_stdout': ''}
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in kwargs:
del kwargs[ignore]
if __salt__['iptables.get_policy'](
kwargs['table'],
kwargs['chain']) == kwargs['policy']:
ret['result'] = True
ret['comment'] = ('iptables default policy for {0} already set to {1}'
.format(kwargs['table'], kwargs['policy']))
return ret
if not __salt__['iptables.set_policy'](
kwargs['table'],
kwargs['chain'],
kwargs['policy'], state_ret=ret):
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = 'Set default policy for {0} to {1}'.format(
kwargs['chain'],
kwargs['policy'],
)
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to set iptables default policy'
return ret
def flush(name, **kwargs):
'''
Flush current iptables state
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': '',
'state_stdout': ''}
for ignore in _STATE_INTERNAL_KEYWORDS:
if ignore in kwargs:
del kwargs[ignore]
if 'chain' not in kwargs:
kwargs['chain'] = ''
if not __salt__['iptables.flush'](kwargs['table'], kwargs['chain'], state_ret=ret):
ret['changes'] = {'locale': name}
ret['result'] = True
ret['comment'] = 'Flush iptables rules in {0} table {1} chain'.format(
kwargs['table'],
kwargs['chain'],
)
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to flush iptables rules'
return ret
|
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
import pathlib
from typing import Optional
import mako.template
import yaml
from framework.infrastructure import gcp
from framework.infrastructure import k8s
logger = logging.getLogger(__name__)
class RunnerError(Exception):
"""Error running app"""
class KubernetesBaseRunner:
TEMPLATE_DIR_NAME = 'kubernetes-manifests'
TEMPLATE_DIR_RELATIVE_PATH = f'../../{TEMPLATE_DIR_NAME}'
ROLE_WORKLOAD_IDENTITY_USER = 'roles/iam.workloadIdentityUser'
def __init__(self,
k8s_namespace,
namespace_template=None,
reuse_namespace=False):
# Kubernetes namespaced resources manager
self.k8s_namespace: k8s.KubernetesNamespace = k8s_namespace
self.reuse_namespace = reuse_namespace
self.namespace_template = namespace_template or 'namespace.yaml'
# Mutable state
self.namespace: Optional[k8s.V1Namespace] = None
def run(self, **kwargs):
if self.reuse_namespace:
self.namespace = self._reuse_namespace()
if not self.namespace:
self.namespace = self._create_namespace(
self.namespace_template, namespace_name=self.k8s_namespace.name)
def cleanup(self, *, force=False):
if (self.namespace and not self.reuse_namespace) or force:
self.delete_namespace()
self.namespace = None
@staticmethod
def _render_template(template_file, **kwargs):
template = mako.template.Template(filename=str(template_file))
return template.render(**kwargs)
@staticmethod
def _manifests_from_yaml_file(yaml_file):
with open(yaml_file) as f:
with contextlib.closing(yaml.safe_load_all(f)) as yml:
for manifest in yml:
yield manifest
@staticmethod
def _manifests_from_str(document):
with contextlib.closing(yaml.safe_load_all(document)) as yml:
for manifest in yml:
yield manifest
@classmethod
def _template_file_from_name(cls, template_name):
templates_path = (pathlib.Path(__file__).parent /
cls.TEMPLATE_DIR_RELATIVE_PATH)
return templates_path.joinpath(template_name).resolve()
def _create_from_template(self, template_name, **kwargs):
template_file = self._template_file_from_name(template_name)
logger.debug("Loading k8s manifest template: %s", template_file)
yaml_doc = self._render_template(template_file, **kwargs)
logger.info("Rendered template %s/%s:\n%s", self.TEMPLATE_DIR_NAME,
template_name, yaml_doc)
manifests = self._manifests_from_str(yaml_doc)
manifest = next(manifests)
# Error out on multi-document yaml
if next(manifests, False):
raise RunnerError('Exactly one document expected in manifest '
f'{template_file}')
k8s_objects = self.k8s_namespace.apply_manifest(manifest)
if len(k8s_objects) != 1:
raise RunnerError('Expected exactly one object to be created from '
f'manifest {template_file}')
logger.info('%s %s created', k8s_objects[0].kind,
k8s_objects[0].metadata.name)
return k8s_objects[0]
def _reuse_deployment(self, deployment_name) -> k8s.V1Deployment:
deployment = self.k8s_namespace.get_deployment(deployment_name)
# TODO(sergiitk): check if good or must be recreated
return deployment
def _reuse_service(self, service_name) -> k8s.V1Service:
service = self.k8s_namespace.get_service(service_name)
# TODO(sergiitk): check if good or must be recreated
return service
def _reuse_namespace(self) -> k8s.V1Namespace:
return self.k8s_namespace.get()
def _create_namespace(self, template, **kwargs) -> k8s.V1Namespace:
namespace = self._create_from_template(template, **kwargs)
if not isinstance(namespace, k8s.V1Namespace):
raise RunnerError('Expected V1Namespace to be created '
f'from manifest {template}')
if namespace.metadata.name != kwargs['namespace_name']:
raise RunnerError('V1Namespace created with unexpected name: '
f'{namespace.metadata.name}')
logger.debug('V1Namespace %s created at %s',
namespace.metadata.self_link,
namespace.metadata.creation_timestamp)
return namespace
@staticmethod
def _get_workload_identity_member_name(project, namespace_name,
service_account_name):
"""
Returns workload identity member name used to authenticate Kubernetes
service accounts.
https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity
"""
return (f'serviceAccount:{project}.svc.id.goog'
f'[{namespace_name}/{service_account_name}]')
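# Example (hypothetical values): project 'my-project', namespace 'xds-test'
# and service account 'psm-grpc-client' yield
# 'serviceAccount:my-project.svc.id.goog[xds-test/psm-grpc-client]'.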
def _grant_workload_identity_user(self, *, gcp_iam, gcp_service_account,
service_account_name):
workload_identity_member = self._get_workload_identity_member_name(
gcp_iam.project, self.k8s_namespace.name, service_account_name)
logger.info('Granting %s to %s for GCP Service Account %s',
self.ROLE_WORKLOAD_IDENTITY_USER, workload_identity_member,
gcp_service_account)
gcp_iam.add_service_account_iam_policy_binding(
gcp_service_account, self.ROLE_WORKLOAD_IDENTITY_USER,
workload_identity_member)
def _revoke_workload_identity_user(self, *, gcp_iam, gcp_service_account,
service_account_name):
workload_identity_member = self._get_workload_identity_member_name(
gcp_iam.project, self.k8s_namespace.name, service_account_name)
logger.info('Revoking %s from %s for GCP Service Account %s',
self.ROLE_WORKLOAD_IDENTITY_USER, workload_identity_member,
gcp_service_account)
try:
gcp_iam.remove_service_account_iam_policy_binding(
gcp_service_account, self.ROLE_WORKLOAD_IDENTITY_USER,
workload_identity_member)
except gcp.api.Error as error:
logger.warning('Failed to revoke %s from %s for GCP Service Account %s: %r',
self.ROLE_WORKLOAD_IDENTITY_USER,
workload_identity_member, gcp_service_account, error)
def _create_service_account(self, template,
**kwargs) -> k8s.V1ServiceAccount:
resource = self._create_from_template(template, **kwargs)
if not isinstance(resource, k8s.V1ServiceAccount):
raise RunnerError('Expected V1ServiceAccount to be created '
f'from manifest {template}')
if resource.metadata.name != kwargs['service_account_name']:
raise RunnerError('V1ServiceAccount created with unexpected name: '
f'{resource.metadata.name}')
logger.debug('V1ServiceAccount %s created at %s',
resource.metadata.self_link,
resource.metadata.creation_timestamp)
return resource
def _create_deployment(self, template, **kwargs) -> k8s.V1Deployment:
deployment = self._create_from_template(template, **kwargs)
if not isinstance(deployment, k8s.V1Deployment):
raise RunnerError('Expected V1Deployment to be created '
f'from manifest {template}')
if deployment.metadata.name != kwargs['deployment_name']:
raise RunnerError('V1Deployment created with unexpected name: '
f'{deployment.metadata.name}')
logger.debug('V1Deployment %s created at %s',
deployment.metadata.self_link,
deployment.metadata.creation_timestamp)
return deployment
def _create_service(self, template, **kwargs) -> k8s.V1Service:
service = self._create_from_template(template, **kwargs)
if not isinstance(service, k8s.V1Service):
raise RunnerError('Expected V1Service to be created '
f'from manifest {template}')
if service.metadata.name != kwargs['service_name']:
raise RunnerError('V1Service created with unexpected name: '
f'{service.metadata.name}')
logger.debug('V1Service %s created at %s', service.metadata.self_link,
service.metadata.creation_timestamp)
return service
def _delete_deployment(self, name, wait_for_deletion=True):
logger.info('Deleting deployment %s', name)
try:
self.k8s_namespace.delete_deployment(name)
except k8s.ApiException as e:
logger.info('Deployment %s deletion failed, error: %s %s', name,
e.status, e.reason)
return
if wait_for_deletion:
self.k8s_namespace.wait_for_deployment_deleted(name)
logger.debug('Deployment %s deleted', name)
def _delete_service(self, name, wait_for_deletion=True):
logger.info('Deleting service %s', name)
try:
self.k8s_namespace.delete_service(name)
except k8s.ApiException as e:
logger.info('Service %s deletion failed, error: %s %s', name,
e.status, e.reason)
return
if wait_for_deletion:
self.k8s_namespace.wait_for_service_deleted(name)
logger.debug('Service %s deleted', name)
def _delete_service_account(self, name, wait_for_deletion=True):
logger.info('Deleting service account %s', name)
try:
self.k8s_namespace.delete_service_account(name)
except k8s.ApiException as e:
logger.info('Service account %s deletion failed, error: %s %s',
name, e.status, e.reason)
return
if wait_for_deletion:
self.k8s_namespace.wait_for_service_account_deleted(name)
logger.debug('Service account %s deleted', name)
def delete_namespace(self, wait_for_deletion=True):
logger.info('Deleting namespace %s', self.k8s_namespace.name)
try:
self.k8s_namespace.delete()
except k8s.ApiException as e:
logger.info('Namespace %s deletion failed, error: %s %s',
self.k8s_namespace.name, e.status, e.reason)
return
if wait_for_deletion:
self.k8s_namespace.wait_for_namespace_deleted()
logger.debug('Namespace %s deleted', self.k8s_namespace.name)
def _wait_deployment_with_available_replicas(self, name, count=1, **kwargs):
logger.info('Waiting for deployment %s to have %s available replica(s)',
name, count)
self.k8s_namespace.wait_for_deployment_available_replicas(
name, count, **kwargs)
deployment = self.k8s_namespace.get_deployment(name)
logger.info('Deployment %s has %i replicas available',
deployment.metadata.name,
deployment.status.available_replicas)
def _wait_pod_started(self, name, **kwargs):
logger.info('Waiting for pod %s to start', name)
self.k8s_namespace.wait_for_pod_started(name, **kwargs)
pod = self.k8s_namespace.get_pod(name)
logger.info('Pod %s ready, IP: %s', pod.metadata.name,
pod.status.pod_ip)
def _wait_service_neg(self, name, service_port, **kwargs):
logger.info('Waiting for NEG for service %s', name)
self.k8s_namespace.wait_for_service_neg(name, **kwargs)
neg_name, neg_zones = self.k8s_namespace.get_service_neg(
name, service_port)
logger.info("Service %s: detected NEG=%s in zones=%s", name, neg_name,
neg_zones)
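# Illustrative sketch, not part of the framework: a minimal concrete runner on
# top of KubernetesBaseRunner. The class name, template file names, resource
# names and template parameters below are hypothetical; real runners pass
# whatever their Mako manifests under kubernetes-manifests/ expect.
class _ExampleAppRunner(KubernetesBaseRunner):
    DEPLOYMENT_NAME = 'example-app'
    SERVICE_NAME = 'example-app'
    def run(self, **kwargs):
        # Ensure the namespace exists (created or reused by the base class).
        super().run(**kwargs)
        self.deployment = self._create_deployment(
            'example-app.deployment.yaml',
            deployment_name=self.DEPLOYMENT_NAME,
            namespace_name=self.k8s_namespace.name)
        self.service = self._create_service(
            'example-app.service.yaml',
            service_name=self.SERVICE_NAME,
            namespace_name=self.k8s_namespace.name)
        self._wait_deployment_with_available_replicas(self.DEPLOYMENT_NAME)
    def cleanup(self, *, force=False):
        self._delete_deployment(self.DEPLOYMENT_NAME)
        self._delete_service(self.SERVICE_NAME)
        super().cleanup(force=force)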
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import stat
import tempfile
import threading
import time
import unittest
from collections import namedtuple
from pyspark import SparkConf, SparkFiles, SparkContext
from pyspark.testing.utils import ReusedPySparkTestCase, PySparkTestCase, QuietTest, SPARK_HOME
class CheckpointTests(ReusedPySparkTestCase):
def setUp(self):
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEqual([1, 2, 3, 4], recovered.collect())
class LocalCheckpointTests(ReusedPySparkTestCase):
def test_basic_localcheckpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertFalse(flatMappedRDD.isLocallyCheckpointed())
flatMappedRDD.localCheckpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.isLocallyCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
class AddFileTests(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
# disable logging in log4j temporarily
def func(x):
from userlibrary import UserClass
return UserClass().hello()
with QuietTest(self.sc):
self.assertRaises(Exception, self.sc.parallelize(range(2)).map(func).first)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
def test_add_file_recursively_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello")
self.sc.addFile(path, True)
download_path = SparkFiles.get("hello")
self.assertNotEqual(path, download_path)
with open(download_path + "/hello.txt") as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
with open(download_path + "/sub_hello/sub_hello.txt") as test_file:
self.assertEqual("Sub Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass # noqa: F401
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass # noqa: F401
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1.zip")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
def test_overwrite_system_module(self):
self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
import SimpleHTTPServer
self.assertEqual("My Server", SimpleHTTPServer.__name__)
def func(x):
import SimpleHTTPServer
return SimpleHTTPServer.__name__
self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class ContextTests(unittest.TestCase):
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
def test_get_or_create(self):
with SparkContext.getOrCreate() as sc:
self.assertTrue(SparkContext.getOrCreate() is sc)
def test_parallelize_eager_cleanup(self):
with SparkContext() as sc:
temp_files = os.listdir(sc._temp_dir)
rdd = sc.parallelize([0, 1, 2])
post_parallalize_temp_files = os.listdir(sc._temp_dir)
self.assertEqual(temp_files, post_parallalize_temp_files)
def test_set_conf(self):
# This is for an internal use case. When there is an existing SparkContext,
# SparkSession's builder needs to set configs into SparkContext's conf.
sc = SparkContext()
sc._conf.set("spark.test.SPARK16224", "SPARK16224")
self.assertEqual(sc._jsc.sc().conf().get("spark.test.SPARK16224"), "SPARK16224")
sc.stop()
def test_stop(self):
sc = SparkContext()
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_with(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_exception(self):
try:
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
raise Exception()
except:
pass
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_stop(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_progress_api(self):
with SparkContext() as sc:
sc.setJobGroup('test_progress_api', '', True)
rdd = sc.parallelize(range(10)).map(lambda x: time.sleep(100))
def run():
# When thread is pinned, job group should be set for each thread for now.
# Local properties seem not being inherited like Scala side does.
if os.environ.get("PYSPARK_PIN_THREAD", "false").lower() == "true":
sc.setJobGroup('test_progress_api', '', True)
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=run)
t.daemon = True
t.start()
# wait for scheduler to start
time.sleep(1)
tracker = sc.statusTracker()
jobIds = tracker.getJobIdsForGroup('test_progress_api')
self.assertEqual(1, len(jobIds))
job = tracker.getJobInfo(jobIds[0])
self.assertEqual(1, len(job.stageIds))
stage = tracker.getStageInfo(job.stageIds[0])
self.assertEqual(rdd.getNumPartitions(), stage.numTasks)
sc.cancelAllJobs()
t.join()
# wait for event listener to update the status
time.sleep(1)
job = tracker.getJobInfo(jobIds[0])
self.assertEqual('FAILED', job.status)
self.assertEqual([], tracker.getActiveJobsIds())
self.assertEqual([], tracker.getActiveStageIds())
sc.stop()
def test_startTime(self):
with SparkContext() as sc:
self.assertGreater(sc.startTime, 0)
def test_forbid_insecure_gateway(self):
# Fail immediately if you try to create a SparkContext
# with an insecure gateway
parameters = namedtuple('MockGatewayParameters', 'auth_token')(None)
mock_insecure_gateway = namedtuple('MockJavaGateway', 'gateway_parameters')(parameters)
with self.assertRaises(ValueError) as context:
SparkContext(gateway=mock_insecure_gateway)
self.assertIn("insecure Py4j gateway", str(context.exception))
def test_resources(self):
"""Test the resources are empty by default."""
with SparkContext() as sc:
resources = sc.resources
self.assertEqual(len(resources), 0)
def test_disallow_to_create_spark_context_in_executors(self):
# SPARK-32160: SparkContext should not be created in executors.
with SparkContext("local-cluster[3, 1, 1024]") as sc:
with self.assertRaises(Exception) as context:
sc.range(2).foreach(lambda _: SparkContext())
self.assertIn("SparkContext should only be created and accessed on the driver.",
str(context.exception))
def test_allow_to_create_spark_context_in_executors(self):
# SPARK-32160: SparkContext can be created in executors if the config is set.
def create_spark_context():
conf = SparkConf().set("spark.executor.allowSparkContext", "true")
with SparkContext(conf=conf):
pass
with SparkContext("local-cluster[3, 1, 1024]") as sc:
sc.range(2).foreach(lambda _: create_spark_context())
class ContextTestsWithResources(unittest.TestCase):
def setUp(self):
class_name = self.__class__.__name__
self.tempFile = tempfile.NamedTemporaryFile(delete=False)
self.tempFile.write(b'echo {\\"name\\": \\"gpu\\", \\"addresses\\": [\\"0\\"]}')
self.tempFile.close()
# create temporary directory for Worker resources coordination
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
os.chmod(self.tempFile.name, stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP |
stat.S_IROTH | stat.S_IXOTH)
conf = SparkConf().set("spark.test.home", SPARK_HOME)
conf = conf.set("spark.driver.resource.gpu.amount", "1")
conf = conf.set("spark.driver.resource.gpu.discoveryScript", self.tempFile.name)
self.sc = SparkContext('local-cluster[2,1,1024]', class_name, conf=conf)
def test_resources(self):
"""Test the resources are available."""
resources = self.sc.resources
self.assertEqual(len(resources), 1)
self.assertTrue('gpu' in resources)
self.assertEqual(resources['gpu'].name, 'gpu')
self.assertEqual(resources['gpu'].addresses, ['0'])
def tearDown(self):
os.unlink(self.tempFile.name)
self.sc.stop()
if __name__ == "__main__":
from pyspark.tests.test_context import * # noqa: F401
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
|
from __future__ import print_function
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, GaussianNoise
from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
from keras.layers import MaxPooling2D, concatenate
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.layers.merge import Multiply
from keras.models import Sequential, Model
from keras.optimizers import Adam,RMSprop, SGD
from keras import losses
from keras.utils import to_categorical
import keras.backend as K
import numpy as np
from time import time
import sys
backend_name = K.backend()
is_tf = False
if 'tensorflow' in backend_name.lower():
is_tf = True
import platform
print('platform : ', platform.node().lower())
is_sofiane = False
if 'alison' in platform.node().lower():
celeba_path = '/Users/pouplinalison/Documents/skin_analytics/code_dcgan/inData/celeba.npy'
elif 'desktop' in platform.node().lower():
is_sofiane = True
celeba_path = r'D:\Code\data\sceleba.npy'
elif 'sofiane' in platform.node().lower():
celeba_path = '/Users/sofianemahiou/Code/data/sceleba.npy'
else:
celeba_path = '/data/users/amp115/skin_analytics/inData/celeba.npy'
from bigan_root import BIGAN_ROOT
class BIGAN(BIGAN_ROOT):
def __init__(self,example_bool = False, test_model = False,interpolate_bool=False,celeba_path=celeba_path,preload=False,start_iteration=0,train_bool=True):
super(BIGAN, self).__init__(example_bool = example_bool, train_bool=train_bool, test_model=test_model,interpolate_bool=interpolate_bool,
img_rows=64,img_cols=64,channels=3, save_folder='bigan/celeba3/'
,latent_dim=200,preload=preload)
self.dataPath = celeba_path
def build_generator(self):
noise_shape = (self.latent_dim,)
model = Sequential()
model.add(Dense(1024 * 4 * 4, activation="relu", input_shape=noise_shape))
model.add(Reshape((4, 4, 1024)))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(512, kernel_size=3, padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(256, kernel_size=3, padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(128, kernel_size=3, padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(64, kernel_size=3, padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
model.add(Activation("tanh"))
model.summary()
noise = Input(shape=noise_shape)
img = model(noise)
return Model(noise, img)
def build_encoder(self):
img_shape = (self.img_rows, self.img_cols, self.channels)
model = Sequential()
model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=img_shape, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
model.add(ZeroPadding2D(padding=((0,1),(0,1))))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(self.latent_dim))
model.summary()
img = Input(shape=img_shape)
validity = model(img)
return Model(img, validity)
def build_discriminator(self):
img_shape = (self.img_rows, self.img_cols, self.channels)
img = Input(shape=img_shape)
model_image = Conv2D(32, kernel_size=3, strides=2, padding="same")(img)
model_image = LeakyReLU(alpha=0.2)(model_image)
model_image = Dropout(0.25)(model_image)
model_image = Conv2D(64, kernel_size=3, strides=2, padding="same")(model_image)
model_image = ZeroPadding2D(padding=((0,1),(0,1)))(model_image)
model_image = LeakyReLU(alpha=0.2)(model_image)
model_image = Dropout(0.25)(model_image)
model_image = BatchNormalization(momentum=0.8)(model_image)
model_image = Conv2D(128, kernel_size=3, strides=2, padding="same")(model_image)
model_image = LeakyReLU(alpha=0.2)(model_image)
model_image = Dropout(0.25)(model_image)
model_image = BatchNormalization(momentum=0.8)(model_image)
model_image = Conv2D(256, kernel_size=3, strides=1, padding="same")(model_image)
model_image = LeakyReLU(alpha=0.2)(model_image)
model_image = Dropout(0.25)(model_image)
model_image = Flatten()(model_image)
model_image = Dense(self.latent_dim)(model_image)
z = Input(shape=(self.latent_dim, ))
model_z = Dense(self.latent_dim)(z)
# d_in = concatenate([model_image,model_z,multiply([model_image,model_z])])
d_in = concatenate([model_image,model_z])
model = Dense(1024)(d_in)
model = LeakyReLU(alpha=0.2)(model)
model = Dropout(0.5)(model)
model = Dense(512)(model)
model = LeakyReLU(alpha=0.2)(model)
model = Dropout(0.5)(model)
# model = Dense(1024)(model)
# model = LeakyReLU(alpha=0.2)(model)
# model = Dropout(0.5)(model)
# model = Dense(1024)(model)
# model = LeakyReLU(alpha=0.2)(model)
# model = Dropout(0.5)(model)
validity = Dense(1, activation="sigmoid")(model)
return Model([z, img], validity)
def load_data(self):
print('----- Loading CelebA -------')
X_train = np.load(self.dataPath)
print('------ Data Loaded : Preprocessing -----')
X_train = X_train.transpose([0,2,3,1])
# Rescale -1 to 1
if is_sofiane:
for i in range(X_train.shape[0]):
print('advancement: {:.2f}%'.format(i/X_train.shape[0]*100),end='\r')
X_train[i] = (X_train[i].astype(np.float32) - 0.5) / 0.5
else:
X_train = (X_train.astype(np.float32) - 0.5) / 0.5
print('CelebA shape:', X_train.shape, X_train.min(), X_train.max())
print('------- CelebA loaded -------')
return X_train
if __name__ == '__main__':
test_bool = False
train_bool = True
interpolate_bool = False
preload=False
start_iteration = 0
example_bool = False
if '-preload' in sys.argv[1:]:
preload = True
if '-test' in sys.argv[1:]:
test_bool = True
train_bool = False
if '-interpolate' in sys.argv[1:]:
interpolate_bool = True
train_bool = False
if '-start' in sys.argv[1:]:
start_iteration = int(sys.argv[sys.argv.index('-start')+1])
if start_iteration != 0:
preload = True
if '-example' in sys.argv[1:]:
train_bool = False
preload = True
example_bool = True
bigan = BIGAN(example_bool = example_bool, train_bool= train_bool, test_model = test_bool,interpolate_bool = interpolate_bool,preload=preload)
bigan.run(epochs=50001, batch_size=64, save_interval=100,start_iteration=start_iteration)
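# Illustrative usage; the script name is hypothetical, the flags match the
# argv parsing above:
#   python bigan_celeba.py                          train from scratch
#   python bigan_celeba.py -preload -start 20000    resume training at iteration 20000
#   python bigan_celeba.py -test                    run the model test path instead of training
#   python bigan_celeba.py -interpolate             run latent-space interpolation instead of training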
|
|
import itertools
from neo4j import GraphDatabase, basic_auth
def set_up_connection():
# setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
return session
def diagnose_symptoms_by_subset_of_anomaly(symptoms):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# build the query based on the symptoms list
query = ''
for index, symp in enumerate(symptoms):
query = query + 'MATCH (m' + str(index) + ':Measurement)-[r' + str(index) + ':' + symp[
'relationship'] + ']->(g:Anomaly) '
query = query + 'WHERE '
for index, symp in enumerate(symptoms):
if (index + 1) < len(symptoms):
query = query + 'm' + str(index) + '.Name=\'' + symp['measurement'] + '\' AND '
else:
query = query + 'm' + str(index) + '.Name=\'' + symp['measurement'] + '\' RETURN DISTINCT g.Title'
# query the database
result = session.run(query)
diagnosis = [node[0] for node in result]
return diagnosis
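# Illustrative sketch (hypothetical symptom names): for
#   symptoms = [{'measurement': 'Cabin Pressure', 'relationship': 'Exceeds_UWL'},
#               {'measurement': 'Fan Speed', 'relationship': 'Exceeds_LWL'}]
# the function above builds (as a single string) a query equivalent to:
#   MATCH (m0:Measurement)-[r0:Exceeds_UWL]->(g:Anomaly)
#   MATCH (m1:Measurement)-[r1:Exceeds_LWL]->(g:Anomaly)
#   WHERE m0.Name='Cabin Pressure' AND m1.Name='Fan Speed'
#   RETURN DISTINCT g.Title
# so only anomalies linked to all of the requested symptoms are returned.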
def diagnose_symptoms_by_intersection_with_anomaly(requested_symptoms):
# This function has several ugly patches and needs to be improved. This will probably require a deep refactor
# of all the VA code.
# Notation (needed to understand the function)
# Let A = {a1, a2, ..., aN} be the set of all anomalies.
# Let Sk = {sk1, sk2, ..., skM} be the set of symptoms of anomaly k (with 1<=k<=N).
# Let S = (Union of all Sk) = (s1, s2, ..., sM) be the set of all symptoms.
# Let X be a subset of S to be diagnosed (that is, the input of this function, X = "requested_symptoms").
# Let f be the diagnosis function. Then f is defined as:
# f: P(S) -> P(A), f(X) := {ak in A : (Sk intersection X) is nonempty}
# In other words, f returns all the anomalies which have some of their symptoms among the symptoms to be diagnosed.
#
# This function computes f(requested_symptoms) and sorts the resulting list according to a certain score.
# Such score is defined as g(ak) = g1(ak) * g2(ak), where:
# g1(ak) = #(Sk intersection X) / #X
# g2(ak) = #(Sk intersection X) / #Sk
# Where #A denotes the cardinality of the set A.
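# A small worked example (hypothetical numbers, not taken from the database): suppose X contains 4 requested
# symptoms and anomaly ak has 8 symptoms in total, 2 of which appear in X. Then
#   g1(ak) = 2 / 4 = 0.5,   g2(ak) = 2 / 8 = 0.25,   g(ak) = 0.5 * 0.25 = 0.125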
#
# **************************************************
# This first block of the function queries the neo4j graph. The result is f(X) (not sorted yet!)
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# Build the query based on the symptoms list
query = 'MATCH (m:Measurement)-[r]->(a:Anomaly) WHERE '
for index, symptom in enumerate(requested_symptoms):
measurement = symptom['measurement']
clause = '(m.Name = "' + measurement + '")'
if (index + 1) < len(requested_symptoms):
clause = clause + ' OR '
query = query + clause
query = query + ' RETURN DISTINCT a.Title'
# Query the database and parse the result (which is a list of the anomalies which symptoms have non empty
# intersection with the requested symptoms)
result = session.run(query)
diagnosis = [node[0] for node in result]
# **************************************************
# **************************************************
# This second section does some super ugly parsing that needs to be improved. The input of this function is a data
# structure that looks as follows (disregarding all the previous notation):
# requested_symptoms = [symptDict1, symptDict2, ...., symptDictN]
# Where:
# symptDictK = {measurement: 'Raw name of the measurement',
# display_name: 'Raw name of the measurement + parameter group',
# relationship: 'Threshold signature (as 'Exceeds_UWL')'} (*1)
#
# The output of the query in the first block, on the other hand, looks as follows:
# diagnosis = [anomaly1, anomaly2, ...., anomalyN]
#
# For each anomaly in the previous list, the set of its symptoms is retrieved (using another function within
# this same script). The result looks as follows:
# symptoms_of_anomaly = {measurement: 'Raw name of the measurement + parameter group',
# relationship: 'Threshold signature (as 'Upper Warning Limit')'} (*2)
#
# To compute the desired intersections we will need to compare items like (*1) with items like (*2), and hence the
# need to do some ugly parsing. Both types of items will be "translated" to the following common ground:
# symptCommonDict = {measurement: 'Raw name of the measurement + parameter group',
# relationship: 'Threshold signature (as 'Exceeds_UWL')'} (*3)
# The input is parsed from (*1) to (*3) ('measurement' field is substituted by 'display name' value, 'display_name'
# field is stripped and 'relationship' field is left equal).
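# Example (hypothetical values) of the (*1) -> (*3) translation performed just below:
#   (*1) {'measurement': 'Cabin Pressure', 'display_name': 'Cabin Pressure (ECLSS)', 'relationship': 'Exceeds_UpperCautionLimit'}
#   (*3) {'measurement': 'Cabin Pressure (ECLSS)', 'relationship': 'Exceeds_UpperCautionLimit'}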
parsed_input_symptoms = []
for symptom in requested_symptoms:
parsed_input_symptoms.append({'measurement': symptom['display_name'], 'relationship': symptom['relationship']})
# For each anomaly of the diagnosis output, an object like (*2) is obtained and converted to (*3). The only parsing
# needed is to convert the relationship format.
symptoms_of_each_anomaly = {}
for anomaly in diagnosis:
# Retrieve symptoms of anomaly
anomaly_symptoms = retrieve_symptoms_from_anomaly(anomaly)
symptom_of_anomaly = []
# For each symptom of the anomaly, parse the relationship field
for symptom in anomaly_symptoms:
relationship = symptom['relationship']
if relationship == "Upper Warning Limit" or relationship == "Upper Critic Limit":
relationship = 'Exceeds_UpperCautionLimit'
else:
relationship = 'Exceeds_LowerCautionLimit'
symptom['relationship'] = relationship
symptom_of_anomaly.append(symptom)
# Append the resulting object to the dictionary
symptoms_of_each_anomaly[anomaly] = symptom_of_anomaly
# **************************************************
# **************************************************
# In this third part of the code, the cardinality of each of the said intersections is computed. The number of
# symptoms of each anomaly is also retrieved.
# Let A and B be sets. A is looped over; for each element a in A, B is looped over and each element b in B is
# compared against a, increasing the cardinality counter whenever they are equal. This could be done more
# efficiently, but the already poor readability of this function would be completely obliterated.
# A -> symptoms of the anomaly (symptoms_of_each_anomaly[anomaly])
# B -> parsed requested symptoms (parsed_input_symptoms)
# Define auxiliary function to compare the symptom dictionaries
def compare(anomaly_symptom, parsed_input_symptom):
measurements_are_equal = (anomaly_symptom['measurement'] == parsed_input_symptom['measurement'])
relationships_are_equal = (anomaly_symptom['relationship'] == parsed_input_symptom['relationship'])
if measurements_are_equal and relationships_are_equal:
return True
else:
return False
# Initialize result storing variables
cardinality_for_each_anomaly = {}
size_of_each_anomaly = {}
# Loop over the anomalies
for anomaly in diagnosis:
# Initialize the cardinal counter
cardinal = 0
# Loop over A
for anomaly_symptom in symptoms_of_each_anomaly[anomaly]:
# Loop over B
for parsed_input_symptom in parsed_input_symptoms:
# Compare
are_equal = compare(anomaly_symptom, parsed_input_symptom)
if are_equal:
cardinal += 1
# Store the results
cardinality_for_each_anomaly[anomaly] = cardinal
size_of_each_anomaly[anomaly] = len(symptoms_of_each_anomaly[anomaly])
# **************************************************
# **************************************************
# In this fourth part of the code, the score of each anomaly is computed and the final ordered list of anomalies is
# built.
# Create the result storing variable and parse the size of the requested symptoms set
scored_diagnosis = {}
total_requested_symptoms = len(requested_symptoms)
# Loop over the anomalies, compute each of the partial scores and the total scores (g1, g2 and g)
for anomaly in diagnosis:
# Compute the score
g1 = cardinality_for_each_anomaly[anomaly] / total_requested_symptoms
g2 = cardinality_for_each_anomaly[anomaly] / size_of_each_anomaly[anomaly]
g = g1 * g2
# Round it for the frontend display
score = round(g, 2)
# Save it
scored_diagnosis[anomaly] = score
# Sort the result according to the scores
ordered_diagnosis = {k: v for k, v in sorted(scored_diagnosis.items(), key=lambda item1: item1[1])}
# Convert the dictionary to a list of its keys
ordered_diagnosis = list(ordered_diagnosis.keys())
ordered_diagnosis.reverse()
# Trim the list to the top 7 items
top_n_diagnosis = []
size_limit = min(7, len(ordered_diagnosis))
for i in range(0, size_limit):
anomaly = ordered_diagnosis[i]
score = scored_diagnosis[anomaly]
text_score = ""
if score < 0.33:
text_score = "Least likely"
elif score < 0.66:
text_score = "Somewhat likely"
else:
text_score = "Very likely"
top_n_diagnosis.append({'name': anomaly, 'score': score, 'text_score': text_score})
# Return result
final_diagnosis = top_n_diagnosis
return final_diagnosis
def retrieve_all_anomalies():
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# Build and send the query
query = 'MATCH (n:Anomaly) RETURN DISTINCT n.Title'
result = session.run(query)
# Parse the result
anomaly_list = []
for item in result:
anomaly_list.append(item[0])
return anomaly_list
def retrieve_all_measurements():
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# Build and send the query
query = 'MATCH (m:Measurement) RETURN DISTINCT m.Name'
result = session.run(query)
# Parse the result
measurement_list = []
for item in result:
measurement_list.append(item[0])
return measurement_list
def retrieve_all_measurements_parameter_groups():
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# Build and send the query
query = 'MATCH (m:Measurement) RETURN DISTINCT m.ParameterGroup'
result = session.run(query)
# Parse the result
measurement_list = []
for item in result:
if item[0] is not None and item[0] != '':
measurement_list.append(item[0])
return measurement_list
def retrieve_all_procedures():
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# Build and send the query
query = 'MATCH (p:Procedure) RETURN DISTINCT p.Title'
result = session.run(query)
# Parse the result
procedure_list = []
for item in result:
procedure_list.append(item[0])
return procedure_list
def retrieve_procedures_fTitle_from_anomaly(anomaly_name):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# Build and send the query
query = "MATCH (a:Anomaly)-[s:Solution]-(p:Procedure) WHERE a.Title='" + anomaly_name + "' RETURN p.fTitle ORDER BY s.Order"
result = session.run(query)
# Parse the result
procedure_list = []
for item in result:
procedure_list.append(item[0])
return procedure_list
def retrieve_procedures_title_from_anomaly(anomaly_name):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# Build and send the query
query = "MATCH (a:Anomaly)-[s:Solution]-(p:Procedure) WHERE a.Title='" + anomaly_name + "' RETURN p.Title ORDER BY s.Order"
result = session.run(query)
# Parse the result
procedure_list = []
for item in result:
procedure_list.append(item[0])
return procedure_list
def retrieve_affected_components_from_procedure(procedure):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
try:
float(procedure)
# Build and send the query because procedure is a number
query = "MATCH (p:Procedure)-[:Comprises]-(c:Component) WHERE p.pNumber='" + procedure + "' RETURN DISTINCT " \
"c.Title "
except ValueError:
print("Not a number.")
# check if it is full title by checking if it starts with a number
try:
float(procedure[0])
# Build and send the query because procedure is a full title
query = "MATCH (p:Procedure)-[:Comprises]-(c:Component) WHERE p.fTitle='" + procedure + "' RETURN " \
"DISTINCT " \
"c.Title "
except ValueError:
print("Not a full title.")
# Build and send the query because procedure is just a name
query = "MATCH (p:Procedure)-[:Comprises]-(c:Component) WHERE p.Title='" + procedure + "' RETURN " \
"DISTINCT " \
"c.Title "
result = session.run(query)
# Parse the result
component_list = []
for item in result:
component_list.append(item[0])
return component_list
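# The float()/except ValueError pattern above (and in the retrieval functions below) dispatches on how the
# procedure is identified. Hypothetical examples of the three cases:
#   '3.101'                        -> parses as a number, matched against p.pNumber
#   '3.101 Battery Reconditioning' -> first character parses as a number, matched against p.fTitle
#   'Battery Reconditioning'       -> neither parses, matched against p.Title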
def retrieve_time_from_procedure(procedure):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
try:
float(procedure)
# Build and send the query because procedure is a number
query = "MATCH (p:Procedure) WHERE p.pNumber='" + procedure + "' RETURN DISTINCT p.ETR"
except ValueError:
print("Not a number.")
# check if it is full title by checking if it starts with a number
try:
float(procedure[0])
# Build and send the query because procedure is a full title
query = "MATCH (p:Procedure) WHERE p.fTitle='" + procedure + "' RETURN DISTINCT p.ETR"
except ValueError:
print("Not a full title.")
# Build and send the query because procedure is just a name
query = "MATCH (p:Procedure) WHERE p.Title='" + procedure + "' RETURN DISTINCT p.ETR"
result = session.run(query)
# Parse the result
procedure_time_list = []
for item in result:
procedure_time_list.append(item[0])
time = procedure_time_list[0]
return time
def retrieve_risks_from_anomaly(anomaly_name):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# Build and send the query
query = "MATCH (a:Anomaly)-[:Can_Cause]-(r:Risk) WHERE a.Title='" + anomaly_name + "' RETURN DISTINCT r.Title"
result = session.run(query)
# Parse the result
risks_list = []
for item in result:
risks_list.append(item[0])
return risks_list
def retrieve_affected_subsystems_from_anomaly(anomaly_name):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# Build and send the query
query = "MATCH (a:Anomaly)-[:Affects]-(s:SubSystem) WHERE a.Title='" + anomaly_name + "' RETURN DISTINCT s.Title"
result = session.run(query)
# Parse the result
subsystems_list = []
for item in result:
subsystems_list.append(item[0])
return subsystems_list
def retrieve_symptoms_from_anomaly(anomaly_name):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# Build and send the query to obtain the affected measurements that exceed the upper caution limit
query_UpperCautionLimit = "MATCH (a:Anomaly)-[r:Exceeds_UpperCautionLimit]-(m:Measurement) WHERE a.Title='" + anomaly_name + \
"' RETURN DISTINCT m.Name, m.ParameterGroup"
result_UpperCautionLimit = session.run(query_UpperCautionLimit)
# Parse the result
symptoms_list_UpperCautionLimit = []
for item in result_UpperCautionLimit:
measurement_name = item[0] + ' (' + item[1] + ')'
symptoms_list_UpperCautionLimit.append(measurement_name)
# Build and send the query to obtain the affected measurements that exceed the lower caution limit
query_LowerCautionLimit = "MATCH (a:Anomaly)-[r:Exceeds_LowerCautionLimit]-(m:Measurement) WHERE a.Title='" + anomaly_name + \
"' RETURN DISTINCT m.Name, m.ParameterGroup"
result_LowerCautionLimit = session.run(query_LowerCautionLimit)
# Parse the result
symptoms_list_LowerCautionLimit = []
for item in result_LowerCautionLimit:
measurement_name = item[0] + ' (' + item[1] + ')'
symptoms_list_LowerCautionLimit.append(measurement_name)
# Build the output (making the relationship explicit)
symptoms_list = []
for measurement in symptoms_list_LowerCautionLimit:
symptom = {'measurement': measurement, 'relationship': 'Lower Caution Limit'}
symptoms_list.append(symptom)
for measurement in symptoms_list_UpperCautionLimit:
symptom = {'measurement': measurement, 'relationship': 'Upper Warning Limit'}
symptoms_list.append(symptom)
return symptoms_list
def retrieve_thresholds_from_measurement(measurement_name):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# Build and send the query
query = "MATCH (m:Measurement) WHERE m.Name='" + measurement_name + \
"' RETURN m.ParameterGroup, m.LowerWarningLimit, m.LowerCautionLimit, m.UpperCautionLimit, " \
"m.UpperWarningLimit "
result = session.run(query)
# Parse the result
parsed_result = []
for items in result:
parsed_result.append(items)
result_info = []
# Check if the parsed result is empty and proceed accordingly
if parsed_result:
for item in parsed_result:
thresholds_dict = {'ParameterGroup': item[0], 'LowerWarningLimit': item[1], 'LowerCautionLimit': item[2],
'UpperCautionLimit': item[3], 'UpperWarningLimit': item[4]}
result_info.append(thresholds_dict)
else:
result_info = {'ParameterGroup': 'None', 'LowerWarningLimit': 'None', 'LowerCautionLimit': 'None',
'UpperCautionLimit': 'None', 'UpperWarningLimit': 'None'}
return result_info
def retrieve_units_from_measurement(measurement_name):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# Build and send the query
query = "MATCH (m:Measurement) WHERE m.Name='" + measurement_name + \
"' RETURN DISTINCT m.Unit"
result = session.run(query)
# Parse the result
parsed_result = ''
for item in result:
parsed_result = item
units = parsed_result[0]
return units
def retrieve_ordered_steps_from_procedure(procedure):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
try:
float(procedure)
# Build and send the query because procedure is a number
query = "MATCH (p:Procedure)-[:Has]-(st:Step) WHERE p.pNumber='" + \
procedure + "' RETURN st.Action ORDER BY st.Title"
except ValueError:
print("Not a number.")
# check if it is full title by checking if it starts with a number
try:
float(procedure[0])
# Build and send the query because procedure is a full title
query = "MATCH (p:Procedure)-[:Has]-(st:Step) WHERE p.fTitle='" + \
procedure + "' RETURN st.Action ORDER BY st.Title"
except ValueError:
print("Not a full title.")
# Build and send the query because procedure is just a name
query = "MATCH (p:Procedure)-[:Has]-(st:Step) WHERE p.Title='" + \
procedure + "' RETURN st.Action ORDER BY st.Title"
result = session.run(query)
# Parse the result
steps_list = []
for item in result:
steps_list.append(item[0])
return steps_list
def retrieve_fancy_steps_from_procedure(procedure):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
try:
float(procedure)
# Build and send the query because procedure is a number
query_step_labels = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.pNumber=\'' + procedure + '\' RETURN s.Title ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
query_step_actions = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.pNumber=\'' + procedure + '\' RETURN s.Action ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
query_step_figures = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.pNumber=\'' + procedure + '\' RETURN s.Link ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
query_step_fNumbers = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.pNumber=\'' + procedure + '\' RETURN s.fNumber ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
query_step_figures2 = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.pNumber=\'' + procedure + '\' RETURN s.Link2 ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
query_step_fNumbers2 = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.pNumber=\'' + procedure + '\' RETURN s.fNumber2 ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
except ValueError:
print("Not a number.")
# check if it is full title by checking if it starts with a number
try:
float(procedure[0])
# Build and send the query because procedure is a full title
query_step_labels = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.fTitle=\'' + procedure + '\' RETURN s.Title ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
query_step_actions = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.fTitle=\'' + procedure + '\' RETURN s.Action ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
query_step_figures = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.fTitle=\'' + procedure + '\' RETURN s.Link ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
query_step_fNumbers = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.fTitle=\'' + procedure + '\' RETURN s.fNumber ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
query_step_figures2 = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.fTitle=\'' + procedure + '\' RETURN s.Link2 ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
query_step_fNumbers2 = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.fTitle=\'' + procedure + '\' RETURN s.fNumber2 ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
except ValueError:
print("Not a full title.")
# Build and send the query because procedure is just a name
query_step_labels = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.Title=\'' + procedure + '\' RETURN s.Title ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
query_step_actions = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.Title=\'' + procedure + '\' RETURN s.Action ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
query_step_figures = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.Title=\'' + procedure + '\' RETURN s.Link ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
query_step_fNumbers = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.Title=\'' + procedure + '\' RETURN s.fNumber ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
query_step_figures2 = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.Title=\'' + procedure + '\' RETURN s.Link2 ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
query_step_fNumbers2 = 'MATCH(p:Procedure)-[r:Has]->(s) WHERE p.Title=\'' + procedure + '\' RETURN s.fNumber2 ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note'
# Run the queries
result_step_labels = session.run(query_step_labels)
result_step_actions = session.run(query_step_actions)
result_step_figures = session.run(query_step_figures)
result_step_fNumbers = session.run(query_step_fNumbers)
result_step_figures2 = session.run(query_step_figures2)
result_step_fNumbers2 = session.run(query_step_fNumbers2)
step_labels = []
for item in result_step_labels:
step_labels.append(item[0])
step_actions = []
for item in result_step_actions:
step_actions.append(item[0])
step_figures = []
for item in result_step_figures:
step_figures.append(item[0])
step_fNumbers = []
step_hasFigure = []
for item in result_step_fNumbers:
step_fNumbers.append(item[0])
if item[0] is not None:
step_hasFigure.append(True)
else:
step_hasFigure.append(False)
step_figures2 = []
for item in result_step_figures2:
step_figures2.append(item[0])
step_fNumbers2 = []
step_hasFigure2 = []
for item in result_step_fNumbers2:
step_fNumbers2.append(item[0])
if item[0] is not None:
step_hasFigure2.append(True)
else:
step_hasFigure2.append(False)
# Parse the result
steps = []
label_counter = {
'steps': 0,
'substeps': 0,
'subsubsteps': 0
}
for index, step in enumerate(step_labels):
isStep = True
# Retrieve the depth from the label points
label_points = 0
for char in step_labels[index]:
if char == '.':
label_points += 1
depth = label_points
# Decide whether the step should be initially enabled or not
is_enabled = False
if depth == 0:
if label_counter['steps'] == 0:
is_enabled = True
label_counter['steps'] += 1
if depth == 1:
if label_counter['steps'] == 1 and label_counter['substeps'] == 0:
is_enabled = True
label_counter['substeps'] += 1
isStep = False
if depth == 2:
if label_counter['steps'] == 1 and label_counter['substeps'] == 1 and label_counter['subsubsteps'] == 0:
is_enabled = True
label_counter['subsubsteps'] += 1
isStep = False
# Build the parsed item
step_item = {'depth': depth,
'label': step_labels[index],
'action': step_actions[index],
'figure': step_figures[index],
'fNumber': step_fNumbers[index],
'hasFigure': step_hasFigure[index],
'figure2': step_figures2[index],
'fNumber2': step_fNumbers2[index],
'hasFigure2': step_hasFigure2[index],
'isDone': False,
'isStep': isStep}
steps.append(step_item)
return steps
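# Example of the depth/label handling above (hypothetical step labels):
#   '2'     -> 0 dots -> depth 0 (top-level step)
#   '2.1'   -> 1 dot  -> depth 1 (substep)
#   '2.1.3' -> 2 dots -> depth 2 (sub-substep)
# Roughly, only the first step, the first substep and the first sub-substep encountered are flagged as
# initially enabled.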
def retrieve_objective_from_procedure(procedure):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
try:
float(procedure)
# Build and send the query because procedure is a number
query = "MATCH (p:Procedure) WHERE p.pNumber='" + procedure + "' RETURN p.Objective"
except ValueError:
print("Not a number.")
# check if it is full title by checking if it starts with a number
try:
float(procedure[0])
# Build and send the query because procedure is a full title
query = "MATCH (p:Procedure) WHERE p.fTitle='" + procedure + "' RETURN p.Objective"
except ValueError:
print("Not a full title.")
# Build and send the query because procedure is just a name
query = "MATCH (p:Procedure) WHERE p.Title='" + procedure + "' RETURN p.Objective"
result = session.run(query)
# Parse the result
objective_list = []
for item in result:
objective_list.append(item[0])
if len(objective_list) == 0:
objective = 'ERROR: missing objective description.'
else:
objective = objective_list[0]
return objective
def retrieve_equipment_from_procedure(procedure):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
try:
float(procedure)
# Build and send the query because procedure is a number
query = "MATCH (p:Procedure)-[Uses]->(e:Equipment) WHERE p.pNumber='" + procedure + "' RETURN e.Title"
except ValueError:
print("Not a number.")
# check if it is full title by checking if it starts with a number
try:
float(procedure[0])
# Build and send the query because procedure is a full title
query = "MATCH (p:Procedure)-[Uses]->(e:Equipment) WHERE p.fTitle='" + procedure + "' RETURN e.Title"
except ValueError:
print("Not a full title.")
# Build and send the query because procedure is just a name
query = "MATCH (p:Procedure)-[Uses]->(e:Equipment) WHERE p.Title='" + procedure + "' RETURN e.Title"
result = session.run(query)
# Parse the result
equipment_list = []
for item in result:
equipment_list.append(item[0])
if len(equipment_list) == 0:
equipment = ['ERROR: missing equipment list.']
else:
equipment = equipment_list
return equipment
def retrieve_references_from_procedure(procedure):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
try:
float(procedure)
# Build and send the query because procedure is a number
query = "MATCH (p:Procedure)-[:Uses]->(r:Reference) WHERE p.pNumber='" + procedure + "' RETURN r.Title"
except ValueError:
print("Not a number.")
# check if it is full title by checking if it starts with a number
try:
float(procedure[0])
# Build and send the query because procedure is a full title
query = "MATCH (p:Procedure)-[:Uses]->(r:Reference) WHERE p.fTitle='" + procedure + "' RETURN r.Title"
except ValueError:
print("Not a full title.")
# Build and send the query because procedure is just a name
query = "MATCH (p:Procedure)-[:Uses]->(r:Reference) WHERE p.Title='" + procedure + "' RETURN r.Title"
result = session.run(query)
# Parse the result
reference_list = []
for item in result:
reference_list.append(item[0])
return reference_list
def retrieve_reference_links_from_procedure(procedure):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
try:
float(procedure)
# Build and send the query because procedure is a number
query = "MATCH (p:Procedure)-[:Uses]->(r:Reference) WHERE p.pNumber='" + procedure + "' RETURN r.Procedure"
except ValueError:
print("Not a number.")
# check if it is full title by checking if it starts with a number
try:
float(procedure[0])
# Build and send the query because procedure is a full title
query = "MATCH (p:Procedure)-[:Uses]->(r:Reference) WHERE p.fTitle='" + procedure + "' RETURN r.Procedure"
except ValueError:
print("Not a full title.")
# Build and send the query because procedure is just a name
query = "MATCH (p:Procedure)-[:Uses]->(r:Reference) WHERE p.Title='" + procedure + "' RETURN r.Procedure"
result = session.run(query)
# Parse the result
reference_list = []
for item in result:
reference_list.append(item[0])
return reference_list
def retrieve_figures_from_procedure(procedure):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
try:
float(procedure)
# Build and send the query because procedure is a number
query = "MATCH(p:Procedure)-[r:Has]->(f:Figure) WHERE p.pNumber=\'" + procedure + \
"\'RETURN f.Link ORDER BY f.Number"
except ValueError:
print("Not a number.")
# check if it is full title by checking if it starts with a number
try:
float(procedure[0])
# Build and send the query because procedure is a full title
query = "MATCH(p:Procedure)-[r:Has]->(f:Figure) WHERE p.fTitle=\'" + procedure + \
"\'RETURN f.Link ORDER BY f.Number"
except ValueError:
print("Not a full title.")
# Build and send the query because procedure is just a name
query = "MATCH(p:Procedure)-[r:Has]->(f:Figure) WHERE p.Title=\'" + procedure + \
"\'RETURN f.Link ORDER BY f.Number"
result = session.run(query)
# Parse the result
figure_list = []
for item in result:
figure_list.append(item[0])
return figure_list
def retrieve_all_components():
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# Build and send the query
query1 = 'MATCH (n) WHERE EXISTS(n.Link) RETURN DISTINCT n.Link AS Link UNION ALL MATCH ()-[r]-() WHERE ' \
'EXISTS(r.Link) RETURN DISTINCT r.Link AS Link '
query2 = 'MATCH (n) WHERE EXISTS(n.Link2) RETURN DISTINCT n.Link2 AS Link2 UNION ALL MATCH ()-[r]-() WHERE ' \
'EXISTS(r.Link2) RETURN DISTINCT r.Link2 AS Link2 '
result1 = session.run(query1)
result2 = session.run(query2)
components_list = []
for items in itertools.chain(result1, result2):
for item in items:
components_list.append(item)
return components_list
def retrieve_all_procedure_numbers():
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# Build and send the query
query = "MATCH (p:Procedure) RETURN DISTINCT p.pNumber"
result = session.run(query)
# Parse the result
procedure_numbers = []
for item in result:
procedure_numbers.append(item[0])
return procedure_numbers
def retrieve_all_step_numbers():
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# Build and send the query
query = 'MATCH (n) WHERE EXISTS(n.SubStep) RETURN DISTINCT n.Title AS Title UNION ALL MATCH (m) WHERE ' \
'EXISTS(m.SubSubStep) RETURN DISTINCT m.Title AS Title'
result = session.run(query)
# Parse the result
step_numbers = []
for item in result:
step_numbers.append(item[0])
return step_numbers
def retrieve_procedures_from_pNumber(pNumber):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# Build and send the query
query = "MATCH (p:Procedure) WHERE p.pNumber='" + pNumber + "' RETURN p.Title"
result = session.run(query)
procedure = ''
# Parse the result
for item in result:
procedure = item[0]
return procedure
def retrieve_step_from_procedure(step_number, procedure):
# Setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
try:
float(procedure)
# Build and send the query because procedure is a number
query = "MATCH (p:Procedure)-[:Has]->(s) WHERE p.pNumber='" + procedure + \
"' AND s.Title='" + step_number + "' RETURN s.Action"
except ValueError:
print("Not a number.")
# check if it is full title by checking if it starts with a number
try:
float(procedure[0])
# Build and send the query because procedure is a full title
query = "MATCH (p:Procedure)-[:Has]->(s) WHERE p.fTitle='" + procedure + \
"' AND s.Title='" + step_number + "' RETURN s.Action"
except ValueError:
print("Not a full title.")
# Build and send the query because procedure is just a name
query = "MATCH (p:Procedure)-[:Has]->(s) WHERE p.Title='" + procedure + \
"' AND s.Title='" + step_number + "' RETURN s.Action"
result = session.run(query)
# Parse the result
for item in result:
step = item[0]
return step
|
|
# Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import struct
from .errors import ReadError
skip = '_'
def none():
"""A ReadWriter that consumes nothing and returns None."""
return NoneReadWriter()
def constant(rw, value):
"""A ReadWriter that runs the given ReadWriter and ignores the value.
Always writes and returns ``value`` instead.
:param rw:
ReadWriter to run
:param value:
Value to serialize and return
"""
return ConstantReadWriter(rw, value)
def number(width_bytes):
"""Build a ReadWriter for integers of the given width.
:param width_bytes:
Width of the integer. One of 1, 2, 4 and 8.
"""
return NumberReadWriter(width_bytes)
def args(length_rw):
"""Build a ReadWriter for args=[arg1, arg2, arg3]
:param length_rw:
ReadWriter for the length of each arg
"""
return ArgsReaderWriter(length_rw)
def len_prefixed_string(length_rw, is_binary=False):
"""Build a ReadWriter for strings prefixed with their length.
.. code-block:: python
len_prefixed_string(number(2)) # == str~2
:param length_rw:
ReadWriter for the length of the string
:param is_binary:
Whether the string is a binary blob. If this is False (the default),
the string will be encoded/decoded to UTF-8 before writing/reading.
"""
return LengthPrefixedBlobReadWriter(length_rw, is_binary)
def chain(*rws):
"""Build a ReadWriter from the given list of ReadWriters.
.. code-block:: python
chain(
number(1),
number(8),
len_prefixed_string(number(2)),
) # == n1:1 n2:8 s~2
Reads/writes from the given ReadWriters in-order. Returns lists of values
in the same order as the ReadWriters.
:param rws:
One or more ReadWriters
"""
assert rws is not None
if len(rws) == 1 and isinstance(rws[0], list):
# In case someone does chain([l0, l1, ...])
rws = rws[0]
return ChainReadWriter(rws)
def dictionary(*pairs):
"""Build a ReadWriter that reads/writes dictionaries.
``pairs`` are tuples containing field names and their corresponding
ReadWriters. The fields will be read and written in the same order
provided here.
For example the following ReadWriter will read and write dictionaries in
the form ``{"flags": <byte>, "id": <int32>}``.
.. code-block:: python
dictionary(
("flags", number(1)),
("id", number(4)),
)
For pairs where the key name is `rw.skip`, the value will not be saved and
the serializer will receive None.
:param pairs:
One or more tuples in the form ``(<field name>, <ReadWriter>)``.
"""
return NamedChainReadWriter(pairs)
def instance(cls, *pairs):
"""Build a ReadWriter that reads/writes intances of the given class.
``pairs`` are key-value pairs that specify constructor argument names and
their corresponding ReadWriters. These same names are used to access
attributes on instances when writing.
.. code-block:: python
instance(
Person,
("name", len_prefixed_string(number(2))),
("age", number(1))
)
For pairs where the attribute name is `rw.skip`, the value will not be
passed to the constructor. Further, while serializing, None will be passed
to the serializer.
:param cls:
A class with an ``__init__`` method accepting keyword arguments for
all items specified in ``pairs``
:param pairs:
Key-value pairs mapping argument name to ReadWriter.
"""
return InstanceReadWriter(cls, pairs)
def headers(length_rw, key_rw, value_rw=None):
"""Build a ReadWriter for header lists.
A header is represented as::
count:L (key:K value:V){count}
The value produced is a list of key-value pairs. For example,
.. code-block:: python
headers(
number(L),
len_prefixed_string(number(K)),
len_prefixed_string(number(V)),
)
:param length_rw:
ReadWriter for the number of pairs in the header
:param key_rw:
ReadWriter for a key in a pair
:param value_rw:
ReadWriter for a value in a pair. Defaults to ``key_rw``.
"""
return HeadersReadWriter(length_rw, key_rw, value_rw)
def switch(switch_rw, cases):
"""A ReadWriter that picks behavior based on the value of ``switch_rw``.
.. code-block:: python
switch(
number(1), {
0: option_1_rw(),
1: option_2_rw()
}
)
Produces a tuple in the form ``(switch_value, case_value)``. If a given
switch value did not have a corresponding case, nothing will be written to
the stream and None will be returned as the value when reading.
:param switch_rw:
A ReadWriter that produces a value to dispatch on
:param cases:
Pairs where the key is the expected value from ``switch_rw``. If the
value matches, the corresponding ReadWriter will be executed.
"""
return SwitchReadWriter(switch_rw, cases)
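# Usage sketch (not part of the original module; assumes an in-memory byte stream):
#
#     import io
#     name_rw = len_prefixed_string(number(2))     # == str~2
#     buf = io.BytesIO()
#     name_rw.write(u'hello', buf)                 # writes b'\x00\x05hello'
#     buf.seek(0)
#     assert name_rw.read(buf) == u'hello'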
class ReadWriter(object):
"""Provides the ability to read/write types from/to file-like objects.
ReadWriters SHOULD NOT maintain any state between calls to
``read``/``write`` and MUST be re-usable and thread-safe. The
``read``/``write`` methods MAY be called on the same ReadWriter instance
multiple times for different requests at the same time.
The file-like stream object MUST provide ``read(int)`` and ``write(str)``
methods with behaviors as follows:
``read(int)``
MUST return the specified number of bytes from the stream. MAY return
fewer bytes if the end of the stream was reached.
``write(str)``
MUST write the given string or buffer to the stream.
"""
def read(self, stream):
"""Read and return the object from the stream.
:param stream:
file-like object providing a `read(int)` method
:returns: the deserialized object
:raises ReadError:
for parse errors or if the input is too short
"""
raise NotImplementedError()
def write(self, obj, stream):
"""Write the object to the stream.
:param stream:
file-like object providing a `write(str)` method
:returns:
the stream
"""
raise NotImplementedError()
def length(self, obj):
"""Return the number of bytes will actually be written into io.
For cases where the width depends on the input, this should return the
length of data will be written into iostream."""
raise NotImplementedError()
def width(self):
"""Return the number of bytes this ReadWriter is expected to take.
For cases where the width depends on the input, this should return the
minimum width the ReadWriter is expected to take."""
raise NotImplementedError()
def take(self, stream, num):
"""Read the given number of bytes from the stream.
:param stream:
stream to read from
:param num:
number of bytes to read
:raises ReadError:
if the stream did not yield the exact number of bytes expected
"""
s = stream.read(num)
slen = len(s)
if slen != num:
raise ReadError(
"Expected %d bytes but got %d bytes." % (num, slen)
)
return s
class DelegatingReadWriter(ReadWriter):
"""Allows mapping ReadWriters onto different types.
A common pattern is to define a base ReadWriter using the primitives from
this module and then map those onto custom types.
For example, consider a Person class.
.. code-block:: python
Person = namedtuple('Person', 'name age')
Given a ReadWriter that produces a ``(name, age)`` tuple, we want to map
it to/from a Person object.
.. code-block:: python
class PersonReadWriter(DelegatingReadWriter):
__rw__ = # a ReadWriter that produces (name, age) tuples
def read(self, stream):
(name, age) = super(PersonReadWriter, self).read(stream)
return Person(name, age)
def write(self, person, stream):
super(PersonReadWriter, self).write(
(person.name, person.age),
stream,
)
"""
# The underlying ReadWriter. All calls will be delegated to this.
__rw__ = None
class __metaclass__(type):
def __new__(mcs, name, bases, dct):
if bases != (ReadWriter,):
# Children of this class MUST provide __rw__
assert dct.get('__rw__'), (
"%s.__rw__ must be set" % name
)
return type.__new__(mcs, name, bases, dct)
def read(self, stream):
return self.__rw__.read(stream)
def write(self, obj, stream):
self.__rw__.write(obj, stream)
return stream
def width(self):
return self.__rw__.width()
def length(self, obj):
return self.__rw__.length(obj)
class NumberReadWriter(ReadWriter):
"""See :py:func:`number` for documentation."""
_FORMATS = {
1: '>B',
2: '>H',
4: '>I',
8: '>q',
}
__slots__ = ('_width', '_format')
def __init__(self, width_bytes):
assert width_bytes in self._FORMATS, (
"Unsupported integer width '%d'" % width_bytes
)
self._width = width_bytes
self._format = self._FORMATS[width_bytes]
def read(self, stream):
return struct.unpack(self._format, self.take(stream, self._width))[0]
def write(self, num, stream):
stream.write(struct.pack(self._format, num))
return stream
def width(self):
return self._width
def length(self, obj):
return self._width
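# For example (hypothetical stream contents), number(2) reads/writes a big-endian unsigned 16-bit integer:
#
#     import io
#     assert number(2).read(io.BytesIO(b'\x01\x00')) == 256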
class ArgsReaderWriter(ReadWriter):
def __init__(self, length_rw, num=3):
assert length_rw is not None
self._length_rw = length_rw
self._rw = len_prefixed_string(self._length_rw,
is_binary=True)
self.num = num
def read(self, stream):
args = []
try:
for _ in range(self.num):
args.append(self._rw.read(stream))
except ReadError:
pass
return args
def write(self, args, stream):
for arg in args:
if arg is None:
arg = ""
self._rw.write(arg, stream)
def width(self):
return self.num * self._length_rw.width()
def length(self, args):
size = 0
for arg in args:
if arg is None:
arg = ""
size += self._rw.length(arg)
return size
class LengthPrefixedBlobReadWriter(ReadWriter):
"""See :py:func:`len_prefixed_string` for documentation."""
__slots__ = ('_length', '_is_binary')
def __init__(self, length_rw, is_binary=False):
assert length_rw is not None
self._length = length_rw
self._is_binary = is_binary
def read(self, stream):
length = self._length.read(stream)
if length == 0:
return ""
else:
blob = self.take(stream, length)
if not self._is_binary:
blob = blob.decode('utf-8')
return blob
def write(self, s, stream):
if not self._is_binary:
s = s.encode('utf-8')
length = len(s)
self._length.write(length, stream)
stream.write(s)
return stream
def width(self):
return self._length.width()
def length(self, s):
if not self._is_binary:
s = s.encode('utf-8')
return len(s) + self._length.width()
class ChainReadWriter(ReadWriter):
"""See :py:func:`chain` for documentation."""
__slots__ = ('_links',)
def __init__(self, links):
assert links is not None
self._links = tuple(links)
def read(self, stream):
return [link.read(stream) for link in self._links]
def write(self, items, stream):
assert len(items) == len(self._links)
for item, link in zip(items, self._links):
link.write(item, stream)
return stream
def width(self):
return sum(link.width() for link in self._links)
def length(self, items):
assert len(items) == len(self._links)
size = 0
for item, link in zip(items, self._links):
size += link.length(item)
return size
class NamedChainReadWriter(ReadWriter):
"""See :py:func:`dictionary` for documentation."""
__slots__ = ('_pairs',)
def __init__(self, pairs):
assert pairs is not None
self._pairs = pairs
def read(self, stream):
result = {}
for name, rw in self._pairs:
try:
value = rw.read(stream)
if name != skip:
result[name] = value
except ReadError as e:
raise ReadError(
"Failed to read %s: %s" % (name, e.message)
)
return result
def write(self, obj, stream):
for name, rw in self._pairs:
if name != skip:
rw.write(obj[name], stream)
else:
rw.write(None, stream)
return stream
def width(self):
return sum(rw.width() for _, rw in self._pairs)
def length(self, obj):
size = 0
for name, rw in self._pairs:
if name != skip:
size += rw.length(obj[name])
else:
size += rw.length(None)
return size
class InstanceReadWriter(ReadWriter):
__slots__ = ('_cls', '_pairs',)
def __init__(self, cls, pairs):
self._pairs = pairs
self._cls = cls
def read(self, stream):
kwargs = {}
try:
for attr, rw in self._pairs:
value = rw.read(stream)
if attr != skip:
kwargs[attr] = value
except ReadError as e:
raise ReadError(
"Failed to read %s: %s" % (self._cls, e.message)
)
return self._cls(**kwargs)
def write(self, obj, stream):
for attr, rw in self._pairs:
if attr != skip:
value = getattr(obj, attr)
rw.write(value, stream)
else:
rw.write(None, stream)
return stream
def width(self):
return sum(rw.width() for _, rw in self._pairs)
def length(self, obj):
size = 0
for attr, rw in self._pairs:
if attr != skip:
value = getattr(obj, attr)
size += rw.length(value)
else:
size += rw.length(None)
return size
def length_no_args(self, obj):
size = 0
for attr, rw in self._pairs:
if attr == "args":
continue
if attr != skip:
value = getattr(obj, attr)
size += rw.length(value)
else:
size += rw.length(None)
return size
class HeadersReadWriter(ReadWriter):
"""See :py:func:`headers` for documentation."""
__slots__ = ('_length', '_key', '_value')
def __init__(self, length_rw, key_rw, value_rw=None):
self._length = length_rw
self._pair = chain(key_rw, value_rw or key_rw)
def read(self, stream):
count = self._length.read(stream)
headers = []
for i in range(count):
headers.append(self._pair.read(stream))
return headers
def write(self, headers, stream):
# In case someone does write({..}, stream)
if isinstance(headers, dict):
headers = headers.items()
self._length.write(len(headers), stream)
for pair in headers:
self._pair.write(pair, stream)
return stream
def width(self):
return self._length.width()
def length(self, headers):
size = 0
if isinstance(headers, dict):
headers = headers.items()
size += self._length.length(len(headers))
for pair in headers:
size += self._pair.length(pair)
return size
class NoneReadWriter(ReadWriter):
def read(self, stream):
return None
def write(self, _, stream):
return stream
def width(self):
return 0
def length(self, obj):
return 0
class ConstantReadWriter(ReadWriter):
__slots__ = ('_rw', '_value')
def __init__(self, rw, value):
self._rw = rw
self._value = value
def read(self, stream):
self._rw.read(stream)
return self._value
def write(self, out, stream):
self._rw.write(self._value, stream)
return stream
def width(self):
return self._rw.width()
def length(self, obj):
return self._rw.width()
class SwitchReadWriter(ReadWriter):
__slots__ = ('_switch', '_cases')
def __init__(self, switch_rw, cases_rw):
self._switch = switch_rw
self._cases = cases_rw
def read(self, stream):
k = self._switch.read(stream)
if k in self._cases:
v = self._cases[k].read(stream)
return (k, v)
else:
return (k, None)
def write(self, item, stream):
k, v = item
self._switch.write(k, stream)
if v is not None and k in self._cases:
self._cases[k].write(v, stream)
return stream
def width(self):
return self._switch.width()
def length(self, item):
k, v = item
size = 0
size += self._switch.length(k)
if v is not None and k in self._cases:
size += self._cases[k].length(v)
return size
|
|
# -*- coding: utf-8 -*-
# MonAMI Asterisk Manager Interface Server
# Asterisk AMI Emulator Handler Process
# (c) AMOOMA GmbH 2012-2013
from threading import Thread
from log import ldebug, linfo, lwarn, lerror, lcritic
from time import sleep
from traceback import format_exc
from collections import deque
from urllib import unquote
from asterisk import AsteriskAMIServer
from socket import SHUT_RDWR
from helper import sval
class MonAMIHandler(Thread):
def __init__(self, socket, address, event_socket=None):
Thread.__init__(self)
self.runthread = True
self.socket = socket
self.address = address
self.event_socket = event_socket
self.ami = None
self.deregister_at_server = None
self.message_pipe = deque()
self.channels = {}
self.user_password_authentication = None
self.account_name = ''
def stop(self):
ldebug('thread stop', self)
self.ami.stop()
self.runthread = False
def shutdown(self):
self.deregister_at_server(self)
ldebug('closing connection to %s:%d' % self.address)
try:
self.socket.shutdown(SHUT_RDWR)
self.socket.close()
ldebug('connection closed ', self)
except:
ldebug('connection closed by foreign host', self)
def run(self):
ldebug('starting MonAMI handler thread', self)
# starting asterisk AMI thread
self.ami = AsteriskAMIServer(self.socket, self.address, self.message_pipe)
self.ami.start()
self.ami.send_greeting()
# register for events
self.event_socket.register_client_queue(self.message_pipe, 'CHANNEL_CREATE')
self.event_socket.register_client_queue(self.message_pipe, 'CHANNEL_DESTROY')
self.event_socket.register_client_queue(self.message_pipe, 'CHANNEL_STATE')
self.event_socket.register_client_queue(self.message_pipe, 'CHANNEL_ANSWER')
self.event_socket.register_client_queue(self.message_pipe, 'CHANNEL_BRIDGE')
while self.runthread and self.ami.isAlive():
if self.message_pipe:
message = self.message_pipe.pop()
message_type = sval(message, 'type')
if message_type == 'freeswitch_event':
self.handle_fs_event(message['body'])
elif message_type == 'ami_client_message':
self.handle_ami_client_message(message['body'])
else:
sleep(0.1)
self.event_socket.deregister_client_queue_all(self.message_pipe)
ldebug('exiting MonAMI handler thread', self)
self.shutdown()
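# Example of an AMI client message as handled below (field values are hypothetical; the dict is produced by the
# AsteriskAMIServer thread and arrives through message_pipe as the 'body' of an 'ami_client_message'):
#   {'Action': 'Login', 'UserName': 'alice', 'Secret': 's3cret', 'ActionID': '1'}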
def handle_ami_client_message(self, message):
if 'Action' in message:
action = message['Action'].lower()
if action == 'login':
if 'UserName' in message:
self.account_name = message['UserName']
if 'Secret' in message and self.user_password_authentication and self.user_password_authentication(self.account_name, message['Secret']):
self.ami.send_login_ack()
ldebug('AMI connection authenticated - account: %s' % self.account_name, self)
else:
self.ami.send_login_nack()
linfo('AMI authentication failed - account: %s' % sval(message, 'UserName'), self)
self.ami.stop()
self.stop()
elif action == 'logoff':
self.ami.send_logout_ack()
ldebug('AMI logout', self)
self.ami.stop()
self.stop()
elif action == 'ping':
self.ami.send_pong(sval(message, 'ActionID'))
elif action == 'status':
self.ami.send_status_ack(sval(message, 'ActionID'))
elif action == 'command' and sval(message, 'Command') == 'core show version':
self.ami.send_asterisk_version(sval(message, 'ActionID'))
elif action == 'hangup':
account_name, separator, uuid = str(sval(message, 'Channel')).rpartition('-uuid-')
if account_name != '':
self.event_socket.hangup(uuid)
self.ami.send_hangup_ack()
elif action == 'originate':
self.message_originate(message)
elif action == 'extensionstate':
self.ami.send_extension_state(sval(message, 'ActionID'), sval(message, 'Exten'), sval(message, 'Context'))
else:
ldebug('unknown asterisk message received: %s' % message, self)
self.ami.send_message_unknown(message['Action'])
def to_unique_channel_name(self, uuid, channel_name):
# strip anything left of sip_account_name
path, separator, contact_part = channel_name.rpartition('/sip:')
if path == '':
path, separator, contact_part = channel_name.rpartition('/')
# if failed return name unchanged
if path == '':
return channel_name
# strip domain part
account_name = contact_part.partition('@')[0]
# if failed return name unchanged
if account_name == '':
return channel_name
# create unique channel name
return 'SIP/%s-uuid-%s' % (account_name, uuid)
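# Example (hypothetical values): for uuid 'abcd-1234' and FreeSWITCH channel name
# 'sofia/internal/sip:alice@example.com', the account part 'alice' is extracted and the result is
# 'SIP/alice-uuid-abcd-1234'.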
def message_originate(self, message):
destination_number = str(sval(message, 'Exten'))
action_id = sval(message, 'ActionID')
self.ami.send_originate_ack(action_id)
uuid = self.event_socket.originate(self.account_name, destination_number, action_id)
def handle_fs_event(self, event):
event_type = event['Event-Name']
#ldebug('event type received: %s' % event_type, self)
event_types = {
'CHANNEL_CREATE': self.event_channel_create,
'CHANNEL_DESTROY': self.event_channel_destroy,
'CHANNEL_STATE': self.event_channel_state,
'CHANNEL_ANSWER': self.event_channel_answer,
'CHANNEL_BRIDGE': self.event_channel_bridge,
}
uuid = event_types[event_type](event)
if not uuid:
return False
channel = sval(self.channels, uuid)
if not channel:
return False
o_uuid = channel['o_uuid']
o_channel = sval(self.channels, o_uuid)
if sval(channel, 'origination_action') or sval(o_channel, 'origination_action'):
if not sval(channel, 'ami_start') and not sval(o_channel, 'ami_start'):
if sval(channel, 'owned') and sval(channel, 'origination_action'):
ldebug('sending AMI events for originate call start (on this channel): %s' % uuid, self)
self.ami_send_originate_start(channel)
self.channels[uuid]['ami_start'] = True
elif sval(o_channel, 'owned') and sval(o_channel, 'origination_action'):
ldebug('sending AMI events for originate call start (on other channel): %s' % uuid, self)
self.ami_send_originate_start(o_channel)
self.channels[o_uuid]['ami_start'] = True
elif o_channel:
if sval(channel, 'owned') and sval(channel, 'origination_action'):
ldebug('sending AMI events for originate call progress (on this channel): %s' % uuid, self)
self.ami_send_originate_outbound(channel)
self.channels[uuid]['origination_action'] = False
elif sval(o_channel, 'owned') and sval(o_channel, 'origination_action'):
ldebug('sending AMI events for originate call progress (on other channel): %s' % uuid, self)
self.ami_send_originate_outbound(o_channel)
self.channels[o_uuid]['origination_action'] = False
elif o_channel:
if not sval(channel, 'ami_start') and not sval(o_channel, 'ami_start'):
if sval(channel, 'owned') and sval(channel, 'direction') == 'inbound':
ldebug('sending AMI events for outbound call start (on this channel): %s' % uuid, self)
self.ami_send_outbound_start(channel)
self.channels[uuid]['ami_start'] = True
elif sval(o_channel, 'owned') and sval(channel, 'direction') == 'outbound':
ldebug('sending AMI events for outbound call start (on other channel): %s' % uuid, self)
self.ami_send_outbound_start(o_channel)
self.channels[o_uuid]['ami_start'] = True
if not sval(channel, 'ami_start') and not sval(o_channel, 'ami_start'):
if sval(channel, 'owned') and sval(channel, 'direction') == 'outbound':
ldebug('sending AMI events for inbound call start (on this channel): %s' % uuid, self)
self.ami_send_inbound_start(channel)
self.channels[uuid]['ami_start'] = True
elif sval(o_channel, 'owned') and sval(channel, 'direction') == 'inbound':
ldebug('sending AMI events for inbound call start (on other channel): %s' % uuid, self)
self.ami_send_inbound_start(o_channel)
self.channels[o_uuid]['ami_start'] = True
def event_channel_create(self, event):
uuid = sval(event, 'Unique-ID')
o_uuid = sval(event, 'Other-Leg-Unique-ID')
if uuid in self.channels:
ldebug('channel already listed: %s' % uuid, self)
return False
channel_name = self.to_unique_channel_name(uuid, unquote(str(sval(event, 'Channel-Name'))))
o_channel_name = self.to_unique_channel_name(o_uuid, unquote(str(sval(event, 'Other-Leg-Channel-Name'))))
if self.account_name in channel_name:
channel_owned = True
else:
channel_owned = False
if self.account_name in o_channel_name:
channel_related = True
else:
channel_related = False
if not channel_owned and not channel_related:
ldebug('channel neither owned nor related to account: %s' % uuid, self)
return False
channel = {
'uuid': uuid,
'name': channel_name,
'direction': sval(event, 'Call-Direction'),
'channel_state': sval(event, 'Channel-State'),
'call_state': sval(event, 'Channel-Call-State'),
'answer_state': sval(event, 'Answer-State'),
'owned': channel_owned,
'related': channel_related,
'caller_id_name': unquote(str(sval(event, 'Caller-Caller-ID-Name'))),
'caller_id_number': unquote(str(sval(event, 'Caller-Caller-ID-Number'))),
'callee_id_name': unquote(str(sval(event, 'Caller-Callee-ID-Name'))),
'callee_id_number': unquote(str(sval(event, 'Caller-Callee-ID-Number'))),
'destination_number': str(sval(event, 'Caller-Destination-Number')),
'origination_action': sval(event, 'variable_origination_action'),
'o_uuid': o_uuid,
'o_name': o_channel_name,
}
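# Map the FreeSWITCH answer state onto Asterisk channel state codes (4 = Ring, 5 = Ringing, 0 = Down);
# answered channels are later reported with state 6 (Up) in event_channel_answer.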
if channel['answer_state'] == 'ringing':
if channel['direction'] == 'inbound':
asterisk_channel_state = 4
else:
asterisk_channel_state = 5
else:
asterisk_channel_state = 0
if not o_uuid:
ldebug('one legged call, channel: %s' % uuid, self)
elif o_uuid not in self.channels:
o_channel = {
'uuid': o_uuid,
'name': o_channel_name,
'direction': sval(event, 'Other-Leg-Direction'),
'channel_state': sval(event, 'Channel-State'),
'call_state': sval(event, 'Channel-Call-State'),
'answer_state': sval(event, 'Answer-State'),
'owned': channel_related,
'related': channel_owned,
'caller_id_name': unquote(str(sval(event, 'Caller-Caller-ID-Name'))),
'caller_id_number': unquote(str(sval(event, 'Caller-Caller-ID-Number'))),
'callee_id_name': unquote(str(sval(event, 'Caller-Callee-ID-Name'))),
'callee_id_number': unquote(str(sval(event, 'Caller-Callee-ID-Number'))),
'destination_number': str(sval(event, 'Other-Leg-Destination-Number')),
'o_uuid': uuid,
'o_name': channel_name,
}
if o_channel['answer_state'] == 'ringing':
if o_channel['direction'] == 'inbound':
asterisk_o_channel_state = 4
else:
asterisk_o_channel_state = 5
else:
asterisk_o_channel_state = 0
ldebug('create channel list entry for related channel: %s, name: %s' % (o_uuid, o_channel_name), self)
self.channels[o_uuid] = o_channel
else:
ldebug('updating channel: %s, name: %s, o_uuid: %s, o_name %s' % (o_uuid, o_channel_name, uuid, channel_name), self)
self.channels[o_uuid]['o_uuid'] = uuid
self.channels[o_uuid]['o_name'] = channel_name
o_channel = self.channels[o_uuid]
if channel_owned:
ldebug('create channel list entry for own channel: %s, name: %s' % (uuid, channel_name), self)
elif channel_related:
ldebug('create channel list entry for related channel: %s, name: %s' % (uuid, channel_name), self)
self.channels[uuid] = channel
return uuid
def event_channel_destroy(self, event):
uuid = sval(event, 'Unique-ID')
hangup_cause_code = int(sval(event, 'variable_hangup_cause_q850'))
channel = sval(self.channels, uuid)
if channel:
channel['hangup_cause_code'] = hangup_cause_code
if sval(channel, 'ami_start'):
self.ami_send_outbound_end(channel)
del self.channels[uuid]
ldebug('channel removed from list: %s, cause %d' % (uuid, hangup_cause_code), self)
return uuid
def event_channel_state(self, event):
uuid = sval(event, 'Unique-ID')
channel_state = sval(event, 'Channel-State')
call_state = sval(event, 'Channel-Call-State')
answer_state = sval(event, 'Answer-State')
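        # Note: the 'and False' in the check below short-circuits the branch, so
        # these per-event channel state updates are effectively disabled as written.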
if sval(self.channels, uuid) and False:
ldebug('updating channel state - channel: %s, channel_state: %s, call_state %s, answer_state: %s' % (uuid, channel_state, call_state, answer_state), self)
self.channels[uuid]['channel_state'] = channel_state
self.channels[uuid]['call_state'] = call_state
self.channels[uuid]['answer_state'] = answer_state
return uuid
def event_channel_answer(self, event):
uuid = sval(event, 'Unique-ID')
o_uuid = sval(event, 'Other-Leg-Unique-ID')
channel = sval(self.channels, uuid)
if not o_uuid:
o_uuid = sval(channel, 'o_uuid')
o_channel = sval(self.channels, o_uuid)
origination_action = sval(channel, 'origination_action')
if channel:
channel_state = sval(event, 'Channel-State')
call_state = sval(event, 'Channel-Call-State')
answer_state = sval(event, 'Answer-State')
ldebug('channel answered - channel: %s, owned: %s, channel_state: %s, call_state %s, answer_state: %s, other leg: %s' % (uuid, sval(channel, 'owned'), channel_state, call_state, answer_state, o_uuid), self)
self.ami.send_event_newstate(uuid, sval(channel, 'name'), 6, sval(channel, 'caller_id_number'), sval(channel, 'caller_id_name'))
self.channels[uuid]['channel_state'] = channel_state
self.channels[uuid]['call_state'] = call_state
self.channels[uuid]['answer_state'] = answer_state
if sval(channel, 'origination_action'):
if sval(channel, 'owned'):
ldebug('sending AMI originate response - success: %s' % uuid, self)
self.ami.send_event_originate_response(sval(channel, 'uuid'), sval(channel, 'name'), sval(channel, 'caller_id_number'), sval(channel, 'caller_id_name'), '101', sval(channel, 'origination_action'), 4)
elif not o_uuid:
ldebug('sending AMI events for outbound call start on one legged call (this channel): %s' % uuid, self)
self.ami_send_outbound_start(channel)
self.ami.send_event_bridge(uuid, sval(channel, 'name'), sval(channel, 'caller_id_number'), o_uuid, sval(o_channel, 'name'), sval(o_channel, 'caller_id_number'))
self.channels[uuid]['ami_start'] = True
return uuid
return False
def event_channel_bridge(self, event):
uuid = sval(event, 'Unique-ID')
o_uuid = sval(event, 'Other-Leg-Unique-ID')
ldebug('bridge channel: %s to %s' % (uuid, o_uuid), self)
channel = sval(self.channels, uuid)
o_channel = sval(self.channels, o_uuid)
if sval(channel, 'owned') or sval(o_channel, 'owned'):
ldebug('sending AMI bridge response: %s -> %s' % (uuid, o_uuid), self)
self.ami.send_event_bridge(uuid, sval(channel, 'name'), sval(channel, 'caller_id_number'), o_uuid, sval(o_channel, 'name'), sval(o_channel, 'caller_id_number'))
def ami_send_outbound_start(self, channel):
self.ami.send_event_newchannel(sval(channel, 'uuid'), sval(channel, 'name'), 0, sval(channel, 'caller_id_number'), sval(channel, 'caller_id_name'), sval(channel, 'destination_number'))
self.ami.send_event_newstate(sval(channel, 'uuid'), sval(channel, 'name'), 4, sval(channel, 'caller_id_number'), sval(channel, 'caller_id_name'))
self.ami.send_event_newchannel(sval(channel, 'o_uuid'), sval(channel, 'o_name'), 0, '', '', '')
self.ami.send_event_dial_begin(sval(channel, 'uuid'), sval(channel, 'name'), sval(channel, 'caller_id_number'), sval(channel, 'caller_id_name'), sval(channel, 'o_name'), sval(channel, 'o_uuid'), sval(channel, 'destination_number'))
self.ami.send_event_newcallerid(sval(channel, 'o_uuid'), sval(channel, 'o_name'), sval(channel, 'destination_number'), '', 0)
self.ami.send_event_newstate(sval(channel, 'o_uuid'), sval(channel, 'o_name'), 5, sval(channel, 'destination_number'), '')
def ami_send_outbound_end(self, channel):
self.ami.send_event_hangup(sval(channel, 'o_uuid'), sval(channel, 'o_name'), sval(channel, 'destination_number'), '', sval(channel, 'hangup_cause_code'))
self.ami.send_event_dial_end(sval(channel, 'uuid'), sval(channel, 'name'))
self.ami.send_event_hangup(sval(channel, 'uuid'), sval(channel, 'name'), sval(channel, 'caller_id_number'), sval(channel, 'caller_id_name'), sval(channel, 'hangup_cause_code'))
if sval(channel, 'origination_action'):
self.ami.send_event_originate_response(sval(channel, 'uuid'), sval(channel, 'name'), sval(channel, 'caller_id_number'), sval(channel, 'caller_id_name'), sval(channel, 'destination_number'), sval(channel, 'origination_action'), 1)
def ami_send_inbound_start(self, channel):
self.ami.send_event_newchannel(sval(channel, 'o_uuid'), sval(channel, 'o_name'), 0, sval(channel, 'caller_id_number'), sval(channel, 'caller_id_name'), sval(channel, 'callee_id_number'))
self.ami.send_event_newstate(sval(channel, 'o_uuid'), sval(channel, 'o_name'), 4, sval(channel, 'caller_id_number'), sval(channel, 'caller_id_name'))
self.ami.send_event_newchannel(sval(channel, 'uuid'), sval(channel, 'name'), 0, '', '', '')
self.ami.send_event_dial_begin(sval(channel, 'o_uuid'), sval(channel, 'o_name'), sval(channel, 'caller_id_number'), sval(channel, 'caller_id_name'), sval(channel, 'name'), sval(channel, 'uuid'), sval(channel, 'destination_number'))
self.ami.send_event_newstate(sval(channel, 'uuid'), sval(channel, 'name'), 5, sval(channel, 'caller_id_number'), sval(channel, 'caller_id_name'))
self.ami.send_event_newcallerid(sval(channel, 'uuid'), sval(channel, 'name'), sval(channel, 'destination_number'), '', 0)
def ami_send_originate_start(self, channel):
self.ami.send_event_newchannel(sval(channel, 'uuid'), sval(channel, 'name'), 0, '', '', '')
self.ami.send_event_newcallerid(sval(channel, 'uuid'), sval(channel, 'name'), sval(channel, 'caller_id_number'), sval(channel, 'caller_id_name'), 0)
self.ami.send_event_newaccountcode(sval(channel, 'uuid'), sval(channel, 'name'))
self.ami.send_event_newcallerid(sval(channel, 'uuid'), sval(channel, 'name'), sval(channel, 'caller_id_number'), sval(channel, 'caller_id_name'), 0)
self.ami.send_event_newstate(sval(channel, 'uuid'), sval(channel, 'name'), 5, sval(channel, 'caller_id_number'), sval(channel, 'caller_id_name'))
def ami_send_originate_outbound(self, channel):
self.ami.send_event_newchannel(sval(channel, 'o_uuid'), sval(channel, 'o_name'), 0, '', '', '')
self.ami.send_event_dial_begin(sval(channel, 'uuid'), sval(channel, 'name'), sval(channel, 'caller_id_number'), sval(channel, 'caller_id_name'), sval(channel, 'o_name'), sval(channel, 'o_uuid'), sval(channel, 'destination_number'))
self.ami.send_event_newcallerid(sval(channel, 'o_uuid'), sval(channel, 'o_name'), sval(channel, 'destination_number'), '', 0)
self.ami.send_event_newstate(sval(channel, 'o_uuid'), sval(channel, 'o_name'), 5, sval(channel, 'destination_number'), '')
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateLinkResources')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
group_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateLinkResources/{groupId}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str'),
"groupId": _SERIALIZER.url("group_id", group_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class PrivateLinkResourcesOperations(object):
"""PrivateLinkResourcesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.iothub.v2021_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.PrivateLinkResources":
"""List private link resources.
List private link resources for the given IotHub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResources, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2021_07_01.models.PrivateLinkResources
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkResources"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkResources', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateLinkResources'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
resource_name: str,
group_id: str,
**kwargs: Any
) -> "_models.GroupIdInformation":
"""Get the specified private link resource.
Get the specified private link resource for the given IotHub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param group_id: The name of the private link resource.
:type group_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GroupIdInformation, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2021_07_01.models.GroupIdInformation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GroupIdInformation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
group_id=group_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('GroupIdInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/iotHubs/{resourceName}/privateLinkResources/{groupId}'} # type: ignore
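# Illustrative usage sketch, not part of the generated module: these operations
# are normally reached through the IoT Hub management client rather than by
# calling the request builders directly. The credential/client names and the
# placeholder subscription, resource group, hub and group ids below are
# assumptions for the example only.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.iothub import IotHubClient
#
#     client = IotHubClient(DefaultAzureCredential(), "<subscription-id>")
#     resources = client.private_link_resources.list("<resource-group>", "<iot-hub-name>")
#     link = client.private_link_resources.get("<resource-group>", "<iot-hub-name>", "<group-id>")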
|
|
from .xgig import XgigEvent, ParsedCommand
from itertools import takewhile, dropwhile
import logging
COMMANDS = {
'SMART': 0xB0,
'READ_FPDMA_QUEUED' : 0x60,
'WRITE_FPDMA_QUEUED' : 0x61,
'DATA_SET_MANAGEMENT' : 0x06,
'WRITE_DMA' : 0xCA,
'WRITE_DMA_EXT' : 0x35,
'READ_DMA' : 0xC8,
'READ_DMA_EXT' : 0x25,
'FLUSH_CACHE_EXT' : 0xEA,
'IDENTIFY_DEVICE' : 0xEC,
'CHECK_POWER_MODE' : 0xE5
}
COMMANDS.update({ v : k for k, v in COMMANDS.items() })
FIS_TYPES = {
'UNKNOWN' : 0x00,
'FIS_REG_H2D' : 0x27,
'FIS_REG_D2H' : 0x34,
'FIS_DMA_ACT' : 0x39,
'FIS_DMA_SETUP' : 0x41,
'FIS_DATA' : 0x46,
'FIS_BIST' : 0x58,
'FIS_PIO_SETUP' : 0x5F,
'FIS_DEV_BITS' : 0xA1
}
FIS_TYPES.update({ v : k for k, v in FIS_TYPES.items() })
SMART = {
'READ_DATA' : 0xD0,
'SMART_READ_WARRANTY_FAILURE_THRESHOLDS' : 0xD1,
'RETURN_STATUS' : 0xDA
}
SMART.update({ v : k for k, v in SMART.items() })
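# The three tables above are made bidirectional by the update() calls, so the same
# dict resolves a mnemonic to its opcode and an opcode back to its mnemonic,
# e.g. COMMANDS['SMART'] == 0xB0 and COMMANDS[0xB0] == 'SMART'.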
def getAndAssertKnown(v, m):
assert v in m, "Unknown value %s" % hex(v)
return m[v]
class FISCommand(XgigEvent):
def __init__(self, event):
super().__init__(event)
self.fisType = self.eventData()[4]
def parseFISLBA(data):
lba = int(0)
lba = lba | data[14]
lba = lba << 8
lba = lba | data[13]
lba = lba << 8
lba = lba | data[12]
lba = lba << 8
lba = lba | data[10]
lba = lba << 8
lba = lba | data[9]
lba = lba << 8
lba = lba | data[8]
return lba
def parseFISSectorCount(data):
count = int(0)
count |= data[17]
count <<= 8
count |= data[16]
return count
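# Worked example (illustrative): parseFISLBA assembles the 48-bit LBA from FIS
# bytes 8-10 (low 24 bits) and 12-14 (high 24 bits), so a FIS whose byte 8 is
# 0x01 and whose bytes 9, 10, 12, 13 and 14 are all 0x00 yields LBA 1.
# parseFISSectorCount likewise combines byte 16 (low) and byte 17 (high) into
# the sector count.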
class FISRegH2D(FISCommand):
def __init__(self, event):
super().__init__(event)
self.lba = parseFISLBA(self.eventData())
self.command = self.eventData()[6]
class FISRegD2H(FISCommand):
def __init__(self, event):
super().__init__(event)
self.lba = parseFISLBA(self.eventData())
class FISSetDeviceBits(FISCommand):
def __init__(self, event, eqDepth):
super().__init__(event)
self.eqDepth = eqDepth
data = self.eventData()
act = int(0)
act = act | data[11]
act = act << 8
act = act | data[10]
act = act << 8
act = act | data[9]
act = act << 8
act = act | data[8]
acts = []
for x in range(32):
if act & (1 << x):
acts.append(x)
self.acts = acts
class Smart(FISRegH2D):
def __init__(self, event):
super().__init__(event)
self.feature = self.eventData()[7]
class WriteFPDMAQueued(FISRegH2D):
def __init__(self, event, qDepth):
super().__init__(event)
self.qDepth = qDepth
data = self.eventData()
count = int(0)
count = count | self.eventData()[15]
count = count << 8
count = count | self.eventData()[7]
self.sectorCount = count
self.queueTag = self.eventData()[16] >> 3
mask = 1 << 7
self.fua = (self.eventData()[11] & mask) == mask
class ReadFPDMAQueued(FISRegH2D):
def __init__(self, event, qDepth):
super().__init__(event)
self.qDepth = qDepth
data = self.eventData()
count = int(0)
count = count | self.eventData()[15]
count = count << 8
count = count | self.eventData()[7]
self.sectorCount = count
self.queueTag = self.eventData()[16] >> 3
mask = 1 << 7
self.fua = (self.eventData()[11] & mask) == mask
class FlushCacheExt(FISRegH2D):
def __init__(self, event):
super().__init__(event)
self.lba = 0
self.sectorCount = 0
class DataSetManagement(FISRegH2D):
def __init__(self, event):
super().__init__(event)
self.lba = 0
self.sectorCount = 0
class WriteDMA(FISRegH2D):
def __init__(self, event):
super().__init__(event)
self.lba &= 0xFFFFFF
self.sectorCount = parseFISSectorCount(self.eventData())
self.sectorCount = 256 if self.sectorCount == 0 else self.sectorCount
class WriteDMAExt(FISRegH2D):
def __init__(self, event):
super().__init__(event)
self.sectorCount = parseFISSectorCount(self.eventData())
self.sectorCount = 65536 if self.sectorCount == 0 else self.sectorCount
class ReadDMA(FISRegH2D):
def __init__(self, event):
super().__init__(event)
self.lba &= 0xFFFFFF
self.sectorCount = parseFISSectorCount(self.eventData())
self.sectorCount = 256 if self.sectorCount == 0 else self.sectorCount
class ReadDMAExt(FISRegH2D):
def __init__(self, event):
super().__init__(event)
self.sectorCount = parseFISSectorCount(self.eventData())
self.sectorCount = 65536 if self.sectorCount == 0 else self.sectorCount
class CheckPowerMode(FISRegH2D):
def __init__(self, event):
super().__init__(event)
class FISDMAAct(FISCommand):
def __init__(self, event):
super().__init__(event)
class Parser(object):
def __init__(self, events):
self.__events = events
self.__commands = []
self.__inFlightQueued = {}
self.__lastQueued = None
self.__inFlightUnqueued = None
self.__prevEvent = None
def __iter__(self):
for e in self.__events:
if "sata" not in e:
continue
sata = e["sata"]
fisType = sata["fisType"]
fisType = getAndAssertKnown(fisType, FIS_TYPES)
self.handle(fisType, e)
self.__commands.sort(key=lambda c: c.sTime())
for c in takewhile(lambda c: c.done, self.__commands):
yield c
self.__commands[:] = list(dropwhile(lambda c: c.done, self.__commands))
self.__commands.sort(key=lambda c: c.sTime())
for cmd in self.__commands:
yield cmd
def handle(self, t, e):
h = getattr(self, t, None)
self.LOGGER.warn("Unhandled %s (%s, %s)", t, e["metadata"]["id"], e["metadata"]["sTimestamp"]) if h is None else h(e)
def FIS_REG_H2D(self, e):
sata = e["sata"]
command = sata["command"]
command = getAndAssertKnown(command, COMMANDS)
self.handle(command, e)
self.__prevEvent = XgigEvent(e)
def SMART(self, e):
smart = Smart(e)
feature = getAndAssertKnown(smart.feature, SMART)
if feature == 'READ_DATA':
self.LOGGER.info("Ignoring SATA READ_DATA")
elif feature == 'SMART_READ_WARRANTY_FAILURE_THRESHOLDS':
self.LOGGER.info("Ignoring SATA SMART_READ_WARRANTY_FAILURE_THRESHOLDS")
elif feature == 'RETURN_STATUS':
assert self.__inFlightUnqueued is None
self.__inFlightUnqueued = ParsedCommand(events=[ smart ], cmdType='-', prevEvent=self.__prevEvent)
self.__commands.append(self.__inFlightUnqueued)
def WRITE_FPDMA_QUEUED(self, e):
write = WriteFPDMAQueued(e, len(self.__inFlightQueued))
self.__inFlightQueued[write.queueTag] = ParsedCommand(events=[ write ], queued=True, cmdType='W', prevEvent=self.__prevEvent)
self.__commands.append(self.__inFlightQueued[write.queueTag])
self.__lastQueued = write.queueTag
def READ_FPDMA_QUEUED(self, e):
read = ReadFPDMAQueued(e, len(self.__inFlightQueued))
self.__inFlightQueued[read.queueTag] = ParsedCommand(events=[ read ], queued=True, cmdType='R', prevEvent=self.__prevEvent)
self.__commands.append(self.__inFlightQueued[read.queueTag])
self.__lastQueued = read.queueTag
def FLUSH_CACHE_EXT(self, e):
flush = FlushCacheExt(e)
assert self.__inFlightUnqueued is None
self.__inFlightUnqueued = ParsedCommand(events=[ flush ], cmdType='F', prevEvent=self.__prevEvent)
self.__commands.append(self.__inFlightUnqueued)
def DATA_SET_MANAGEMENT(self, e):
dsm = DataSetManagement(e)
assert self.__inFlightUnqueued is None
self.__inFlightUnqueued = ParsedCommand(events=[ dsm ], cmdType='-', prevEvent=self.__prevEvent)
self.__commands.append(self.__inFlightUnqueued)
def WRITE_DMA(self, e):
w = WriteDMA(e)
assert self.__inFlightUnqueued is None
self.__inFlightUnqueued = ParsedCommand(events=[ w ], cmdType='W', prevEvent=self.__prevEvent)
self.__commands.append(self.__inFlightUnqueued)
def WRITE_DMA_EXT(self, e):
w = WriteDMAExt(e)
assert self.__inFlightUnqueued is None
self.__inFlightUnqueued = ParsedCommand(events=[ w ], cmdType='W', prevEvent=self.__prevEvent)
self.__commands.append(self.__inFlightUnqueued)
def READ_DMA(self, e):
r = ReadDMA(e)
assert self.__inFlightUnqueued is None
self.__inFlightUnqueued = ParsedCommand(events=[ r ], cmdType='R', prevEvent=self.__prevEvent)
self.__commands.append(self.__inFlightUnqueued)
def READ_DMA_EXT(self, e):
r = ReadDMAExt(e)
assert self.__inFlightUnqueued is None
self.__inFlightUnqueued = ParsedCommand(events=[ r ], cmdType='R', prevEvent=self.__prevEvent)
self.__commands.append(self.__inFlightUnqueued)
def FIS_REG_D2H(self, e):
fisRegD2H = FISRegD2H(e)
if self.__inFlightUnqueued is not None:
self.__inFlightUnqueued.events.append(fisRegD2H)
self.__inFlightUnqueued.done = True
self.__inFlightUnqueued = None
self.__prevEvent = XgigEvent(e)
elif self.__lastQueued is not None:
self.__inFlightQueued[self.__lastQueued].events.append(fisRegD2H)
self.__lastQueued = None
else:
self.LOGGER.warn("Unhandled FIS_REG_D2H (%s, %s)", e["metadata"]["id"], e["metadata"]["sTimestamp"])
def FIS_DEV_BITS(self, e):
bits = FISSetDeviceBits(e, len(self.__inFlightQueued) - 1)
for act in bits.acts:
cmd = self.__inFlightQueued[act]
del self.__inFlightQueued[act]
cmd.events.append(bits)
cmd.done = True
self.__prevEvent = XgigEvent(e)
def FIS_DMA_ACT(self, e):
act = FISDMAAct(e)
if self.__inFlightUnqueued is not None:
self.__inFlightUnqueued.events.append(act)
self.__prevEvent = XgigEvent(e)
else:
self.LOGGER.warn("Unhandled FIS_DMA_ACT (%s, %s)", e["metadata"]["id"], e["metadata"]["sTimestamp"])
def IDENTIFY_DEVICE(self, e):
self.LOGGER.info("Ignoring SATA IDENTIFY_DEVICE")
def FIS_PIO_SETUP(self, e):
self.LOGGER.info("Ignoring SATA FIS_PIO_SETUP")
def CHECK_POWER_MODE(self, e):
p = CheckPowerMode(e)
assert self.__inFlightUnqueued is None
self.__inFlightUnqueued = ParsedCommand(events=[ p ], cmdType='-', prevEvent=self.__prevEvent)
self.__commands.append(self.__inFlightUnqueued)
Parser.LOGGER = logging.getLogger(Parser.__name__)
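# Illustrative usage sketch (assumption: `events` is an iterable of decoded Xgig
# trace records, each a dict carrying the "sata" and "metadata" sections handled
# above):
#
#     parser = Parser(events)
#     for cmd in parser:
#         # each item is a ParsedCommand whose .events list pairs the issuing
#         # FIS with its completion FIS
#         print(cmd.cmdType, len(cmd.events))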
|
|
"""Zwave discovery schemas."""
from . import const
DEFAULT_VALUES_SCHEMA = {
'power': {
const.DISC_SCHEMAS: [
{const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SENSOR_MULTILEVEL],
const.DISC_INDEX: [const.INDEX_SENSOR_MULTILEVEL_POWER]},
{const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_METER],
const.DISC_INDEX: [const.INDEX_METER_POWER]},
],
const.DISC_OPTIONAL: True,
},
}
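# Each discovery schema below merges DEFAULT_VALUES_SCHEMA into its own value map
# via dict(DEFAULT_VALUES_SCHEMA, **{...}), so every component automatically
# inherits the optional 'power' value in addition to its component-specific values.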
DISCOVERY_SCHEMAS = [
{const.DISC_COMPONENT: 'binary_sensor',
const.DISC_GENERIC_DEVICE_CLASS: [
const.GENERIC_TYPE_ENTRY_CONTROL,
const.GENERIC_TYPE_SENSOR_ALARM,
const.GENERIC_TYPE_SENSOR_BINARY,
const.GENERIC_TYPE_SWITCH_BINARY,
const.GENERIC_TYPE_METER,
const.GENERIC_TYPE_SENSOR_MULTILEVEL,
const.GENERIC_TYPE_SWITCH_MULTILEVEL,
const.GENERIC_TYPE_SENSOR_NOTIFICATION,
const.GENERIC_TYPE_THERMOSTAT],
const.DISC_VALUES: dict(DEFAULT_VALUES_SCHEMA, **{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SENSOR_BINARY],
const.DISC_TYPE: const.TYPE_BOOL,
const.DISC_GENRE: const.GENRE_USER,
},
'off_delay': {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_CONFIGURATION],
const.DISC_INDEX: [9],
const.DISC_OPTIONAL: True,
}})},
{const.DISC_COMPONENT: 'climate',
const.DISC_GENERIC_DEVICE_CLASS: [
const.GENERIC_TYPE_THERMOSTAT,
const.GENERIC_TYPE_SENSOR_MULTILEVEL],
const.DISC_VALUES: dict(DEFAULT_VALUES_SCHEMA, **{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [
const.COMMAND_CLASS_THERMOSTAT_SETPOINT],
},
'temperature': {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SENSOR_MULTILEVEL],
const.DISC_INDEX: [const.INDEX_SENSOR_MULTILEVEL_TEMPERATURE],
const.DISC_OPTIONAL: True,
},
'mode': {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_THERMOSTAT_MODE],
const.DISC_OPTIONAL: True,
},
'fan_mode': {
const.DISC_COMMAND_CLASS: [
const.COMMAND_CLASS_THERMOSTAT_FAN_MODE],
const.DISC_OPTIONAL: True,
},
'operating_state': {
const.DISC_COMMAND_CLASS: [
const.COMMAND_CLASS_THERMOSTAT_OPERATING_STATE],
const.DISC_OPTIONAL: True,
},
'fan_state': {
const.DISC_COMMAND_CLASS: [
const.COMMAND_CLASS_THERMOSTAT_FAN_STATE],
const.DISC_OPTIONAL: True,
},
'zxt_120_swing_mode': {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_CONFIGURATION],
const.DISC_INDEX: [33],
const.DISC_OPTIONAL: True,
}})},
{const.DISC_COMPONENT: 'cover', # Rollershutter
const.DISC_GENERIC_DEVICE_CLASS: [
const.GENERIC_TYPE_SWITCH_MULTILEVEL,
const.GENERIC_TYPE_ENTRY_CONTROL],
const.DISC_SPECIFIC_DEVICE_CLASS: [
const.SPECIFIC_TYPE_CLASS_A_MOTOR_CONTROL,
const.SPECIFIC_TYPE_CLASS_B_MOTOR_CONTROL,
const.SPECIFIC_TYPE_CLASS_C_MOTOR_CONTROL,
const.SPECIFIC_TYPE_MOTOR_MULTIPOSITION,
const.SPECIFIC_TYPE_SECURE_BARRIER_ADDON,
const.SPECIFIC_TYPE_SECURE_DOOR],
const.DISC_VALUES: dict(DEFAULT_VALUES_SCHEMA, **{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL],
const.DISC_GENRE: const.GENRE_USER,
},
'open': {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL],
const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_BRIGHT],
const.DISC_OPTIONAL: True,
},
'close': {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL],
const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_DIM],
const.DISC_OPTIONAL: True,
}})},
{const.DISC_COMPONENT: 'cover', # Garage Door Switch
const.DISC_GENERIC_DEVICE_CLASS: [
const.GENERIC_TYPE_SWITCH_MULTILEVEL,
const.GENERIC_TYPE_ENTRY_CONTROL],
const.DISC_SPECIFIC_DEVICE_CLASS: [
const.SPECIFIC_TYPE_CLASS_A_MOTOR_CONTROL,
const.SPECIFIC_TYPE_CLASS_B_MOTOR_CONTROL,
const.SPECIFIC_TYPE_CLASS_C_MOTOR_CONTROL,
const.SPECIFIC_TYPE_MOTOR_MULTIPOSITION,
const.SPECIFIC_TYPE_SECURE_BARRIER_ADDON,
const.SPECIFIC_TYPE_SECURE_DOOR],
const.DISC_VALUES: dict(DEFAULT_VALUES_SCHEMA, **{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_BINARY],
const.DISC_GENRE: const.GENRE_USER,
}})},
{const.DISC_COMPONENT: 'cover', # Garage Door Barrier
const.DISC_GENERIC_DEVICE_CLASS: [
const.GENERIC_TYPE_SWITCH_MULTILEVEL,
const.GENERIC_TYPE_ENTRY_CONTROL],
const.DISC_SPECIFIC_DEVICE_CLASS: [
const.SPECIFIC_TYPE_CLASS_A_MOTOR_CONTROL,
const.SPECIFIC_TYPE_CLASS_B_MOTOR_CONTROL,
const.SPECIFIC_TYPE_CLASS_C_MOTOR_CONTROL,
const.SPECIFIC_TYPE_MOTOR_MULTIPOSITION,
const.SPECIFIC_TYPE_SECURE_BARRIER_ADDON,
const.SPECIFIC_TYPE_SECURE_DOOR],
const.DISC_VALUES: dict(DEFAULT_VALUES_SCHEMA, **{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_BARRIER_OPERATOR],
const.DISC_INDEX: [const.INDEX_BARRIER_OPERATOR_LABEL],
}})},
{const.DISC_COMPONENT: 'fan',
const.DISC_GENERIC_DEVICE_CLASS: [
const.GENERIC_TYPE_SWITCH_MULTILEVEL],
const.DISC_SPECIFIC_DEVICE_CLASS: [
const.SPECIFIC_TYPE_FAN_SWITCH],
const.DISC_VALUES: dict(DEFAULT_VALUES_SCHEMA, **{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL],
const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_LEVEL],
const.DISC_TYPE: const.TYPE_BYTE,
}})},
{const.DISC_COMPONENT: 'light',
const.DISC_GENERIC_DEVICE_CLASS: [
const.GENERIC_TYPE_SWITCH_MULTILEVEL,
const.GENERIC_TYPE_SWITCH_REMOTE],
const.DISC_SPECIFIC_DEVICE_CLASS: [
const.SPECIFIC_TYPE_POWER_SWITCH_MULTILEVEL,
const.SPECIFIC_TYPE_SCENE_SWITCH_MULTILEVEL,
const.SPECIFIC_TYPE_NOT_USED],
const.DISC_VALUES: dict(DEFAULT_VALUES_SCHEMA, **{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL],
const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_LEVEL],
const.DISC_TYPE: const.TYPE_BYTE,
},
'dimming_duration': {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_MULTILEVEL],
const.DISC_INDEX: [const.INDEX_SWITCH_MULTILEVEL_DURATION],
const.DISC_OPTIONAL: True,
},
'color': {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_COLOR],
const.DISC_INDEX: [const.INDEX_SWITCH_COLOR_COLOR],
const.DISC_OPTIONAL: True,
},
'color_channels': {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_COLOR],
const.DISC_INDEX: [const.INDEX_SWITCH_COLOR_CHANNELS],
const.DISC_OPTIONAL: True,
}})},
{const.DISC_COMPONENT: 'lock',
const.DISC_GENERIC_DEVICE_CLASS: [const.GENERIC_TYPE_ENTRY_CONTROL],
const.DISC_SPECIFIC_DEVICE_CLASS: [
const.SPECIFIC_TYPE_DOOR_LOCK,
const.SPECIFIC_TYPE_ADVANCED_DOOR_LOCK,
const.SPECIFIC_TYPE_SECURE_KEYPAD_DOOR_LOCK,
const.SPECIFIC_TYPE_SECURE_LOCKBOX],
const.DISC_VALUES: dict(DEFAULT_VALUES_SCHEMA, **{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_DOOR_LOCK],
const.DISC_INDEX: [const.INDEX_DOOR_LOCK_LOCK],
},
'access_control': {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_ALARM],
const.DISC_INDEX: [const.INDEX_ALARM_ACCESS_CONTROL],
const.DISC_OPTIONAL: True,
},
'alarm_type': {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_ALARM],
const.DISC_INDEX: [const.INDEX_ALARM_TYPE],
const.DISC_OPTIONAL: True,
},
'alarm_level': {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_ALARM],
const.DISC_INDEX: [const.INDEX_ALARM_LEVEL],
const.DISC_OPTIONAL: True,
},
'v2btze_advanced': {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_CONFIGURATION],
const.DISC_INDEX: [12],
const.DISC_OPTIONAL: True,
}})},
{const.DISC_COMPONENT: 'sensor',
const.DISC_VALUES: dict(DEFAULT_VALUES_SCHEMA, **{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [
const.COMMAND_CLASS_SENSOR_MULTILEVEL,
const.COMMAND_CLASS_METER,
const.COMMAND_CLASS_ALARM,
const.COMMAND_CLASS_SENSOR_ALARM,
const.COMMAND_CLASS_INDICATOR],
const.DISC_GENRE: const.GENRE_USER,
}})},
{const.DISC_COMPONENT: 'switch',
const.DISC_GENERIC_DEVICE_CLASS: [
const.GENERIC_TYPE_METER,
const.GENERIC_TYPE_SENSOR_ALARM,
const.GENERIC_TYPE_SENSOR_BINARY,
const.GENERIC_TYPE_SWITCH_BINARY,
const.GENERIC_TYPE_ENTRY_CONTROL,
const.GENERIC_TYPE_SENSOR_MULTILEVEL,
const.GENERIC_TYPE_SWITCH_MULTILEVEL,
const.GENERIC_TYPE_SENSOR_NOTIFICATION,
const.GENERIC_TYPE_GENERIC_CONTROLLER,
const.GENERIC_TYPE_SWITCH_REMOTE,
const.GENERIC_TYPE_REPEATER_SLAVE,
const.GENERIC_TYPE_THERMOSTAT,
const.GENERIC_TYPE_WALL_CONTROLLER],
const.DISC_VALUES: dict(DEFAULT_VALUES_SCHEMA, **{
const.DISC_PRIMARY: {
const.DISC_COMMAND_CLASS: [const.COMMAND_CLASS_SWITCH_BINARY],
const.DISC_TYPE: const.TYPE_BOOL,
const.DISC_GENRE: const.GENRE_USER,
}})},
]
|
|
from __future__ import absolute_import, unicode_literals
from django.db.models.lookups import Lookup
from django.db.models.query import QuerySet
from django.db.models.sql.where import SubqueryConstraint, WhereNode
from django.utils.six import text_type
from wagtail.wagtailsearch.index import class_is_indexed
class FilterError(Exception):
pass
class FieldError(Exception):
pass
class BaseSearchQuery(object):
DEFAULT_OPERATOR = 'or'
def __init__(self, queryset, query_string, fields=None, operator=None, order_by_relevance=True):
self.queryset = queryset
self.query_string = query_string
self.fields = fields
self.operator = operator or self.DEFAULT_OPERATOR
self.order_by_relevance = order_by_relevance
def _get_filterable_field(self, field_attname):
# Get field
field = dict(
(field.get_attname(self.queryset.model), field)
for field in self.queryset.model.get_filterable_search_fields()
).get(field_attname, None)
return field
def _process_lookup(self, field, lookup, value):
raise NotImplementedError
def _connect_filters(self, filters, connector, negated):
raise NotImplementedError
def _process_filter(self, field_attname, lookup, value):
# Get the field
field = self._get_filterable_field(field_attname)
if field is None:
raise FieldError(
'Cannot filter search results with field "' + field_attname + '". Please add index.FilterField(\'' +
field_attname + '\') to ' + self.queryset.model.__name__ + '.search_fields.'
)
# Process the lookup
result = self._process_lookup(field, lookup, value)
if result is None:
raise FilterError(
'Could not apply filter on search results: "' + field_attname + '__' +
                lookup + ' = ' + text_type(value) + '". Lookup "' + lookup + '" not recognised.'
)
return result
def _get_filters_from_where_node(self, where_node):
# Check if this is a leaf node
if isinstance(where_node, Lookup):
field_attname = where_node.lhs.target.attname
lookup = where_node.lookup_name
value = where_node.rhs
# Ignore pointer fields that show up in specific page type queries
if field_attname.endswith('_ptr_id'):
return
# Process the filter
return self._process_filter(field_attname, lookup, value)
elif isinstance(where_node, SubqueryConstraint):
raise FilterError('Could not apply filter on search results: Subqueries are not allowed.')
elif isinstance(where_node, WhereNode):
# Get child filters
connector = where_node.connector
child_filters = [self._get_filters_from_where_node(child) for child in where_node.children]
child_filters = [child_filter for child_filter in child_filters if child_filter]
return self._connect_filters(child_filters, connector, where_node.negated)
else:
raise FilterError('Could not apply filter on search results: Unknown where node: ' + str(type(where_node)))
def _get_filters_from_queryset(self):
return self._get_filters_from_where_node(self.queryset.query.where)
class BaseSearchResults(object):
def __init__(self, backend, query, prefetch_related=None):
self.backend = backend
self.query = query
self.prefetch_related = prefetch_related
self.start = 0
self.stop = None
self._results_cache = None
self._count_cache = None
def _set_limits(self, start=None, stop=None):
if stop is not None:
if self.stop is not None:
self.stop = min(self.stop, self.start + stop)
else:
self.stop = self.start + stop
if start is not None:
if self.stop is not None:
self.start = min(self.stop, self.start + start)
else:
self.start = self.start + start
def _clone(self):
klass = self.__class__
new = klass(self.backend, self.query, prefetch_related=self.prefetch_related)
new.start = self.start
new.stop = self.stop
return new
def _do_search(self):
raise NotImplementedError
def _do_count(self):
raise NotImplementedError
def results(self):
if self._results_cache is None:
self._results_cache = self._do_search()
return self._results_cache
def count(self):
if self._count_cache is None:
if self._results_cache is not None:
self._count_cache = len(self._results_cache)
else:
self._count_cache = self._do_count()
return self._count_cache
def __getitem__(self, key):
new = self._clone()
if isinstance(key, slice):
# Set limits
start = int(key.start) if key.start else None
stop = int(key.stop) if key.stop else None
new._set_limits(start, stop)
# Copy results cache
if self._results_cache is not None:
new._results_cache = self._results_cache[key]
return new
else:
if self._results_cache is not None:
return self._results_cache[key]
new.start = self.start + key
new.stop = self.start + key + 1
return list(new)[0]
def __iter__(self):
return iter(self.results())
def __len__(self):
return len(self.results())
def __repr__(self):
data = list(self[:21])
if len(data) > 20:
data[-1] = "...(remaining elements truncated)..."
return '<SearchResults {}>'.format(repr(data))
class BaseSearchBackend(object):
query_class = None
results_class = None
rebuilder_class = None
def __init__(self, params):
pass
def get_index_for_model(self, model):
return None
def get_rebuilder(self):
return None
def reset_index(self):
raise NotImplementedError
def add_type(self, model):
raise NotImplementedError
def refresh_index(self):
raise NotImplementedError
def add(self, obj):
raise NotImplementedError
def add_bulk(self, model, obj_list):
raise NotImplementedError
def delete(self, obj):
raise NotImplementedError
def search(self, query_string, model_or_queryset, fields=None, filters=None,
prefetch_related=None, operator=None, order_by_relevance=True):
# Find model/queryset
if isinstance(model_or_queryset, QuerySet):
model = model_or_queryset.model
queryset = model_or_queryset
else:
model = model_or_queryset
queryset = model_or_queryset.objects.all()
# Model must be a class that is in the index
if not class_is_indexed(model):
return []
        # Check that there's still a query string after the clean up
if query_string == "":
return []
# Only fields that are indexed as a SearchField can be passed in fields
if fields:
allowed_fields = {field.field_name for field in model.get_searchable_search_fields()}
for field_name in fields:
if field_name not in allowed_fields:
raise FieldError(
'Cannot search with field "' + field_name + '". Please add index.SearchField(\'' +
field_name + '\') to ' + model.__name__ + '.search_fields.'
)
# Apply filters to queryset
if filters:
queryset = queryset.filter(**filters)
# Prefetch related
if prefetch_related:
for prefetch in prefetch_related:
queryset = queryset.prefetch_related(prefetch)
# Check operator
if operator is not None:
operator = operator.lower()
if operator not in ['or', 'and']:
raise ValueError("operator must be either 'or' or 'and'")
# Search
search_query = self.query_class(
queryset, query_string, fields=fields, operator=operator, order_by_relevance=order_by_relevance
)
return self.results_class(self, search_query)
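# Illustrative usage sketch (assumption: a concrete backend subclass is obtained
# through wagtail.wagtailsearch.backends.get_search_backend and `Page` is an
# indexed model):
#
#     from wagtail.wagtailsearch.backends import get_search_backend
#
#     backend = get_search_backend()
#     results = backend.search("hello world", Page, fields=['title'], operator='and')
#     print(results.count())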
|
|
import time
import datetime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter, date2num
import numpy as np
import subprocess as sp
import time
import sys
import os
# Takes south_local_signal.txt and south_global_signal.txt
# and adds microseconds, UTC time, as well as trigger type,
# in addition to the VEM signals for anode and dynode
secsInWeek = 604800
secsInDay = 86400
gpsEpoch = (1980, 1, 6, 0, 0, 0) # (year, month, day, hh, mm, ss)
def UTCFromGps(gpsWeek, SOW, leapSecs=18):
"""converts gps week and seconds to UTC
see comments of inverse function!
SOW = seconds of week
gpsWeek is the full number (not modulo 1024)
"""
secFract = SOW % 1
epochTuple = gpsEpoch + (-1, -1, 0)
t0 = time.mktime(epochTuple) - time.timezone #mktime is localtime, correct for UTC
tdiff = (gpsWeek * secsInWeek) + SOW - leapSecs
t = t0 + tdiff
(year, month, day, hh, mm, ss, dayOfWeek, julianDay, daylightsaving) = time.gmtime(t)
    # use gmtime since localtime does not allow switching off daylight-saving correction
return (year, month, day, hh, mm, ss + secFract)
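# Illustrative example: a trigger stamped at GPS week 1925, 345600.25 seconds of
# week would be converted as UTCFromGps(1925, 345600.25) and comes back as a
# (year, month, day, hh, mm, ss) tuple already corrected for the default 18 leap
# seconds.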
date_list = ['20161107',
'20161108',
'20161109',
'20161110',
'20161111',
'20161112',
'20161113',
'20161114',
'20161115',
'20161116',
'20161117',
'20161118',
'20161119',
'20161120',
'20161121',
'20161122',
'20161123',
'20161124',
'20161125',
'20161126',
'20161127',
'20161128',
'20161129',
'20161130',
'20161201',
'20161202',
'20161203',
'20161204',
'20161205',
'20161206',
'20161207',
'20161208',
'20161209',
'20161210',
'20161211',
'20161212',
'20161213',
'20161214',
'20161215',
'20161216',
'20161217',
'20161218',
'20161219',
'20161220',
'20161221',
'20161222',
'20161223',
'20161224',
'20161225',
'20161226',
'20161227',
'20161228',
'20161229',
'20161230',
'20161231',
'20170101',
'20170102',
'20170103',
'20170104',
'20170105',
'20170106',
'20170107',
'20170108',
'20170109',
'20170110',
'20170111',
'20170112',
'20170114',
'20170117',
'20170118',
'20170119',
'20170120',
'20170121',
'20170122',
'20170123',
'20170127',
'20170128',
'20170129',
'20170130',
'20170131',
'20170201',
'20170202',
'20170203',
'20170204',
'20170205',
'20170206',
'20170207',
'20170208',
'20170209',
'20170210',
'20170211',
'20170212',
'20170213',
'20170214',
'20170215',
'20170216',
'20170217',
'20170218',
'20170219',
'20170220',
'20170221',
'20170222',
'20170223',
'20170224',
'20170225',
'20170226',
'20170227',
'20170228',
'20170301',
'20170302',
'20170303',
'20170304',
'20170305',
'20170306',
'20170307',
'20170308',
'20170309',
'20170310',
'20170311',
'20170312',
'20170313',
'20170314',
'20170315',
'20170316',
'20170317',
'20170318',
'20170319',
'20170320',
'20170321',
'20170322',
'20170323',]
for date in date_list:
fdate = date
yr = int(fdate[:4])
mo = int(fdate[4:6])
dy = int(fdate[6:])
# Array columns: GPS sec, A1, A2, A3, D1, D2, D3
local_sig = np.loadtxt('/home/augta/web_monitor/south_local_signal.txt',dtype='S500')
local_coi = np.loadtxt('/home/augta/data/coincidence/%i_%02d_%02d.CTAL.gz' %(yr,mo,dy)
,usecols=(1,),dtype='S100')
sp.call(['cp','/home/augta/data/south/t2/%i_%02d_%02d.T2.gz' %(yr,mo,dy),'.'])
sp.call(['gunzip','%i_%02d_%02d.T2.gz' %(yr,mo,dy)])
file_name = "%i_%02d_%02d.T2" %(yr,mo,dy)
with open(file_name,'r') as f:
all_data = f.read()
sp.call(['rm',file_name])
new_slf = '/home/augta/web_monitor/south_local_signal_extra.txt'
if local_coi.size > 0:
if local_coi.size == 1:
tmp = str(local_coi)
local_coi = []
local_coi.append(tmp)
for i in local_coi:
# Find where GPS second is
try:
adi = all_data.index(i.split('.')[0])
#Time stamp not in file, edit manually
except:
print i
print "Previous second: %i" %gps_int
continue
# Get string blob with T2 list
blob = all_data[adi:adi+1000]
our_second = blob.split('--\n')[0]
micro = i.split('.')[1]
mi = our_second.index('%s' %str(int(micro)))
ttype = our_second[mi-2]
# Compute UTC time
gps_sec_str = i.split('.')[0]
gps_int = int(gps_sec_str)
week = gps_int / secsInWeek
sow = gps_int - week*secsInWeek
utc = UTCFromGps(week,sow)
utc_str = "%i-%02d-%02d-%02d:%02d:%02d" %utc
utc_str = utc_str + '.%06d' %int(micro)
# Find matching local signal data
for j in local_sig:
if gps_sec_str in j[0]:
vems = j[1:]
# Now we have everything we need to write to a file
with open(new_slf,'a') as f:
out_str = '%s %s %s' %(i,utc_str,ttype)
out_str += ' %s'*12 %tuple(vems)
out_str += '\n'
f.write(out_str)
global_sig = np.loadtxt('/home/augta/web_monitor/south_global_signal.txt',dtype='S500')
global_coi = np.loadtxt('/home/augta/data/coincidence/%i_%02d_%02d.CTAG.gz' %(yr,mo,dy),
usecols=(6,),dtype='S100',comments=None)
new_sgf = '/home/augta/web_monitor/south_global_signal_extra.txt'
print global_coi.size
if global_coi.size > 0:
if global_coi.size == 1:
tmp = str(global_coi)
global_coi = []
global_coi.append(tmp)
for i in global_coi:
# Find where GPS second is
try:
adi = all_data.index(i.split('.')[0])
#Time stamp not in file, edit manually
except:
print i
print "Previous second: %i" %gps_int
continue
# Get string blob with T2 list
blob = all_data[adi:adi+1000]
our_second = blob.split('--\n')[0]
micro = i.split('.')[1]
mi = our_second.index('%s' %str(int(micro)))
ttype = our_second[mi-2]
# Compute UTC time
gps_sec_str = i.split('.')[0]
gps_int = int(gps_sec_str)
week = gps_int / secsInWeek
sow = gps_int - week*secsInWeek
utc = UTCFromGps(week,sow)
utc_str = "%i-%02d-%02d-%02d:%02d:%02d" %utc
utc_str = utc_str + '.%06d' %int(micro)
# Find matching local signal data
for j in global_sig:
if gps_sec_str in j[0]:
vems = j[1:]
# Now we have everything we need to write to a file
with open(new_sgf,'a') as f:
out_str = '%s %s %s' %(i,utc_str,ttype)
out_str += ' %s'*12 %tuple(vems)
out_str += '\n'
f.write(out_str)
|
|
import cPickle
import cv2
import os
import json
import numpy as np
from ..logger import logger
from imdb import IMDB
# coco api
from ..pycocotools.coco import COCO
from ..pycocotools.cocoeval import COCOeval
from ..pycocotools import mask as COCOmask
class coco(IMDB):
def __init__(self, image_set, root_path, data_path):
"""
fill basic information to initialize imdb
:param image_set: train2014, val2014, test2015
:param root_path: 'data', will write 'rpn_data', 'cache'
:param data_path: 'data/coco'
"""
super(coco, self).__init__('COCO', image_set, root_path, data_path)
self.root_path = root_path
self.data_path = data_path
self.coco = COCO(self._get_ann_file())
# deal with class names
cats = [cat['name'] for cat in self.coco.loadCats(self.coco.getCatIds())]
self.classes = ['__background__'] + cats
self.num_classes = len(self.classes)
self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
self._class_to_coco_ind = dict(zip(cats, self.coco.getCatIds()))
self._coco_ind_to_class_ind = dict([(self._class_to_coco_ind[cls], self._class_to_ind[cls])
for cls in self.classes[1:]])
# load image file names
self.image_set_index = self._load_image_set_index()
self.num_images = len(self.image_set_index)
logger.info('%s num_images %d' % (self.name, self.num_images))
# deal with data name
view_map = {'minival2014': 'val2014',
'valminusminival2014': 'val2014'}
self.data_name = view_map[image_set] if image_set in view_map else image_set
def _get_ann_file(self):
""" self.data_path / annotations / instances_train2014.json """
prefix = 'instances' if 'test' not in self.image_set else 'image_info'
return os.path.join(self.data_path, 'annotations',
prefix + '_' + self.image_set + '.json')
def _load_image_set_index(self):
""" image id: int """
image_ids = self.coco.getImgIds()
return image_ids
def image_path_from_index(self, index):
""" example: images / train2014 / COCO_train2014_000000119993.jpg """
filename = 'COCO_%s_%012d.jpg' % (self.data_name, index)
image_path = os.path.join(self.data_path, 'images', self.data_name, filename)
assert os.path.exists(image_path), 'Path does not exist: {}'.format(image_path)
return image_path
def gt_roidb(self):
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
logger.info('%s gt roidb loaded from %s' % (self.name, cache_file))
return roidb
gt_roidb = [self._load_coco_annotation(index) for index in self.image_set_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
logger.info('%s wrote gt roidb to %s' % (self.name, cache_file))
return gt_roidb
def _load_coco_annotation(self, index):
"""
coco ann: [u'segmentation', u'area', u'iscrowd', u'image_id', u'bbox', u'category_id', u'id']
iscrowd:
crowd instances are handled by marking their overlaps with all categories to -1
and later excluded in training
bbox:
[x1, y1, w, h]
:param index: coco image id
:return: roidb entry
"""
im_ann = self.coco.loadImgs(index)[0]
width = im_ann['width']
height = im_ann['height']
annIds = self.coco.getAnnIds(imgIds=index, iscrowd=None)
objs = self.coco.loadAnns(annIds)
# sanitize bboxes
valid_objs = []
for obj in objs:
x, y, w, h = obj['bbox']
x1 = np.max((0, x))
y1 = np.max((0, y))
x2 = np.min((width - 1, x1 + np.max((0, w - 1))))
y2 = np.min((height - 1, y1 + np.max((0, h - 1))))
if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
obj['clean_bbox'] = [x1, y1, x2, y2]
valid_objs.append(obj)
objs = valid_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
for ix, obj in enumerate(objs):
cls = self._coco_ind_to_class_ind[obj['category_id']]
boxes[ix, :] = obj['clean_bbox']
gt_classes[ix] = cls
if obj['iscrowd']:
overlaps[ix, :] = -1.0
else:
overlaps[ix, cls] = 1.0
roi_rec = {'image': self.image_path_from_index(index),
'height': height,
'width': width,
'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'max_classes': overlaps.argmax(axis=1),
'max_overlaps': overlaps.max(axis=1),
'flipped': False}
return roi_rec
def evaluate_detections(self, detections):
""" detections_val2014_results.json """
res_folder = os.path.join(self.cache_path, 'results')
if not os.path.exists(res_folder):
os.makedirs(res_folder)
res_file = os.path.join(res_folder, 'detections_%s_results.json' % self.image_set)
self._write_coco_results(detections, res_file)
if 'test' not in self.image_set:
self._do_python_eval(res_file, res_folder)
def _write_coco_results(self, detections, res_file):
""" example results
[{"image_id": 42,
"category_id": 18,
"bbox": [258.15,41.29,348.26,243.78],
"score": 0.236}, ...]
"""
results = []
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
logger.info('collecting %s results (%d/%d)' % (cls, cls_ind, self.num_classes - 1))
coco_cat_id = self._class_to_coco_ind[cls]
results.extend(self._coco_results_one_category(detections[cls_ind], coco_cat_id))
logger.info('writing results json to %s' % res_file)
with open(res_file, 'w') as f:
json.dump(results, f, sort_keys=True, indent=4)
def _coco_results_one_category(self, boxes, cat_id):
results = []
for im_ind, index in enumerate(self.image_set_index):
dets = boxes[im_ind].astype(np.float)
if len(dets) == 0:
continue
scores = dets[:, -1]
xs = dets[:, 0]
ys = dets[:, 1]
ws = dets[:, 2] - xs + 1
hs = dets[:, 3] - ys + 1
result = [{'image_id': index,
'category_id': cat_id,
'bbox': [xs[k], ys[k], ws[k], hs[k]],
'score': scores[k]} for k in xrange(dets.shape[0])]
results.extend(result)
return results
def _do_python_eval(self, res_file, res_folder):
ann_type = 'bbox'
coco_dt = self.coco.loadRes(res_file)
coco_eval = COCOeval(self.coco, coco_dt)
coco_eval.params.useSegm = (ann_type == 'segm')
coco_eval.evaluate()
coco_eval.accumulate()
self._print_detection_metrics(coco_eval)
eval_file = os.path.join(res_folder, 'detections_%s_results.pkl' % self.image_set)
with open(eval_file, 'wb') as f:
cPickle.dump(coco_eval, f, cPickle.HIGHEST_PROTOCOL)
logger.info('eval results saved to %s' % eval_file)
def _print_detection_metrics(self, coco_eval):
IoU_lo_thresh = 0.5
IoU_hi_thresh = 0.95
def _get_thr_ind(coco_eval, thr):
ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
(coco_eval.params.iouThrs < thr + 1e-5))[0][0]
iou_thr = coco_eval.params.iouThrs[ind]
assert np.isclose(iou_thr, thr)
return ind
ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)
ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)
# precision has dims (iou, recall, cls, area range, max dets)
# area range index 0: all area ranges
# max dets index 2: 100 per image
precision = \
coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
ap_default = np.mean(precision[precision > -1])
logger.info('~~~~ Mean and per-category AP @ IoU=%.2f,%.2f] ~~~~' % (IoU_lo_thresh, IoU_hi_thresh))
logger.info('%-15s %5.1f' % ('all', 100 * ap_default))
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
# minus 1 because of __background__
precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, cls_ind - 1, 0, 2]
ap = np.mean(precision[precision > -1])
logger.info('%-15s %5.1f' % (cls, 100 * ap))
logger.info('~~~~ Summary metrics ~~~~')
coco_eval.summarize()
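# Illustrative usage sketch (assumption: the COCO images and annotations are laid
# out under data/coco as described in the constructor docstring):
#
#     imdb = coco('val2014', 'data', 'data/coco')
#     roidb = imdb.gt_roidb()   # cached under the root_path cache directory
#     num_images, first_boxes = len(roidb), roidb[0]['boxes'].shape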
|
|
__author__ = 'tylin'
__version__ = 1.0
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
# Microsoft COCO Toolbox. Version 1.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import datetime
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
from skimage.draw import polygon
class COCO:
def __init__(self, annotation_file='annotations/instances_val2014_1_0.json'):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
print 'loading annotations into memory...'
time_t = datetime.datetime.utcnow()
dataset = json.load(open(annotation_file, 'r'))
print datetime.datetime.utcnow() - time_t
print 'annotations loaded!'
time_t = datetime.datetime.utcnow()
# create index
print 'creating index...'
imgToAnns = {ann['image_id']: [] for ann in dataset['annotations']}
anns = {ann['id']: [] for ann in dataset['annotations']}
for ann in dataset['annotations']:
imgToAnns[ann['image_id']] += [ann]
anns[ann['id']] = ann
imgs = {im['id']: {} for im in dataset['images']}
for img in dataset['images']:
imgs[img['id']] = img
cats = []
catToImgs = []
if dataset['type'] == 'instances':
cats = {cat['id']: [] for cat in dataset['categories']}
for cat in dataset['categories']:
cats[cat['id']] = cat
catToImgs = {cat['id']: [] for cat in dataset['categories']}
for ann in dataset['annotations']:
catToImgs[ann['category_id']] += [ann['image_id']]
print datetime.datetime.utcnow() - time_t
print 'index created!'
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
self.dataset = dataset
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print '%s: %s'%(key, value)
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
anns = sum([self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns],[])
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if self.dataset['type'] == 'instances':
if iscrowd is not None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
Get cat ids that satisfy the given filter conditions. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for catId in catIds:
if len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if self.dataset['type'] == 'instances':
ax = plt.gca()
polygons = []
color = []
for ann in anns:
c = np.random.random((1, 3)).tolist()[0]
if not ann['iscrowd']:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((len(seg)/2, 2))
polygons.append(Polygon(poly, True,alpha=0.4))
color.append(c)
else:
# mask
mask = COCO.decodeMask(ann['segmentation'])
img = np.ones( (mask.shape[0], mask.shape[1], 3) )
light_green = np.array([2.0,166.0,101.0])/255
for i in range(3):
img[:,:,i] = light_green[i]
ax.imshow(np.dstack( (img, mask*0.5) ))
p = PatchCollection(polygons, facecolors=color, edgecolors=(0,0,0,1), linewidths=3, alpha=0.4)
ax.add_collection(p)
if self.dataset['type'] == 'captions':
for ann in anns:
print ann['caption']
@staticmethod
def decodeMask(R):
"""
Decode binary mask M encoded via run-length encoding.
:param R (object RLE) : run-length encoding of binary mask
:return: M (bool 2D array) : decoded binary mask
"""
N = len(R['counts'])
M = np.zeros( (R['size'][0]*R['size'][1], ))
n = 0
val = 1
for pos in range(N):
val = not val
for c in range(R['counts'][pos]):
M[n] = val
n += 1
return M.reshape((R['size']), order='F')
@staticmethod
def encodeMask(M):
"""
Encode binary mask M using run-length encoding.
:param M (bool 2D array) : binary mask to encode
:return: R (object RLE) : run-length encoding of binary mask
"""
[h, w] = M.shape
M = M.flatten(order='F')
N = len(M)
counts_list = []
pos = 0
# counts
counts_list.append(1)
diffs = np.logical_xor(M[0:N-1], M[1:N])
for diff in diffs:
if diff:
pos +=1
counts_list.append(1)
else:
counts_list[pos] += 1
# if the mask starts with 1, prepend a zero-length count for the leading run of 0s
if M[0] == 1:
counts_list = [0] + counts_list
return {'size': [h, w],
'counts': counts_list ,
}
@staticmethod
def segToMask( S, h, w ):
"""
Convert polygon segmentation to binary mask.
:param S (float array) : polygon segmentation mask
:param h (int) : target mask height
:param w (int) : target mask width
:return: M (bool 2D array) : binary mask
"""
M = np.zeros((h,w), dtype=np.bool)
for s in S:
N = len(s)
rr, cc = polygon(np.array(s[1:N:2]), np.array(s[0:N:2])) # (y, x)
M[rr, cc] = 1
return M
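# --- Illustrative usage (not part of the original toolbox) --------------------
# A minimal sketch of how the class above is typically driven. The annotation
# file path and the 'person' category are assumptions; point them at a real
# COCO instances file before running.
if __name__ == '__main__':
    coco = COCO('annotations/instances_val2014_1_0.json')
    cat_ids = coco.getCatIds(catNms=['person'])
    img_ids = coco.getImgIds(catIds=cat_ids)
    anns = coco.loadAnns(coco.getAnnIds(imgIds=img_ids[:1], catIds=cat_ids, iscrowd=False))
    print('%d annotations found for the first image' % len(anns))
    # Round-trip a tiny mask through the RLE helpers defined above.
    mask = np.zeros((4, 4), dtype=np.bool)
    mask[1:3, 1:3] = True
    rle = COCO.encodeMask(mask)
    decoded = COCO.decodeMask(rle)
    print('RLE round-trip ok: %s' % bool((decoded.astype(np.bool) == mask).all()))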
|
|
from __future__ import absolute_import
import logging
from datetime import timedelta
from django.utils import timezone
from rest_framework import serializers, status
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.decorators import sudo_required
from sentry.api.serializers import serialize
from sentry.models import (
AuditLogEntryEvent, Group, GroupStatus, Project, ProjectStatus
)
from sentry.plugins import plugins
from sentry.tasks.deletion import delete_project
from sentry.utils.apidocs import scenario, attach_scenarios
@scenario('GetProject')
def get_project_scenario(runner):
runner.request(
method='GET',
path='/projects/%s/%s/' % (
runner.org.slug, runner.default_project.slug)
)
@scenario('DeleteProject')
def delete_project_scenario(runner):
with runner.isolated_project('Plain Proxy') as project:
runner.request(
method='DELETE',
path='/projects/%s/%s/' % (
runner.org.slug, project.slug)
)
@scenario('UpdateProject')
def update_project_scenario(runner):
with runner.isolated_project('Plain Proxy') as project:
runner.request(
method='PUT',
path='/projects/%s/%s/' % (
runner.org.slug, project.slug),
data={
'name': 'Plane Proxy',
'slug': 'plane-proxy',
'options': {
'sentry:origins': 'http://example.com\nhttp://example.invalid',
}
}
)
def clean_newline_inputs(value):
result = []
for v in value.split('\n'):
v = v.lower().strip()
if v:
result.append(v)
return result
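# For example (illustrative):
#     clean_newline_inputs("http://Example.com\n\n http://other.example \n")
# returns ['http://example.com', 'http://other.example'] -- values are
# lowercased, stripped, and blank lines are dropped.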
class ProjectSerializer(serializers.ModelSerializer):
class Meta:
model = Project
fields = ('name', 'slug')
class ProjectDetailsEndpoint(ProjectEndpoint):
doc_section = DocSection.PROJECTS
def _get_unresolved_count(self, project):
queryset = Group.objects.filter(
status=GroupStatus.UNRESOLVED,
project=project,
)
resolve_age = project.get_option('sentry:resolve_age', None)
if resolve_age:
queryset = queryset.filter(
last_seen__gte=timezone.now() - timedelta(hours=int(resolve_age)),
)
return queryset.count()
@attach_scenarios([get_project_scenario])
def get(self, request, project):
"""
Retrieve a Project
``````````````````
Return details on an individual project.
:pparam string organization_slug: the slug of the organization the
project belongs to.
:pparam string project_slug: the slug of the project to retrieve.
:auth: required
"""
active_plugins = [
{
'name': plugin.get_title(),
'id': plugin.slug,
}
for plugin in plugins.configurable_for_project(project, version=None)
if plugin.is_enabled(project)
and plugin.has_project_conf()
]
data = serialize(project, request.user)
data['options'] = {
'sentry:origins': '\n'.join(project.get_option('sentry:origins', ['*']) or []),
'sentry:resolve_age': int(project.get_option('sentry:resolve_age', 0)),
'sentry:scrub_data': bool(project.get_option('sentry:scrub_data', True)),
'sentry:scrub_defaults': bool(project.get_option('sentry:scrub_defaults', True)),
'sentry:sensitive_fields': project.get_option('sentry:sensitive_fields', []),
}
data['activePlugins'] = active_plugins
data['team'] = serialize(project.team, request.user)
data['organization'] = serialize(project.organization, request.user)
include = set(filter(bool, request.GET.get('include', '').split(',')))
if 'stats' in include:
data['stats'] = {
'unresolved': self._get_unresolved_count(project),
}
return Response(data)
@attach_scenarios([update_project_scenario])
@sudo_required
def put(self, request, project):
"""
Update a Project
````````````````
Update various attributes and configurable settings for the given
project. Only supplied values are updated.
:pparam string organization_slug: the slug of the organization the
project belongs to.
:pparam string project_slug: the slug of the project to update.
:param string name: the new name for the project.
:param string slug: the new slug for the project.
:param object options: optional options to override in the
project settings.
:auth: required
"""
serializer = ProjectSerializer(project, data=request.DATA, partial=True)
if serializer.is_valid():
project = serializer.save()
options = request.DATA.get('options', {})
if 'sentry:origins' in options:
project.update_option(
'sentry:origins',
clean_newline_inputs(options['sentry:origins'])
)
if 'sentry:resolve_age' in options:
project.update_option('sentry:resolve_age', int(options['sentry:resolve_age']))
if 'sentry:scrub_data' in options:
project.update_option('sentry:scrub_data', bool(options['sentry:scrub_data']))
if 'sentry:scrub_defaults' in options:
project.update_option('sentry:scrub_defaults', bool(options['sentry:scrub_defaults']))
if 'sentry:sensitive_fields' in options:
project.update_option(
'sentry:sensitive_fields',
[s.strip().lower() for s in options['sentry:sensitive_fields']]
)
self.create_audit_entry(
request=request,
organization=project.organization,
target_object=project.id,
event=AuditLogEntryEvent.PROJECT_EDIT,
data=project.get_audit_log_data(),
)
data = serialize(project, request.user)
data['options'] = {
'sentry:origins': '\n'.join(project.get_option('sentry:origins', '*') or []),
'sentry:resolve_age': int(project.get_option('sentry:resolve_age', 0)),
}
return Response(data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@attach_scenarios([delete_project_scenario])
@sudo_required
def delete(self, request, project):
"""
Delete a Project
````````````````
Schedules a project for deletion.
Deletion happens asynchronously and is therefore not immediate. However,
once deletion has begun the project's state changes and it will be hidden
from most public views.
:pparam string organization_slug: the slug of the organization the
project belongs to.
:pparam string project_slug: the slug of the project to delete.
:auth: required
"""
if project.is_internal_project():
return Response('{"error": "Cannot remove projects internally used by Sentry."}',
status=status.HTTP_403_FORBIDDEN)
logging.getLogger('sentry.deletions').info(
'Project %s/%s (id=%s) removal requested by user (id=%s)',
project.organization.slug, project.slug, project.id, request.user.id)
updated = Project.objects.filter(
id=project.id,
status=ProjectStatus.VISIBLE,
).update(status=ProjectStatus.PENDING_DELETION)
if updated:
delete_project.delay(object_id=project.id, countdown=3600)
self.create_audit_entry(
request=request,
organization=project.organization,
target_object=project.id,
event=AuditLogEntryEvent.PROJECT_REMOVE,
data=project.get_audit_log_data(),
)
return Response(status=204)
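# --- Illustrative client-side usage (not part of this module) -----------------
# A minimal sketch of exercising the endpoint above over HTTP with ``requests``.
# The base URL, slugs and token are placeholders, not real values.
if __name__ == '__main__':
    import requests
    base = 'https://sentry.example.com/api/0/projects/my-org/my-project/'
    headers = {'Authorization': 'Bearer <api-token>'}
    # GET handler above: retrieve the project, optionally with stats.
    print(requests.get(base, params={'include': 'stats'}, headers=headers).json())
    # PUT handler above: rename the project and tweak an option.
    resp = requests.put(base, headers=headers, json={
        'name': 'Plane Proxy',
        'slug': 'plane-proxy',
        'options': {'sentry:resolve_age': 24},
    })
    print(resp.status_code)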
|
|
#!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Glance Management Utility
"""
from __future__ import print_function
# FIXME(sirp): When we have glance-admin we can consider merging this into it
# Perhaps for consistency with Nova, we would then rename glance-admin ->
# glance-manage (or the other way around)
import os
import sys
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
from oslo.config import cfg
from oslo.db.sqlalchemy import migration
from glance.common import config
from glance.common import exception
from glance.common import utils
from glance.db import migration as db_migration
from glance.db.sqlalchemy import api as db_api
from glance.db.sqlalchemy import metadata
from glance.openstack.common import gettextutils
from glance.openstack.common import log
from glance.openstack.common import strutils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
_LW = gettextutils._LW
# Decorators for actions
def args(*args, **kwargs):
def _decorator(func):
func.__dict__.setdefault('args', []).insert(0, (args, kwargs))
return func
return _decorator
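# For example (illustrative): decorating a method with
#     @args('--version', metavar='<version>', help='Database version')
# prepends (('--version',), {'metavar': '<version>', 'help': 'Database version'})
# to ``func.args``, which ``add_command_parsers`` below turns into argparse
# arguments for that sub-command.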
class DbCommands(object):
"""Class for managing the db"""
def __init__(self):
pass
def version(self):
"""Print database's current migration level"""
print(migration.db_version(db_api.get_engine(),
db_migration.MIGRATE_REPO_PATH,
db_migration.INIT_VERSION))
@args('--version', metavar='<version>', help='Database version')
def upgrade(self, version=None):
"""Upgrade the database's migration level"""
migration.db_sync(db_api.get_engine(),
db_migration.MIGRATE_REPO_PATH,
version)
@args('--version', metavar='<version>', help='Database version')
def downgrade(self, version=None):
"""Downgrade the database's migration level"""
migration.db_sync(db_api.get_engine(),
db_migration.MIGRATE_REPO_PATH,
version)
@args('--version', metavar='<version>', help='Database version')
def version_control(self, version=None):
"""Place a database under migration control"""
migration.db_version_control(db_api.get_engine(),
db_migration.MIGRATE_REPO_PATH,
version)
@args('--version', metavar='<version>', help='Database version')
@args('--current_version', metavar='<version>',
help='Current Database version')
def sync(self, version=None, current_version=None):
"""
Place a database under migration control and upgrade/downgrade it,
creating first if necessary.
"""
if current_version not in (None, 'None'):
migration.db_version_control(db_api.get_engine(),
db_migration.MIGRATE_REPO_PATH,
version=current_version)
migration.db_sync(db_api.get_engine(),
db_migration.MIGRATE_REPO_PATH,
version)
@args('--path', metavar='<path>', help='Path to the directory where '
'json metadata files are stored')
def load_metadefs(self, path=None):
"""Load metadefinition json files to database"""
metadata.db_load_metadefs(db_api.get_engine(),
path)
def unload_metadefs(self):
"""Unload metadefinitions from database"""
metadata.db_unload_metadefs(db_api.get_engine())
@args('--path', metavar='<path>', help='Path to the directory where '
'json metadata files should be '
'saved.')
def export_metadefs(self, path=None):
"""Export metadefinitions data from database to files"""
metadata.db_export_metadefs(db_api.get_engine(),
path)
class DbLegacyCommands(object):
"""Class for managing the db using legacy commands"""
def __init__(self, command_object):
self.command_object = command_object
def version(self):
self.command_object.version()
def upgrade(self, version=None):
self.command_object.upgrade(CONF.command.version)
def downgrade(self, version=None):
self.command_object.downgrade(CONF.command.version)
def version_control(self, version=None):
self.command_object.version_control(CONF.command.version)
def sync(self, version=None, current_version=None):
self.command_object.sync(CONF.command.version,
CONF.command.current_version)
def load_metadefs(self, path=None):
self.command_object.load_metadefs(CONF.command.path)
def unload_metadefs(self):
self.command_object.unload_metadefs()
def export_metadefs(self, path=None):
self.command_object.export_metadefs(CONF.command.path)
def add_legacy_command_parsers(command_object, subparsers):
legacy_command_object = DbLegacyCommands(command_object)
parser = subparsers.add_parser('db_version')
parser.set_defaults(action_fn=legacy_command_object.version)
parser.set_defaults(action='db_version')
parser = subparsers.add_parser('db_upgrade')
parser.set_defaults(action_fn=legacy_command_object.upgrade)
parser.add_argument('version', nargs='?')
parser.set_defaults(action='db_upgrade')
parser = subparsers.add_parser('db_downgrade')
parser.set_defaults(action_fn=legacy_command_object.downgrade)
parser.add_argument('version')
parser.set_defaults(action='db_downgrade')
parser = subparsers.add_parser('db_version_control')
parser.set_defaults(action_fn=legacy_command_object.version_control)
parser.add_argument('version', nargs='?')
parser.set_defaults(action='db_version_control')
parser = subparsers.add_parser('db_sync')
parser.set_defaults(action_fn=legacy_command_object.sync)
parser.add_argument('version', nargs='?')
parser.add_argument('current_version', nargs='?')
parser.set_defaults(action='db_sync')
parser = subparsers.add_parser('db_load_metadefs')
parser.set_defaults(action_fn=legacy_command_object.load_metadefs)
parser.add_argument('path', nargs='?')
parser.set_defaults(action='db_load_metadefs')
parser = subparsers.add_parser('db_unload_metadefs')
parser.set_defaults(action_fn=legacy_command_object.unload_metadefs)
parser.set_defaults(action='db_unload_metadefs')
parser = subparsers.add_parser('db_export_metadefs')
parser.set_defaults(action_fn=legacy_command_object.export_metadefs)
parser.add_argument('path', nargs='?')
parser.set_defaults(action='db_export_metadefs')
def add_command_parsers(subparsers):
command_object = DbCommands()
parser = subparsers.add_parser('db')
parser.set_defaults(command_object=command_object)
category_subparsers = parser.add_subparsers(dest='action')
for (action, action_fn) in methods_of(command_object):
parser = category_subparsers.add_parser(action)
action_kwargs = []
for args, kwargs in getattr(action_fn, 'args', []):
# FIXME(basha): hack to assume dest is the arg name without
# the leading hyphens if no dest is supplied
kwargs.setdefault('dest', args[0][2:])
if kwargs['dest'].startswith('action_kwarg_'):
action_kwargs.append(
kwargs['dest'][len('action_kwarg_'):])
else:
action_kwargs.append(kwargs['dest'])
kwargs['dest'] = 'action_kwarg_' + kwargs['dest']
parser.add_argument(*args, **kwargs)
parser.set_defaults(action_fn=action_fn)
parser.set_defaults(action_kwargs=action_kwargs)
parser.add_argument('action_args', nargs='*')
add_legacy_command_parsers(command_object, subparsers)
command_opt = cfg.SubCommandOpt('command',
title='Commands',
help='Available commands',
handler=add_command_parsers)
def methods_of(obj):
"""Get all callable methods of an object that don't start with underscore
returns a list of tuples of the form (method_name, method)
"""
result = []
for i in dir(obj):
if callable(getattr(obj, i)) and not i.startswith('_'):
result.append((i, getattr(obj, i)))
return result
def main():
CONF.register_cli_opt(command_opt)
try:
cfg_files = cfg.find_config_files(project='glance',
prog='glance-registry')
cfg_files.extend(cfg.find_config_files(project='glance',
prog='glance-api'))
config.parse_args(default_config_files=cfg_files,
usage="%(prog)s [options] <cmd>")
log.setup('glance')
except RuntimeError as e:
sys.exit("ERROR: %s" % e)
try:
if CONF.command.action.startswith('db'):
return CONF.command.action_fn()
else:
func_kwargs = {}
for k in CONF.command.action_kwargs:
v = getattr(CONF.command, 'action_kwarg_' + k)
if v is None:
continue
func_kwargs[k] = strutils.safe_decode(v)
func_args = [strutils.safe_decode(arg)
for arg in CONF.command.action_args]
return CONF.command.action_fn(*func_args, **func_kwargs)
except exception.GlanceException as e:
sys.exit("ERROR: %s" % utils.exception_to_str(e))
if __name__ == '__main__':
main()
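# Typical invocations (illustrative; the installed executable name, e.g.
# ``glance-manage``, depends on packaging):
#     glance-manage db sync
#     glance-manage db upgrade --version 40
#     glance-manage db load_metadefs --path /etc/glance/metadefs
# The legacy spellings registered above (``db_sync``, ``db_upgrade``, ...) are
# accepted as well for backwards compatibility.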
|
|
from django.core.urlresolvers import reverse
from django.shortcuts import redirect, get_object_or_404
from django.contrib.auth import authenticate, login, logout, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.auth.views import logout as Signout
from django.views.generic import TemplateView
from django.template.context import RequestContext
from django.views.generic.list import ListView
from django.views.generic import list_detail  # used by the deprecated ``profile_list`` view below (old-style Django generic views)
from django.conf import settings
from django.contrib import messages
from django.utils.translation import ugettext as _
from django.http import HttpResponseForbidden, Http404
from userena.forms import (SignupForm, SignupFormOnlyEmail, AuthenticationForm,
ChangeEmailForm, EditProfileForm)
from userena.models import UserenaSignup
from userena.decorators import secure_required
from userena.backends import UserenaAuthenticationBackend
from userena.utils import signin_redirect, get_profile_model
from userena import signals as userena_signals
from userena import settings as userena_settings
from guardian.decorators import permission_required_or_403
import warnings
class ExtraContextTemplateView(TemplateView):
""" Add extra context to a simple template view """
extra_context = None
def get_context_data(self, *args, **kwargs):
context = super(ExtraContextTemplateView, self).get_context_data(*args, **kwargs)
if self.extra_context:
context.update(self.extra_context)
return context
# this view is used in POST requests, e.g. signup when the form is not valid
post = TemplateView.get
class ProfileListView(ListView):
""" Lists all profiles """
context_object_name='profile_list'
page=1
paginate_by=50
template_name='userena/profile_list.html'
extra_context=None
def get_context_data(self, **kwargs):
# Call the base implementation first to get a context
context = super(ProfileListView, self).get_context_data(**kwargs)
try:
page = int(self.request.GET.get('page', None))
except (TypeError, ValueError):
page = self.page
if userena_settings.USERENA_DISABLE_PROFILE_LIST \
and not self.request.user.is_staff:
raise Http404
if not self.extra_context: self.extra_context = dict()
context['page'] = page
context['paginate_by'] = self.paginate_by
context['extra_context'] = self.extra_context
return context
def get_queryset(self):
profile_model = get_profile_model()
queryset = profile_model.objects.get_visible_profiles(self.request.user)
return queryset
@secure_required
def signup(request, signup_form=SignupForm,
template_name='userena/signup_form.html', success_url=None,
extra_context=None):
"""
Signup of an account.
Signup requiring a username, email and password. After signup a user gets
an email with an activation link used to activate their account. After
successful signup redirects to ``success_url``.
:param signup_form:
Form that will be used to sign a user. Defaults to userena's
:class:`SignupForm`.
:param template_name:
String containing the template name that will be used to display the
signup form. Defaults to ``userena/signup_form.html``.
:param success_url:
String containing the URI which should be redirected to after a
successful signup. If not supplied will redirect to
``userena_signup_complete`` view.
:param extra_context:
Dictionary containing variables which are added to the template
context. Defaults to a dictionary with a ``form`` key containing the
``signup_form``.
**Context**
``form``
Form supplied by ``signup_form``.
"""
# If no usernames are wanted and the default form is used, fall back to the
# form that doesn't ask for a username.
if userena_settings.USERENA_WITHOUT_USERNAMES and (signup_form == SignupForm):
signup_form = SignupFormOnlyEmail
form = signup_form()
if request.method == 'POST':
form = signup_form(request.POST, request.FILES)
if form.is_valid():
user = form.save()
# Send the signup complete signal
userena_signals.signup_complete.send(sender=None,
user=user)
if success_url: redirect_to = success_url
else: redirect_to = reverse('userena_signup_complete',
kwargs={'username': user.username})
# A newly signed-up user should log out any previously signed-in user.
if request.user.is_authenticated():
logout(request)
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context['form'] = form
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
def activate(request, activation_key,
template_name='userena/activate_fail.html',
success_url=None, extra_context=None):
"""
Activate a user with an activation key.
The key is a SHA1 string. When the SHA1 matches a
:class:`UserenaSignup` record, the :class:`User` of that account will be
activated. After a successful activation the view will redirect to
``success_url``. If the SHA1 is not found, the user will be shown the
``template_name`` template displaying a fail message.
:param activation_key:
String containing a SHA1 hash of 40 characters. A SHA1 hash is always
160 bits long; at 4 bits per hexadecimal character that makes
160/4 = 40 characters.
:param template_name:
String containing the template name that is used when the
``activation_key`` is invalid and the activation fails. Defaults to
``userena/activate_fail.html``.
:param success_url:
String containing the URL where the user should be redirected to after
a successful activation. Will replace ``%(username)s`` with string
formatting if supplied. If ``success_url`` is left empty, will direct
to ``userena_profile_detail`` view.
:param extra_context:
Dictionary containing variables which could be added to the template
context. Default to an empty dictionary.
"""
user = UserenaSignup.objects.activate_user(activation_key)
if user:
# Sign the user in.
auth_user = authenticate(identification=user.email,
check_password=False)
login(request, auth_user)
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('Your account has been activated and you have been signed in.'),
fail_silently=True)
if success_url: redirect_to = success_url % {'username': user.username }
else: redirect_to = reverse('userena_profile_detail',
kwargs={'username': user.username})
return redirect(redirect_to)
else:
if not extra_context: extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
def email_confirm(request, confirmation_key,
template_name='userena/email_confirm_fail.html',
success_url=None, extra_context=None):
"""
Confirms an email address with a confirmation key.
Confirms a new email address by running the
:func:`UserenaSignup.objects.confirm_email` method. If the method returns a
:class:`User`, the user's new e-mail address is set and they are redirected
to ``success_url``. If no ``User`` is returned, the user is presented with a
failure message from ``template_name``.
:param confirmation_key:
String with a SHA1 representing the confirmation key used to verify a
new email address.
:param template_name:
String containing the template name which should be rendered when
confirmation fails. When confirmation is successful, no template is
needed because the user will be redirected to ``success_url``.
:param success_url:
String containing the URL which is redirected to after a successful
confirmation. Supplied argument must be able to be rendered by
``reverse`` function.
:param extra_context:
Dictionary of variables that are passed on to the template supplied by
``template_name``.
"""
user = UserenaSignup.objects.confirm_email(confirmation_key)
if user:
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('Your email address has been changed.'),
fail_silently=True)
if success_url: redirect_to = success_url
else: redirect_to = reverse('userena_email_confirm_complete',
kwargs={'username': user.username})
return redirect(redirect_to)
else:
if not extra_context: extra_context = dict()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
def direct_to_user_template(request, username, template_name,
extra_context=None):
"""
Simple wrapper for Django's :func:`direct_to_template` view.
This view is used when you want to show a template to a specific user. A
wrapper for :func:`direct_to_template` where the template also has access to
the user that is found with ``username``. For example, it is used after signup,
activation and confirmation of a new e-mail.
:param username:
String defining the username of the user that made the action.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signup_complete.html``.
**Keyword arguments**
``extra_context``
A dictionary containing extra variables that should be passed to the
rendered template. The ``account`` key is always the ``User``
that completed the action.
**Extra context**
``viewed_user``
The :class:`User` that is currently being viewed.
"""
user = get_object_or_404(User, username__iexact=username)
if not extra_context: extra_context = dict()
extra_context['viewed_user'] = user
extra_context['profile'] = user.get_profile()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
def signin(request, auth_form=AuthenticationForm,
template_name='userena/signin_form.html',
redirect_field_name=REDIRECT_FIELD_NAME,
redirect_signin_function=signin_redirect, extra_context=None):
"""
Signin using email or username with password.
Signs a user in by combining email/username with password. If the
combination is correct and the user :func:`is_active` the
:func:`redirect_signin_function` is called with the arguments
``REDIRECT_FIELD_NAME`` and an instance of the :class:`User` who is
trying to log in. The returned value of the function will be the URL that
is redirected to.
A user can also choose to be remembered for ``USERENA_REMEMBER_ME_DAYS``.
:param auth_form:
Form to use for signing the user in. Defaults to the
:class:`AuthenticationForm` supplied by userena.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signin_form.html``.
:param redirect_field_name:
Form field name which contains the value for a redirect to the
succeeding page. Defaults to ``next`` and is set in
``REDIRECT_FIELD_NAME`` setting.
:param redirect_signin_function:
Function which handles the redirect. This function gets the value of
``REDIRECT_FIELD_NAME`` and the :class:`User` who has logged in. It
must return a string which specifies the URI to redirect to.
:param extra_context:
A dictionary containing extra variables that should be passed to the
rendered template. The ``form`` key is always the ``auth_form``.
**Context**
``form``
Form used for authentication supplied by ``auth_form``.
"""
form = auth_form()
if request.method == 'POST':
form = auth_form(request.POST, request.FILES)
if form.is_valid():
identification, password, remember_me = (form.cleaned_data['identification'],
form.cleaned_data['password'],
form.cleaned_data['remember_me'])
user = authenticate(identification=identification,
password=password)
if user.is_active:
login(request, user)
if remember_me:
request.session.set_expiry(userena_settings.USERENA_REMEMBER_ME_DAYS[1] * 86400)
else: request.session.set_expiry(0)
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('You have been signed in.'),
fail_silently=True)
# Whereto now?
redirect_to = redirect_signin_function(
request.REQUEST.get(redirect_field_name), user)
return redirect(redirect_to)
else:
return redirect(reverse('userena_disabled',
kwargs={'username': user.username}))
if not extra_context: extra_context = dict()
extra_context.update({
'form': form,
'next': request.REQUEST.get(redirect_field_name),
})
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
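# Illustrative only: a custom ``redirect_signin_function`` for the view above
# receives the value of ``REDIRECT_FIELD_NAME`` and the signed-in ``User`` and
# must return a URI string. This helper is hypothetical, not part of userena.
def example_signin_redirect(redirect, user):
    return redirect or reverse('userena_profile_detail',
                               kwargs={'username': user.username})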
@secure_required
def signout(request, next_page=userena_settings.USERENA_REDIRECT_ON_SIGNOUT,
template_name='userena/signout.html', *args, **kwargs):
"""
Signs out the user and adds a success message ``You have been signed
out.`` If ``next_page`` is defined, the user is redirected to that URI;
otherwise the template in ``template_name`` is rendered.
:param next_page:
A string which specifies the URI to redirect to.
:param template_name:
String defining the name of the template to use. Defaults to
``userena/signout.html``.
"""
if request.user.is_authenticated() and userena_settings.USERENA_USE_MESSAGES: # pragma: no cover
messages.success(request, _('You have been signed out.'), fail_silently=True)
return Signout(request, next_page, template_name, *args, **kwargs)
@secure_required
@permission_required_or_403('change_user', (User, 'username', 'username'))
def email_change(request, username, email_form=ChangeEmailForm,
template_name='userena/email_form.html', success_url=None,
extra_context=None):
"""
Change email address
:param username:
String of the username which specifies the current account.
:param email_form:
Form that will be used to change the email address. Defaults to
:class:`ChangeEmailForm` supplied by userena.
:param template_name:
String containing the template to be used to display the email form.
Defaults to ``userena/email_form.html``.
:param success_url:
Named URL where the user will get redirected to when successfully
changing their email address. When not supplied will redirect to
``userena_email_complete`` URL.
:param extra_context:
Dictionary containing extra variables that can be used to render the
template. The ``form`` key is always the form supplied by the keyword
argument ``form`` and the ``user`` key is the user whose email address
is being changed.
**Context**
``form``
Form that is used to change the email address supplied by ``form``.
``account``
Instance of the ``Account`` whose email address is about to be changed.
**Todo**
Need to have per-object permissions, which enables users with the correct
permissions to alter the email address of others.
"""
user = get_object_or_404(User, username__iexact=username)
form = email_form(user)
if request.method == 'POST':
form = email_form(user,
request.POST,
request.FILES)
if form.is_valid():
email_result = form.save()
if success_url: redirect_to = success_url
else: redirect_to = reverse('userena_email_change_complete',
kwargs={'username': user.username})
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context['form'] = form
extra_context['profile'] = user.get_profile()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
@permission_required_or_403('change_user', (User, 'username', 'username'))
def password_change(request, username, template_name='userena/password_form.html',
pass_form=PasswordChangeForm, success_url=None, extra_context=None):
""" Change password of user.
This view is almost a mirror of the view supplied in
:func:`contrib.auth.views.password_change`, with the minor change that in
this view we also use the username to change the password. This was needed
to keep our URLs logical (and RESTful) across the entire application, and
so that at a later stage administrators can also change a user's password
through the web application itself.
:param username:
String supplying the username of the user whose password is about to be
changed.
:param template_name:
String of the name of the template that is used to display the password
change form. Defaults to ``userena/password_form.html``.
:param pass_form:
Form used to change password. Default is the form supplied by Django
itself named ``PasswordChangeForm``.
:param success_url:
Named URL that is passed onto a :func:`reverse` function with
``username`` of the active user. Defaults to the
``userena_password_complete`` URL.
:param extra_context:
Dictionary of extra variables that are passed on to the template. The
``form`` key is always used by the form supplied by ``pass_form``.
**Context**
``form``
Form used to change the password.
"""
user = get_object_or_404(User,
username__iexact=username)
form = pass_form(user=user)
if request.method == "POST":
form = pass_form(user=user, data=request.POST)
if form.is_valid():
form.save()
# Send a signal that the password has changed
userena_signals.password_complete.send(sender=None,
user=user)
if success_url: redirect_to = success_url
else: redirect_to = reverse('userena_password_change_complete',
kwargs={'username': user.username})
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context['form'] = form
extra_context['profile'] = user.get_profile()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
@secure_required
@permission_required_or_403('change_profile', (get_profile_model(), 'user__username', 'username'))
def profile_edit(request, username, edit_profile_form=EditProfileForm,
template_name='userena/profile_form.html', success_url=None,
extra_context=None, **kwargs):
"""
Edit profile.
Edits a profile selected by the supplied username. First checks whether
the user is allowed to edit this profile; if not, a 404 is shown. When
the profile is successfully edited the view redirects to
``success_url``.
:param username:
Username of the user which profile should be edited.
:param edit_profile_form:
Form that is used to edit the profile. The :func:`EditProfileForm.save`
method of this form will be called when the form
:func:`EditProfileForm.is_valid`. Defaults to :class:`EditProfileForm`
from userena.
:param template_name:
String of the template that is used to render this view. Defaults to
``userena/profile_form.html``.
:param success_url:
Named URL which will be passed on to a django ``reverse`` function after
the form is successfully saved. Defaults to the ``userena_profile_detail`` URL.
:param extra_context:
Dictionary containing variables that are passed on to the
``template_name`` template. ``form`` key will always be the form used
to edit the profile, and the ``profile`` key is always the edited
profile.
**Context**
``form``
Form that is used to alter the profile.
``profile``
Instance of the ``Profile`` that is edited.
"""
user = get_object_or_404(User,
username__iexact=username)
profile = user.get_profile()
user_initial = {'first_name': user.first_name,
'last_name': user.last_name}
form = edit_profile_form(instance=profile, initial=user_initial)
if request.method == 'POST':
form = edit_profile_form(request.POST, request.FILES, instance=profile,
initial=user_initial)
if form.is_valid():
profile = form.save()
if userena_settings.USERENA_USE_MESSAGES:
messages.success(request, _('Your profile has been updated.'),
fail_silently=True)
if success_url: redirect_to = success_url
else: redirect_to = reverse('userena_profile_detail', kwargs={'username': username})
return redirect(redirect_to)
if not extra_context: extra_context = dict()
extra_context['form'] = form
extra_context['profile'] = profile
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
def profile_detail(
request, username,
template_name=userena_settings.USERENA_PROFILE_DETAIL_TEMPLATE,
extra_context=None, **kwargs):
"""
Detailed view of a user.
:param username:
String of the username whose profile should be viewed.
:param template_name:
String representing the template name that should be used to display
the profile.
:param extra_context:
Dictionary of variables which should be supplied to the template. The
``profile`` key is always the current profile.
**Context**
``profile``
Instance of the currently viewed ``Profile``.
"""
user = get_object_or_404(User,
username__iexact=username)
profile = user.get_profile()
if not profile.can_view_profile(request.user):
return HttpResponseForbidden(_("You don't have permission to view this profile."))
if not extra_context: extra_context = dict()
extra_context['profile'] = user.get_profile()
extra_context['hide_email'] = userena_settings.USERENA_HIDE_EMAIL
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request)
def profile_list(request, page=1, template_name='userena/profile_list.html',
paginate_by=50, extra_context=None, **kwargs): # pragma: no cover
"""
Returns a list of all profiles that are public.
It's possible to disable this by changing ``USERENA_DISABLE_PROFILE_LIST``
to ``True`` in your settings.
:param page:
Integer of the active page used for pagination. Defaults to the first
page.
:param template_name:
String defining the name of the template that is used to render the
list of all users. Defaults to ``userena/profile_list.html``.
:param paginate_by:
Integer defining the amount of displayed profiles per page. Defaults to
50 profiles per page.
:param extra_context:
Dictionary of variables that are passed on to the ``template_name``
template.
**Context**
``profile_list``
A list of profiles.
``is_paginated``
A boolean representing whether the results are paginated.
If the result is paginated, the context will also contain the following variables.
``paginator``
An instance of ``django.core.paginator.Paginator``.
``page_obj``
An instance of ``django.core.paginator.Page``.
"""
warnings.warn("views.profile_list is deprecated. Use ProfileListView instead", DeprecationWarning, stacklevel=2)
try:
page = int(request.GET.get('page', None))
except (TypeError, ValueError):
page = page
if userena_settings.USERENA_DISABLE_PROFILE_LIST \
and not request.user.is_staff:
raise Http404
profile_model = get_profile_model()
queryset = profile_model.objects.get_visible_profiles(request.user)
if not extra_context: extra_context = dict()
return list_detail.object_list(request,
queryset=queryset,
paginate_by=paginate_by,
page=page,
template_name=template_name,
extra_context=extra_context,
template_object_name='profile',
**kwargs)
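# Illustrative URL wiring (normally lives in a project's ``urls.py``). The
# patterns are only a sketch for old-style Django; ``userena_profile_detail``
# matches the URL name reversed by the views above, the other names are
# assumptions:
#     from django.conf.urls import url
#     from userena import views as userena_views
#     urlpatterns = [
#         url(r'^signup/$', userena_views.signup, name='userena_signup'),
#         url(r'^signin/$', userena_views.signin, name='userena_signin'),
#         url(r'^(?P<username>[\.\w-]+)/$', userena_views.profile_detail,
#             name='userena_profile_detail'),
#         url(r'^(?P<username>[\.\w-]+)/edit/$', userena_views.profile_edit,
#             name='userena_profile_edit'),
#     ]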
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkGatewayConnectionsOperations(object):
"""VirtualNetworkGatewayConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_connection_name, # type: str
parameters, # type: "_models.VirtualNetworkGatewayConnection"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkGatewayConnection"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGatewayConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualNetworkGatewayConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGatewayConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkGatewayConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_network_gateway_connection_name, # type: str
parameters, # type: "_models.VirtualNetworkGatewayConnection"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualNetworkGatewayConnection"]
"""Creates or updates a virtual network gateway connection in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The name of the virtual network gateway
connection.
:type virtual_network_gateway_connection_name: str
:param parameters: Parameters supplied to the create or update virtual network gateway
connection operation.
:type parameters: ~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGatewayConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualNetworkGatewayConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGatewayConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGatewayConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkGatewayConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'} # type: ignore
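# Illustrative call pattern (assumes a ``NetworkManagementClient`` built for
# api-version 2016-09-01 is available as ``network_client``; the resource group,
# connection name and ``connection_parameters`` model are placeholders):
#     poller = network_client.virtual_network_gateway_connections.begin_create_or_update(
#         'my-resource-group', 'my-connection', connection_parameters)
#     connection = poller.result()  # blocks until the long-running operation completes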
def get(
self,
resource_group_name, # type: str
virtual_network_gateway_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkGatewayConnection"
"""Gets the specified virtual network gateway connection by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The name of the virtual network gateway
connection.
:type virtual_network_gateway_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkGatewayConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGatewayConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGatewayConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkGatewayConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_network_gateway_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified virtual network Gateway connection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The name of the virtual network gateway
connection.
:type virtual_network_gateway_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}'} # type: ignore
def _set_shared_key_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_connection_name, # type: str
parameters, # type: "_models.ConnectionSharedKey"
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionSharedKey"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionSharedKey"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._set_shared_key_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ConnectionSharedKey')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectionSharedKey', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ConnectionSharedKey', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_set_shared_key_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey'} # type: ignore
def begin_set_shared_key(
self,
resource_group_name, # type: str
virtual_network_gateway_connection_name, # type: str
parameters, # type: "_models.ConnectionSharedKey"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ConnectionSharedKey"]
"""The Put VirtualNetworkGatewayConnectionSharedKey operation sets the virtual network gateway
connection shared key for passed virtual network gateway connection in the specified resource
group through Network resource provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The virtual network gateway connection name.
:type virtual_network_gateway_connection_name: str
:param parameters: Parameters supplied to the Begin Set Virtual Network Gateway connection
        Shared key operation through Network resource provider.
:type parameters: ~azure.mgmt.network.v2016_09_01.models.ConnectionSharedKey
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ConnectionSharedKey or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2016_09_01.models.ConnectionSharedKey]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionSharedKey"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._set_shared_key_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ConnectionSharedKey', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_set_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey'} # type: ignore
def get_shared_key(
self,
resource_group_name, # type: str
virtual_network_gateway_connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionSharedKey"
"""The Get VirtualNetworkGatewayConnectionSharedKey operation retrieves information about the
specified virtual network gateway connection shared key through Network resource provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The virtual network gateway connection shared
key name.
:type virtual_network_gateway_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionSharedKey, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2016_09_01.models.ConnectionSharedKey
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionSharedKey"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
# Construct URL
url = self.get_shared_key.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConnectionSharedKey', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualNetworkGatewayConnectionListResult"]
"""The List VirtualNetworkGatewayConnections operation retrieves all the virtual network gateways
connections created.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkGatewayConnectionListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2016_09_01.models.VirtualNetworkGatewayConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkGatewayConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkGatewayConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections'} # type: ignore
def _reset_shared_key_initial(
self,
resource_group_name, # type: str
virtual_network_gateway_connection_name, # type: str
parameters, # type: "_models.ConnectionResetSharedKey"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ConnectionResetSharedKey"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ConnectionResetSharedKey"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._reset_shared_key_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ConnectionResetSharedKey')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectionResetSharedKey', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_reset_shared_key_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey/reset'} # type: ignore
def begin_reset_shared_key(
self,
resource_group_name, # type: str
virtual_network_gateway_connection_name, # type: str
parameters, # type: "_models.ConnectionResetSharedKey"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ConnectionResetSharedKey"]
"""The VirtualNetworkGatewayConnectionResetSharedKey operation resets the virtual network gateway
connection shared key for passed virtual network gateway connection in the specified resource
group through Network resource provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The virtual network gateway connection reset
shared key Name.
:type virtual_network_gateway_connection_name: str
:param parameters: Parameters supplied to the begin reset virtual network gateway connection
shared key operation through network resource provider.
:type parameters: ~azure.mgmt.network.v2016_09_01.models.ConnectionResetSharedKey
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ConnectionResetSharedKey or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2016_09_01.models.ConnectionResetSharedKey]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionResetSharedKey"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._reset_shared_key_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ConnectionResetSharedKey', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey/reset'} # type: ignore
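# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated client): one way a caller
# might drive the operations defined above through the service client. It
# assumes the azure-identity and azure-mgmt-network packages are installed;
# the resource group and connection names are placeholders, and the default
# client may route to a newer API version than 2016-09-01.
def _example_connection_workflow(subscription_id, resource_group, connection_name):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient

    client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)
    ops = client.virtual_network_gateway_connections

    # Plain GETs return deserialized models directly.
    connection = ops.get(resource_group, connection_name)
    shared_key = ops.get_shared_key(resource_group, connection_name)

    # list() returns an ItemPaged iterator; pages are fetched lazily.
    names = [c.name for c in ops.list(resource_group)]

    # Long-running operations return an LROPoller; result() blocks until the
    # service reports the operation as finished.
    poller = ops.begin_delete(resource_group, connection_name)
    poller.result()
    return connection, shared_key, names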
|
|
import gym
import random
import numpy as np
from itertools import count
from collections import deque
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import os
from torch.autograd import Variable
from atari_wrappers import WarpFrame, FrameStack, ClipRewardEnv
use_cuda = torch.cuda.is_available()
if use_cuda:
torch.cuda.set_device(1)
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
Tensor = FloatTensor
# Experience replay memory
class ExperienceMemory:
def __init__(self, n):
self.memory = deque(maxlen=n)
def add_transition(self, s, a, r, next_s, done):
self.memory.append((s, a, r, next_s, done))
def __len__(self):
return len(self.memory)
def sample(self, batch_size):
# https://stackoverflow.com/questions/40181284/how-to-get-random-sample-from-deque-in-python-3
# Since python3.5 you can just do a random sample on a deque with a size
sample_batch = random.sample(self.memory, batch_size)
state_batch = []
action_batch = []
reward_batch = []
next_state_batch = []
done_batch = []
for s in sample_batch:
state_batch.append(s[0])
action_batch.append(s[1])
reward_batch.append(s[2])
next_state_batch.append(s[3])
done_batch.append(s[4])
return np.asarray(state_batch), np.asarray(action_batch), np.asarray(reward_batch), \
np.asarray(next_state_batch), np.asarray(done_batch)
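# Illustrative check of the replay buffer above (not used by training):
# transitions go in as (s, a, r, s', done) tuples and come back out of
# sample() as five aligned numpy arrays.
def _replay_memory_example():
    memory = ExperienceMemory(100)
    for i in range(10):
        state = np.zeros((84, 84, 4), dtype=np.uint8)
        next_state = np.ones((84, 84, 4), dtype=np.uint8)
        memory.add_transition(state, i % 4, 1.0, next_state, i == 9)
    states, actions, rewards, next_states, dones = memory.sample(4)
    assert states.shape == (4, 84, 84, 4)
    assert actions.shape == rewards.shape == dones.shape == (4,)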
# Environment according to deepmind's paper "Human Level Control Through Deep Reinforcement Learning"
def deepmind_env(env_id, m=4):
env = gym.make(env_id)
# Wrap the frames to 84x84 and grayscale
env = WarpFrame(env)
# Stack the 4 most recent frames
env = FrameStack(env, m)
# Clip rewards to -1 and 1
env = ClipRewardEnv(env)
return env
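# Quick shape check for the wrapped environment (assumes the local
# atari_wrappers module follows the OpenAI baselines conventions, i.e.
# WarpFrame yields 84x84 grayscale frames stacked along the last axis,
# and that the Atari ROM is available).
def _check_env_shape(env_id="PongDeterministic-v4"):
    env = deepmind_env(env_id)
    obs = env.reset()
    assert np.asarray(obs).shape == (84, 84, 4)
    env.close()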
# The neural network
class Model(nn.Module):
def __init__(self, possible_actions):
super().__init__()
self.conv1 = nn.Conv2d(4, 32, kernel_size=8, stride=4)
self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
self.fc1 = nn.Linear(64*7*7, 512)
self.fc2 = nn.Linear(512, possible_actions)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.fc1(x.view(x.size(0), -1)))
return self.fc2(x)
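# Shape sanity check for the network above: one 4x84x84 stacked frame flows
# through the three conv layers to a 64*7*7 = 3136-dimensional feature vector
# (exactly what fc1 expects) and out to one Q-value per action.
def _check_model_shapes(num_actions=6):
    model = Model(num_actions)
    dummy = Variable(torch.zeros(1, 4, 84, 84))
    out = model(dummy)
    assert out.size() == (1, num_actions)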
class Agent:
def __init__(self, game_id):
# initialize the game environment
self.env = deepmind_env(game_id)
# Init Q
self.Q = Model(self.env.action_space.n)
# Init target Q with the same weights as self.Q
self.target_Q = Model(self.env.action_space.n)
self.sync_target_q()
if use_cuda:
self.Q.cuda()
self.target_Q.cuda()
self.memory = ExperienceMemory(1000000)
self.gamma = 0.99
self.loss = torch.nn.MSELoss()
self.optimizer = optim.RMSprop(self.Q.parameters(), lr=0.0001)
def sync_target_q(self):
# Syncs the Q target with the target Q function
# https://discuss.pytorch.org/t/are-there-any-recommended-methods-to-clone-a-model/483/5
copy_from = list(self.Q.parameters())
copy_to = list(self.target_Q.parameters())
n = len(copy_from)
for i in range(0, n):
copy_to[i].data[:] = copy_from[i].data[:]
def get_eps(self, current_steps, max_exploration, start_eps, end_eps):
# Gets the current epsilon value
# linearly decline
return max(end_eps, start_eps - current_steps / max_exploration)
    def get_action(self, current_eps, states):
        # Get an action based on the current eps and the state.
        # Both branches return a plain Python int so that env.step() and the
        # replay memory always see the same action type.
        if random.random() > current_eps:
            # The wrappers return a single state as 84 x 84 x 4 (HWC), but the
            # network expects an NCHW batch, so transpose and add a batch axis.
            states = np.asarray(states).transpose(2, 0, 1)
            states = np.expand_dims(states, 0)
            actions = self.Q(Variable(torch.from_numpy(states)).type(FloatTensor))
            return int(np.argmax(actions.data.cpu().numpy()))
        else:
            return random.randrange(self.env.action_space.n)
    def get_yi(self, next_states, rewards, done):
        # The sampled batch arrives as N x 84 x 84 x 4 (HWC); convert it to
        # NCHW before feeding the target network.
        next_states = np.transpose(next_states, (0, 3, 1, 2))
        q_target_vals = self.target_Q(Variable(torch.from_numpy(next_states)).type(FloatTensor))
# We get a batch size x 1 tensor back
# We want the values from the last dimension
q_target_vals = np.max(q_target_vals.data.cpu().numpy(), axis=1)
# For every state that is done, set Q to zero
mask = (done == 1)
q_target_vals[mask] = 0
yi = rewards + self.gamma * q_target_vals
return Variable(torch.from_numpy(yi)).type(FloatTensor)
def update_weights(self, batch_size):
if len(self.memory) < batch_size:
return
# get a random minibatch of transitions
state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(batch_size)
# Get our yi's
yi = self.get_yi(next_state_batch, reward_batch, done_batch)
        # Now we need to get our normal q values; states are stored as
        # N x 84 x 84 x 4 (HWC), so convert to NCHW first.
        state_batch = np.transpose(state_batch, (0, 3, 1, 2))
        q_values = self.Q(Variable(torch.from_numpy(state_batch).type(FloatTensor)))
# Now select the actions we took
        actions_taken = torch.gather(q_values, 1,
                                     Variable(torch.from_numpy(action_batch)).type(LongTensor).view(-1, 1))
        # actions_taken is (batch, 1); reshape yi to match so MSELoss compares
        # element-wise instead of broadcasting.
        loss = self.loss(actions_taken, yi.view(-1, 1))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def load_agent(self, file):
self.Q.load_state_dict(torch.load(file, map_location=lambda storage, loc: storage))
self.sync_target_q()
def save_agent(self, episode):
if not os.path.exists("saved_model/"):
os.makedirs("saved_model/")
torch.save(self.Q.state_dict(), "saved_model/agent_episode_{}.pth".format(episode))
def play(self, episodes):
for episode in range(1, episodes+1):
state = self.env.reset()
for _ in count(start=1):
action = self.get_action(0, state)
state, reward, done, _ = self.env.step(action)
self.env.render()
if done:
break
    def train(self, episodes, sync_target=10000, max_exploration=10**5, end_eps=0.1, start_eps=1, batch_size=32):
steps = 0
self.save_agent(0)
for episode in range(1, episodes + 1):
state = self.env.reset()
current_reward = 0
for t in count(start=1):
# select action with prob eps
                current_eps = self.get_eps(steps, max_exploration, start_eps, end_eps)
action = self.get_action(current_eps, state)
# execute action in emulator
next_state, reward, done, _ = self.env.step(action)
# Add this to our memory
self.memory.add_transition(state, action, reward, next_state, done)
# Update our weights now
self.update_weights(batch_size)
steps += 1
current_reward += reward
state = next_state
# every C steps we reset target Q
if (steps % sync_target) == 0:
print("Sync target network")
self.sync_target_q()
if done:
break
print("Episode: {} finished".format(episode))
# information stuff
if (episode % 10) == 0:
print("--- Saving episode {} ---".format(episode))
self.save_agent(episode)
print("Episode reward: {}".format(current_reward))
print("Eps: {}".format(current_eps))
if (current_reward) >= 19:
self.save_agent("final")
if __name__ == '__main__':
agent = Agent("PongDeterministic-v4")
agent.train(1000)
|
|
from __future__ import unicode_literals
import datetime
import time
import sys
from email.header import Header
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from django.conf import settings
from django.core import signals
from django.core import signing
from django.core.exceptions import DisallowedRedirect
from django.http.cookie import SimpleCookie
from django.utils import six, timezone
from django.utils.encoding import force_bytes, force_text, iri_to_uri
from django.utils.http import cookie_date
from django.utils.six.moves import map
# See http://www.iana.org/assignments/http-status-codes
REASON_PHRASES = {
100: 'CONTINUE',
101: 'SWITCHING PROTOCOLS',
102: 'PROCESSING',
200: 'OK',
201: 'CREATED',
202: 'ACCEPTED',
203: 'NON-AUTHORITATIVE INFORMATION',
204: 'NO CONTENT',
205: 'RESET CONTENT',
206: 'PARTIAL CONTENT',
207: 'MULTI-STATUS',
208: 'ALREADY REPORTED',
226: 'IM USED',
300: 'MULTIPLE CHOICES',
301: 'MOVED PERMANENTLY',
302: 'FOUND',
303: 'SEE OTHER',
304: 'NOT MODIFIED',
305: 'USE PROXY',
306: 'RESERVED',
307: 'TEMPORARY REDIRECT',
400: 'BAD REQUEST',
401: 'UNAUTHORIZED',
402: 'PAYMENT REQUIRED',
403: 'FORBIDDEN',
404: 'NOT FOUND',
405: 'METHOD NOT ALLOWED',
406: 'NOT ACCEPTABLE',
407: 'PROXY AUTHENTICATION REQUIRED',
408: 'REQUEST TIMEOUT',
409: 'CONFLICT',
410: 'GONE',
411: 'LENGTH REQUIRED',
412: 'PRECONDITION FAILED',
413: 'REQUEST ENTITY TOO LARGE',
414: 'REQUEST-URI TOO LONG',
415: 'UNSUPPORTED MEDIA TYPE',
416: 'REQUESTED RANGE NOT SATISFIABLE',
417: 'EXPECTATION FAILED',
418: "I'M A TEAPOT",
422: 'UNPROCESSABLE ENTITY',
423: 'LOCKED',
424: 'FAILED DEPENDENCY',
426: 'UPGRADE REQUIRED',
428: 'PRECONDITION REQUIRED',
429: 'TOO MANY REQUESTS',
431: 'REQUEST HEADER FIELDS TOO LARGE',
500: 'INTERNAL SERVER ERROR',
501: 'NOT IMPLEMENTED',
502: 'BAD GATEWAY',
503: 'SERVICE UNAVAILABLE',
504: 'GATEWAY TIMEOUT',
505: 'HTTP VERSION NOT SUPPORTED',
506: 'VARIANT ALSO NEGOTIATES',
507: 'INSUFFICIENT STORAGE',
508: 'LOOP DETECTED',
510: 'NOT EXTENDED',
511: 'NETWORK AUTHENTICATION REQUIRED',
}
class BadHeaderError(ValueError):
pass
class HttpResponseBase(six.Iterator):
"""
An HTTP response base class with dictionary-accessed headers.
This class doesn't handle content. It should not be used directly.
Use the HttpResponse and StreamingHttpResponse subclasses instead.
"""
status_code = 200
reason_phrase = None # Use default reason phrase for status code.
def __init__(self, content_type=None, status=None, reason=None):
# _headers is a mapping of the lower-case name to the original case of
# the header (required for working with legacy systems) and the header
# value. Both the name of the header and its value are ASCII strings.
self._headers = {}
self._charset = settings.DEFAULT_CHARSET
self._closable_objects = []
# This parameter is set by the handler. It's necessary to preserve the
# historical behavior of request_finished.
self._handler_class = None
if not content_type:
content_type = "%s; charset=%s" % (settings.DEFAULT_CONTENT_TYPE,
self._charset)
self.cookies = SimpleCookie()
if status is not None:
self.status_code = status
if reason is not None:
self.reason_phrase = reason
elif self.reason_phrase is None:
self.reason_phrase = REASON_PHRASES.get(self.status_code,
'UNKNOWN STATUS CODE')
self['Content-Type'] = content_type
def serialize_headers(self):
"""HTTP headers as a bytestring."""
def to_bytes(val, encoding):
return val if isinstance(val, bytes) else val.encode(encoding)
headers = [
(b': '.join([to_bytes(key, 'ascii'), to_bytes(value, 'latin-1')]))
for key, value in self._headers.values()
]
return b'\r\n'.join(headers)
if six.PY3:
__bytes__ = serialize_headers
else:
__str__ = serialize_headers
def _convert_to_charset(self, value, charset, mime_encode=False):
"""Converts headers key/value to ascii/latin-1 native strings.
`charset` must be 'ascii' or 'latin-1'. If `mime_encode` is True and
`value` can't be represented in the given charset, MIME-encoding
is applied.
"""
if not isinstance(value, (bytes, six.text_type)):
value = str(value)
try:
if six.PY3:
if isinstance(value, str):
# Ensure string is valid in given charset
value.encode(charset)
else:
# Convert bytestring using given charset
value = value.decode(charset)
else:
if isinstance(value, str):
# Ensure string is valid in given charset
value.decode(charset)
else:
# Convert unicode string to given charset
value = value.encode(charset)
except UnicodeError as e:
if mime_encode:
# Wrapping in str() is a workaround for #12422 under Python 2.
value = str(Header(value, 'utf-8', maxlinelen=sys.maxsize).encode())
else:
e.reason += ', HTTP response headers must be in %s format' % charset
raise
if str('\n') in value or str('\r') in value:
raise BadHeaderError("Header values can't contain newlines (got %r)" % value)
return value
def __setitem__(self, header, value):
header = self._convert_to_charset(header, 'ascii')
value = self._convert_to_charset(value, 'latin-1', mime_encode=True)
self._headers[header.lower()] = (header, value)
def __delitem__(self, header):
try:
del self._headers[header.lower()]
except KeyError:
pass
def __getitem__(self, header):
return self._headers[header.lower()][1]
def __getstate__(self):
        # SimpleCookie is not pickleable with pickle.HIGHEST_PROTOCOL, so we
        # serialize it to a string instead.
state = self.__dict__.copy()
state['cookies'] = str(state['cookies'])
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.cookies = SimpleCookie(self.cookies)
def has_header(self, header):
"""Case-insensitive check for a header."""
return header.lower() in self._headers
__contains__ = has_header
def items(self):
return self._headers.values()
def get(self, header, alternate=None):
return self._headers.get(header.lower(), (None, alternate))[1]
def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False):
"""
Sets a cookie.
``expires`` can be:
- a string in the correct format,
- a naive ``datetime.datetime`` object in UTC,
- an aware ``datetime.datetime`` object in any time zone.
If it is a ``datetime.datetime`` object then ``max_age`` will be calculated.
"""
self.cookies[key] = value
if expires is not None:
if isinstance(expires, datetime.datetime):
if timezone.is_aware(expires):
expires = timezone.make_naive(expires, timezone.utc)
delta = expires - expires.utcnow()
# Add one second so the date matches exactly (a fraction of
# time gets lost between converting to a timedelta and
# then the date string).
delta = delta + datetime.timedelta(seconds=1)
# Just set max_age - the max_age logic will set expires.
expires = None
max_age = max(0, delta.days * 86400 + delta.seconds)
else:
self.cookies[key]['expires'] = expires
if max_age is not None:
self.cookies[key]['max-age'] = max_age
# IE requires expires, so set it if hasn't been already.
if not expires:
self.cookies[key]['expires'] = cookie_date(time.time() +
max_age)
if path is not None:
self.cookies[key]['path'] = path
if domain is not None:
self.cookies[key]['domain'] = domain
if secure:
self.cookies[key]['secure'] = True
if httponly:
self.cookies[key]['httponly'] = True
def set_signed_cookie(self, key, value, salt='', **kwargs):
value = signing.get_cookie_signer(salt=key + salt).sign(value)
return self.set_cookie(key, value, **kwargs)
def delete_cookie(self, key, path='/', domain=None):
self.set_cookie(key, max_age=0, path=path, domain=domain,
expires='Thu, 01-Jan-1970 00:00:00 GMT')
# Common methods used by subclasses
def make_bytes(self, value):
"""Turn a value into a bytestring encoded in the output charset."""
# Per PEP 3333, this response body must be bytes. To avoid returning
# an instance of a subclass, this function returns `bytes(value)`.
# This doesn't make a copy when `value` already contains bytes.
# If content is already encoded (eg. gzip), assume bytes.
if self.has_header('Content-Encoding'):
return bytes(value)
# Handle string types -- we can't rely on force_bytes here because:
        # - under Python 3 it attempts str conversion first
# - when self._charset != 'utf-8' it re-encodes the content
if isinstance(value, bytes):
return bytes(value)
if isinstance(value, six.text_type):
return bytes(value.encode(self._charset))
# Handle non-string types (#16494)
return force_bytes(value, self._charset)
# These methods partially implement the file-like object interface.
# See http://docs.python.org/lib/bltin-file-objects.html
# The WSGI server must call this method upon completion of the request.
# See http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html
def close(self):
for closable in self._closable_objects:
try:
closable.close()
except Exception:
pass
signals.request_finished.send(sender=self._handler_class)
def write(self, content):
raise Exception("This %s instance is not writable" % self.__class__.__name__)
def flush(self):
pass
def tell(self):
raise Exception("This %s instance cannot tell its position" % self.__class__.__name__)
class HttpResponse(HttpResponseBase):
"""
An HTTP response class with a string as content.
    This content can be read, appended to, or replaced.
"""
streaming = False
def __init__(self, content=b'', *args, **kwargs):
super(HttpResponse, self).__init__(*args, **kwargs)
# Content is a bytestring. See the `content` property methods.
self.content = content
def serialize(self):
"""Full HTTP message, including headers, as a bytestring."""
return self.serialize_headers() + b'\r\n\r\n' + self.content
if six.PY3:
__bytes__ = serialize
else:
__str__ = serialize
@property
def content(self):
return b''.join(self._container)
@content.setter
def content(self, value):
# Consume iterators upon assignment to allow repeated iteration.
if hasattr(value, '__iter__') and not isinstance(value, (bytes, six.string_types)):
if hasattr(value, 'close'):
self._closable_objects.append(value)
value = b''.join(self.make_bytes(chunk) for chunk in value)
else:
value = self.make_bytes(value)
# Create a list of properly encoded bytestrings to support write().
self._container = [value]
def __iter__(self):
return iter(self._container)
def write(self, content):
self._container.append(self.make_bytes(content))
def tell(self):
return len(self.content)
class StreamingHttpResponse(HttpResponseBase):
"""
A streaming HTTP response class with an iterator as content.
This should only be iterated once, when the response is streamed to the
client. However, it can be appended to or replaced with a new iterator
that wraps the original content (or yields entirely new content).
"""
streaming = True
def __init__(self, streaming_content=(), *args, **kwargs):
super(StreamingHttpResponse, self).__init__(*args, **kwargs)
# `streaming_content` should be an iterable of bytestrings.
# See the `streaming_content` property methods.
self.streaming_content = streaming_content
@property
def content(self):
raise AttributeError("This %s instance has no `content` attribute. "
"Use `streaming_content` instead." % self.__class__.__name__)
@property
def streaming_content(self):
return map(self.make_bytes, self._iterator)
@streaming_content.setter
def streaming_content(self, value):
# Ensure we can never iterate on "value" more than once.
self._iterator = iter(value)
if hasattr(value, 'close'):
self._closable_objects.append(value)
def __iter__(self):
return self.streaming_content
class HttpResponseRedirectBase(HttpResponse):
allowed_schemes = ['http', 'https', 'ftp']
def __init__(self, redirect_to, *args, **kwargs):
parsed = urlparse(force_text(redirect_to))
if parsed.scheme and parsed.scheme not in self.allowed_schemes:
raise DisallowedRedirect("Unsafe redirect to URL with protocol '%s'" % parsed.scheme)
super(HttpResponseRedirectBase, self).__init__(*args, **kwargs)
self['Location'] = iri_to_uri(redirect_to)
url = property(lambda self: self['Location'])
class HttpResponseRedirect(HttpResponseRedirectBase):
status_code = 302
class HttpResponsePermanentRedirect(HttpResponseRedirectBase):
status_code = 301
class HttpResponseNotModified(HttpResponse):
status_code = 304
def __init__(self, *args, **kwargs):
super(HttpResponseNotModified, self).__init__(*args, **kwargs)
del self['content-type']
@HttpResponse.content.setter
def content(self, value):
if value:
raise AttributeError("You cannot set content to a 304 (Not Modified) response")
self._container = []
class HttpResponseBadRequest(HttpResponse):
status_code = 400
class HttpResponseNotFound(HttpResponse):
status_code = 404
class HttpResponseForbidden(HttpResponse):
status_code = 403
class HttpResponseNotAllowed(HttpResponse):
status_code = 405
def __init__(self, permitted_methods, *args, **kwargs):
super(HttpResponseNotAllowed, self).__init__(*args, **kwargs)
self['Allow'] = ', '.join(permitted_methods)
class HttpResponseGone(HttpResponse):
status_code = 410
class HttpResponseServerError(HttpResponse):
status_code = 500
class Http404(Exception):
pass
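# Minimal usage sketch for the response classes above (assumes Django settings
# are already configured, e.g. via django.conf.settings.configure(); nothing
# here runs at import time).
def _response_example():
    response = HttpResponse("hello", content_type="text/plain")
    response["X-Custom"] = "1"                 # headers use dictionary access
    response.set_cookie("session", "abc", max_age=3600, httponly=True)
    assert response.has_header("x-custom")     # lookups are case-insensitive
    return response.serialize_headers()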
|
|
# Copyright (c) 2016-2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division
import json
import logging
import random
import six
from threading import Lock
from tornado.ioloop import PeriodicCallback
from .constants import (
_max_id_bits,
DEFAULT_SAMPLING_INTERVAL,
SAMPLER_TYPE_CONST,
SAMPLER_TYPE_PROBABILISTIC,
SAMPLER_TYPE_RATE_LIMITING,
SAMPLER_TYPE_LOWER_BOUND,
)
from .metrics import Metrics, LegacyMetricsFactory
from .utils import ErrorReporter
from .rate_limiter import RateLimiter
default_logger = logging.getLogger('jaeger_tracing')
SAMPLER_TYPE_TAG_KEY = 'sampler.type'
SAMPLER_PARAM_TAG_KEY = 'sampler.param'
DEFAULT_SAMPLING_PROBABILITY = 0.001
DEFAULT_LOWER_BOUND = 1.0 / (10.0 * 60.0) # sample once every 10 minutes
DEFAULT_MAX_OPERATIONS = 2000
STRATEGIES_STR = 'perOperationStrategies'
OPERATION_STR = 'operation'
DEFAULT_LOWER_BOUND_STR = 'defaultLowerBoundTracesPerSecond'
PROBABILISTIC_SAMPLING_STR = 'probabilisticSampling'
SAMPLING_RATE_STR = 'samplingRate'
DEFAULT_SAMPLING_PROBABILITY_STR = 'defaultSamplingProbability'
OPERATION_SAMPLING_STR = 'operationSampling'
MAX_TRACES_PER_SECOND_STR = 'maxTracesPerSecond'
RATE_LIMITING_SAMPLING_STR = 'rateLimitingSampling'
STRATEGY_TYPE_STR = 'strategyType'
PROBABILISTIC_SAMPLING_STRATEGY = 'PROBABILISTIC'
RATE_LIMITING_SAMPLING_STRATEGY = 'RATE_LIMITING'
class Sampler(object):
"""
Sampler is responsible for deciding if a particular span should be
"sampled", i.e. recorded in permanent storage.
"""
def __init__(self, tags=None):
self._tags = tags
def is_sampled(self, trace_id, operation=''):
raise NotImplementedError()
def close(self):
raise NotImplementedError()
def __eq__(self, other):
return (
isinstance(other, self.__class__) and self.__dict__ == other.__dict__
)
def __ne__(self, other):
return not self.__eq__(other)
class ConstSampler(Sampler):
"""ConstSampler always returns the same decision."""
def __init__(self, decision):
super(ConstSampler, self).__init__(
tags={
SAMPLER_TYPE_TAG_KEY: SAMPLER_TYPE_CONST,
SAMPLER_PARAM_TAG_KEY: decision,
}
)
self.decision = decision
def is_sampled(self, trace_id, operation=''):
return self.decision, self._tags
def close(self):
pass
def __str__(self):
return 'ConstSampler(%s)' % self.decision
class ProbabilisticSampler(Sampler):
"""
A sampler that randomly samples a certain percentage of traces specified
by the samplingRate, in the range between 0.0 and 1.0.
It relies on the fact that new trace IDs are 64bit random numbers
themselves, thus making the sampling decision without generating a new
random number, but simply calculating if traceID < (samplingRate * 2^64).
Note that we actually ignore (zero out) the most significant bit.
"""
def __init__(self, rate):
super(ProbabilisticSampler, self).__init__(
tags={
SAMPLER_TYPE_TAG_KEY: SAMPLER_TYPE_PROBABILISTIC,
SAMPLER_PARAM_TAG_KEY: rate,
}
)
assert 0.0 <= rate <= 1.0, 'Sampling rate must be between 0.0 and 1.0'
self.rate = rate
self.max_number = 1 << _max_id_bits
self.boundary = rate * self.max_number
def is_sampled(self, trace_id, operation=''):
trace_id = trace_id & (self.max_number - 1)
return trace_id < self.boundary, self._tags
def close(self):
pass
def __str__(self):
return 'ProbabilisticSampler(%s)' % self.rate
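# Worked example of the boundary arithmetic described in the docstring above:
# with rate=0.5 the boundary is 0.5 * 2**_max_id_bits, so ids just below it
# are sampled and ids just above it are not.
def _probabilistic_sampler_example():
    sampler = ProbabilisticSampler(0.5)
    below, _ = sampler.is_sampled(int(sampler.boundary) - 1)
    above, _ = sampler.is_sampled(int(sampler.boundary) + 1)
    assert below and not above
    sampler.close()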
class RateLimitingSampler(Sampler):
"""
Samples at most max_traces_per_second. The distribution of sampled
traces follows burstiness of the service, i.e. a service with uniformly
distributed requests will have those requests sampled uniformly as well,
but if requests are bursty, especially sub-second, then a number of
sequential requests can be sampled each second.
"""
def __init__(self, max_traces_per_second=10):
super(RateLimitingSampler, self).__init__()
self.rate_limiter = None
self._init(max_traces_per_second)
def _init(self, max_traces_per_second):
assert max_traces_per_second >= 0, \
'max_traces_per_second must not be negative'
self._tags = {
SAMPLER_TYPE_TAG_KEY: SAMPLER_TYPE_RATE_LIMITING,
SAMPLER_PARAM_TAG_KEY: max_traces_per_second,
}
self.traces_per_second = max_traces_per_second
max_balance = max(self.traces_per_second, 1.0)
if not self.rate_limiter:
self.rate_limiter = RateLimiter(
credits_per_second=self.traces_per_second,
max_balance=max_balance
)
else:
self.rate_limiter.update(max_traces_per_second, max_balance)
def is_sampled(self, trace_id, operation=''):
return self.rate_limiter.check_credit(1.0), self._tags
def close(self):
pass
def __eq__(self, other):
"""The last_tick and balance fields can be different"""
if not isinstance(other, self.__class__):
return False
d1 = dict(self.rate_limiter.__dict__)
d2 = dict(other.rate_limiter.__dict__)
d1['balance'] = d2['balance']
d1['last_tick'] = d2['last_tick']
return d1 == d2
def update(self, max_traces_per_second):
if self.traces_per_second == max_traces_per_second:
return False
self._init(max_traces_per_second)
return True
def __str__(self):
return 'RateLimitingSampler(%s)' % self.traces_per_second
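# Illustration of the burst behaviour described above. The sketch assumes the
# underlying RateLimiter starts with a full credit balance, so only the first
# couple of back-to-back decisions are sampled and the rest are throttled
# until credits accumulate again.
def _rate_limiting_sampler_example():
    sampler = RateLimitingSampler(max_traces_per_second=2)
    decisions = [sampler.is_sampled(trace_id=i)[0] for i in range(5)]
    sampler.close()
    return decisions  # e.g. [True, True, False, False, False]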
class GuaranteedThroughputProbabilisticSampler(Sampler):
"""
A sampler that leverages both ProbabilisticSampler and RateLimitingSampler.
The RateLimitingSampler is used as a guaranteed lower bound sampler such
that every operation is sampled at least once in a time interval defined by
the lower_bound. ie a lower_bound of 1.0 / (60 * 10) will sample an
operation at least once every 10 minutes.
The ProbabilisticSampler is given higher priority when tags are emitted,
ie. if is_sampled() for both samplers return true, the tags for
ProbabilisticSampler will be used.
"""
def __init__(self, operation, lower_bound, rate):
super(GuaranteedThroughputProbabilisticSampler, self).__init__(
tags={
SAMPLER_TYPE_TAG_KEY: SAMPLER_TYPE_LOWER_BOUND,
SAMPLER_PARAM_TAG_KEY: rate,
}
)
self.probabilistic_sampler = ProbabilisticSampler(rate)
self.lower_bound_sampler = RateLimitingSampler(lower_bound)
self.operation = operation
self.rate = rate
self.lower_bound = lower_bound
def is_sampled(self, trace_id, operation=''):
sampled, tags = \
self.probabilistic_sampler.is_sampled(trace_id, operation)
if sampled:
self.lower_bound_sampler.is_sampled(trace_id, operation)
return True, tags
sampled, _ = self.lower_bound_sampler.is_sampled(trace_id, operation)
return sampled, self._tags
def close(self):
self.probabilistic_sampler.close()
self.lower_bound_sampler.close()
def update(self, lower_bound, rate):
# (NB) This function should only be called while holding a Write lock.
if self.rate != rate:
self.probabilistic_sampler = ProbabilisticSampler(rate)
self.rate = rate
self._tags = {
SAMPLER_TYPE_TAG_KEY: SAMPLER_TYPE_LOWER_BOUND,
SAMPLER_PARAM_TAG_KEY: rate,
}
if self.lower_bound != lower_bound:
self.lower_bound_sampler.update(lower_bound)
self.lower_bound = lower_bound
def __str__(self):
return 'GuaranteedThroughputProbabilisticSampler(%s, %f, %f)' \
% (self.operation, self.rate, self.lower_bound)
class AdaptiveSampler(Sampler):
"""
A sampler that leverages both ProbabilisticSampler and RateLimitingSampler
via the GuaranteedThroughputProbabilisticSampler. This sampler keeps track
    of all operations and delegates calls to the respective
    GuaranteedThroughputProbabilisticSampler.
"""
def __init__(self, strategies, max_operations):
super(AdaptiveSampler, self).__init__()
samplers = {}
for strategy in strategies.get(STRATEGIES_STR, []):
operation = strategy.get(OPERATION_STR)
sampler = GuaranteedThroughputProbabilisticSampler(
operation,
strategies.get(DEFAULT_LOWER_BOUND_STR, DEFAULT_LOWER_BOUND),
get_sampling_probability(strategy)
)
samplers[operation] = sampler
self.samplers = samplers
self.default_sampler = \
ProbabilisticSampler(strategies.get(DEFAULT_SAMPLING_PROBABILITY_STR,
DEFAULT_SAMPLING_PROBABILITY))
self.default_sampling_probability = \
strategies.get(DEFAULT_SAMPLING_PROBABILITY_STR, DEFAULT_SAMPLING_PROBABILITY)
self.lower_bound = strategies.get(DEFAULT_LOWER_BOUND_STR, DEFAULT_LOWER_BOUND)
self.max_operations = max_operations
def is_sampled(self, trace_id, operation=''):
sampler = self.samplers.get(operation)
if not sampler:
if len(self.samplers) >= self.max_operations:
return self.default_sampler.is_sampled(trace_id, operation)
sampler = GuaranteedThroughputProbabilisticSampler(
operation,
self.lower_bound,
self.default_sampling_probability
)
self.samplers[operation] = sampler
return sampler.is_sampled(trace_id, operation)
return sampler.is_sampled(trace_id, operation)
def update(self, strategies):
# (NB) This function should only be called while holding a Write lock.
for strategy in strategies.get(STRATEGIES_STR, []):
operation = strategy.get(OPERATION_STR)
lower_bound = strategies.get(DEFAULT_LOWER_BOUND_STR, DEFAULT_LOWER_BOUND)
sampling_rate = get_sampling_probability(strategy)
sampler = self.samplers.get(operation)
if not sampler:
sampler = GuaranteedThroughputProbabilisticSampler(
operation,
lower_bound,
sampling_rate
)
self.samplers[operation] = sampler
else:
sampler.update(lower_bound, sampling_rate)
self.lower_bound = strategies.get(DEFAULT_LOWER_BOUND_STR, DEFAULT_LOWER_BOUND)
if self.default_sampling_probability != strategies.get(DEFAULT_SAMPLING_PROBABILITY_STR,
DEFAULT_SAMPLING_PROBABILITY):
self.default_sampling_probability = \
strategies.get(DEFAULT_SAMPLING_PROBABILITY_STR, DEFAULT_SAMPLING_PROBABILITY)
self.default_sampler = \
ProbabilisticSampler(self.default_sampling_probability)
def close(self):
for _, sampler in six.iteritems(self.samplers):
sampler.close()
def __str__(self):
return 'AdaptiveSampler(%f, %f, %d)' \
% (self.default_sampling_probability, self.lower_bound,
self.max_operations)
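# Illustrative shape of the per-operation strategies payload consumed above.
# The keys reuse the module-level string constants; the numbers are made up.
def _adaptive_sampler_example():
    strategies = {
        DEFAULT_SAMPLING_PROBABILITY_STR: 0.01,
        DEFAULT_LOWER_BOUND_STR: 1.0 / 60,
        STRATEGIES_STR: [
            {
                OPERATION_STR: 'GET:/api/users',
                PROBABILISTIC_SAMPLING_STR: {SAMPLING_RATE_STR: 0.2},
            },
        ],
    }
    sampler = AdaptiveSampler(strategies, max_operations=DEFAULT_MAX_OPERATIONS)
    sampled, tags = sampler.is_sampled(trace_id=12345, operation='GET:/api/users')
    sampler.close()
    return sampled, tags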
class RemoteControlledSampler(Sampler):
"""Periodically loads the sampling strategy from a remote server."""
def __init__(self, channel, service_name, **kwargs):
"""
:param channel: channel for communicating with jaeger-agent
:param service_name: name of this application
:param kwargs: optional parameters
- init_sampler: initial value of the sampler,
else ProbabilisticSampler(0.001)
- sampling_refresh_interval: interval in seconds for polling
for new strategy
- logger: Logger instance
- metrics: metrics facade, used to emit metrics on errors.
This parameter has been deprecated, please use
metrics_factory instead.
- metrics_factory: used to generate metrics for errors
- error_reporter: ErrorReporter instance
- max_operations: maximum number of unique operations the
AdaptiveSampler will keep track of
:param init:
:return:
"""
super(RemoteControlledSampler, self).__init__()
self._channel = channel
self.service_name = service_name
self.logger = kwargs.get('logger', default_logger)
self.sampler = kwargs.get('init_sampler')
self.sampling_refresh_interval = \
kwargs.get('sampling_refresh_interval') or DEFAULT_SAMPLING_INTERVAL
self.metrics_factory = kwargs.get('metrics_factory') \
or LegacyMetricsFactory(kwargs.get('metrics') or Metrics())
self.metrics = SamplerMetrics(self.metrics_factory)
self.error_reporter = kwargs.get('error_reporter') or \
ErrorReporter(Metrics())
self.max_operations = kwargs.get('max_operations') or \
DEFAULT_MAX_OPERATIONS
if not self.sampler:
self.sampler = ProbabilisticSampler(DEFAULT_SAMPLING_PROBABILITY)
else:
self.sampler.is_sampled(0) # assert we got valid sampler API
self.lock = Lock()
self.running = True
self.periodic = None
self.io_loop = channel.io_loop
if not self.io_loop:
self.logger.error(
'Cannot acquire IOLoop, sampler will not be updated')
else:
# according to IOLoop docs, it's not safe to use timeout methods
# unless already running in the loop, so we use `add_callback`
self.io_loop.add_callback(self._init_polling)
def is_sampled(self, trace_id, operation=''):
with self.lock:
return self.sampler.is_sampled(trace_id, operation)
def _init_polling(self):
"""
Bootstrap polling for sampling strategy.
To avoid spiky traffic from the samplers, we use a random delay
before the first poll.
"""
with self.lock:
if not self.running:
return
r = random.Random()
delay = r.random() * self.sampling_refresh_interval
self.io_loop.call_later(delay=delay,
callback=self._delayed_polling)
self.logger.info(
'Delaying sampling strategy polling by %d sec', delay)
def _delayed_polling(self):
periodic = self._create_periodic_callback()
self._poll_sampling_manager() # Initialize sampler now
with self.lock:
if not self.running:
return
self.periodic = periodic
periodic.start() # start the periodic cycle
self.logger.info(
'Tracing sampler started with sampling refresh '
'interval %d sec', self.sampling_refresh_interval)
def _create_periodic_callback(self):
return PeriodicCallback(
callback=self._poll_sampling_manager,
# convert interval to milliseconds
callback_time=self.sampling_refresh_interval * 1000,
io_loop=self.io_loop)
def _sampling_request_callback(self, future):
exception = future.exception()
if exception:
self.metrics.sampler_query_failure(1)
            self.error_reporter.error(
                'Failed to get sampling strategy from jaeger-agent: %s',
                exception)
return
response = future.result()
# In Python 3.5 response.body is of type bytes and json.loads() does only support str
# See: https://github.com/jaegertracing/jaeger-client-python/issues/180
if hasattr(response.body, 'decode') and callable(response.body.decode):
response_body = response.body.decode('utf-8')
else:
response_body = response.body
try:
sampling_strategies_response = json.loads(response_body)
self.metrics.sampler_retrieved(1)
except Exception as e:
self.metrics.sampler_query_failure(1)
            self.error_reporter.error(
                'Failed to parse sampling strategy '
                'from jaeger-agent: %s [%s]', e, response_body)
return
self._update_sampler(sampling_strategies_response)
self.logger.debug('Tracing sampler set to %s', self.sampler)
def _update_sampler(self, response):
with self.lock:
try:
if response.get(OPERATION_SAMPLING_STR):
self._update_adaptive_sampler(response.get(OPERATION_SAMPLING_STR))
else:
self._update_rate_limiting_or_probabilistic_sampler(response)
except Exception as e:
self.metrics.sampler_update_failure(1)
                self.error_reporter.error(
                    'Failed to update sampler '
                    'from jaeger-agent: %s [%s]', e, response)
def _update_adaptive_sampler(self, per_operation_strategies):
if isinstance(self.sampler, AdaptiveSampler):
self.sampler.update(per_operation_strategies)
else:
self.sampler = AdaptiveSampler(per_operation_strategies, self.max_operations)
self.metrics.sampler_updated(1)
def _update_rate_limiting_or_probabilistic_sampler(self, response):
s_type = response.get(STRATEGY_TYPE_STR)
new_sampler = self.sampler
if s_type == PROBABILISTIC_SAMPLING_STRATEGY:
sampling_rate = get_sampling_probability(response)
new_sampler = ProbabilisticSampler(rate=sampling_rate)
elif s_type == RATE_LIMITING_SAMPLING_STRATEGY:
mtps = get_rate_limit(response)
if mtps < 0 or mtps >= 500:
raise ValueError(
'Rate limiting parameter not in [0, 500) range: %s' % mtps)
if isinstance(self.sampler, RateLimitingSampler):
if self.sampler.update(max_traces_per_second=mtps):
self.metrics.sampler_updated(1)
else:
new_sampler = RateLimitingSampler(max_traces_per_second=mtps)
else:
raise ValueError('Unsupported sampling strategy type: %s' % s_type)
if self.sampler != new_sampler:
self.sampler = new_sampler
self.metrics.sampler_updated(1)
def _poll_sampling_manager(self):
self.logger.debug('Requesting tracing sampler refresh')
fut = self._channel.request_sampling_strategy(self.service_name)
fut.add_done_callback(self._sampling_request_callback)
def close(self):
with self.lock:
self.running = False
if self.periodic:
self.periodic.stop()
def get_sampling_probability(strategy=None):
if not strategy:
return DEFAULT_SAMPLING_PROBABILITY
probability_strategy = strategy.get(PROBABILISTIC_SAMPLING_STR)
if not probability_strategy:
return DEFAULT_SAMPLING_PROBABILITY
return probability_strategy.get(SAMPLING_RATE_STR, DEFAULT_SAMPLING_PROBABILITY)
def get_rate_limit(strategy=None):
if not strategy:
return DEFAULT_LOWER_BOUND
rate_limit_strategy = strategy.get(RATE_LIMITING_SAMPLING_STR)
if not rate_limit_strategy:
return DEFAULT_LOWER_BOUND
return rate_limit_strategy.get(MAX_TRACES_PER_SECOND_STR, DEFAULT_LOWER_BOUND)
class SamplerMetrics(object):
"""Sampler specific metrics."""
def __init__(self, metrics_factory):
self.sampler_retrieved = \
metrics_factory.create_counter(name='jaeger:sampler_queries', tags={'result': 'ok'})
self.sampler_query_failure = \
metrics_factory.create_counter(name='jaeger:sampler_queries', tags={'result': 'err'})
self.sampler_updated = \
metrics_factory.create_counter(name='jaeger:sampler_updates', tags={'result': 'ok'})
self.sampler_update_failure = \
metrics_factory.create_counter(name='jaeger:sampler_updates', tags={'result': 'err'})
|
|
import argparse
import os
import random
import string
from awacs.aws import PolicyDocument, Statement
from awacs.s3 import Action as S3Action
from awacs.sqs import Action as SQSAction
from troposphere import Template, Ref, GetAtt, Join
from troposphere import ec2, rds
from troposphere.iam import Role, InstanceProfile, Policy
from troposphere.s3 import Bucket
from troposphere.sqs import Queue
def init_cloud(args):
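    """Build the CloudFormation template: SQS queue, S3 bucket, security groups,
    RDS Postgres instance, and an IAM role/instance profile."""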
template = Template()
queue = template.add_resource(
Queue(
"{0}".format(args.sqs_name),
QueueName="{0}".format(args.sqs_name),
)
)
bucket = template.add_resource(
Bucket(
"{0}".format(args.s3_name),
BucketName="{0}".format(args.s3_name)
)
)
kala_security_group = template.add_resource(
ec2.SecurityGroup(
"{0}".format(args.kala_security_group),
GroupName="{0}".format(args.kala_security_group),
GroupDescription="Enable HTTP and HTTPS access on the inbound port",
SecurityGroupIngress=[
ec2.SecurityGroupRule(
IpProtocol="tcp",
FromPort="80",
ToPort="80",
CidrIp="0.0.0.0/0",
),
ec2.SecurityGroupRule(
IpProtocol="tcp",
FromPort="443",
ToPort="443",
CidrIp="0.0.0.0/0",
),
]
)
)
database_security_group = template.add_resource(
ec2.SecurityGroup(
"{0}".format(args.database_security_group),
GroupName="{0}".format(args.database_security_group),
GroupDescription="Enable Database access for the security groups",
SecurityGroupIngress=[
ec2.SecurityGroupRule(
IpProtocol="tcp",
FromPort="5432",
ToPort="5432",
SourceSecurityGroupName=Ref(kala_security_group),
),
]
)
)
database = template.add_resource(
rds.DBInstance(
"{0}".format(args.rds_instance_name),
DBInstanceIdentifier="{0}".format(args.rds_instance_name),
DBName=args.rds_name,
MasterUsername="{0}".format(args.rds_username),
MasterUserPassword="{0}".format(args.rds_password),
AllocatedStorage=args.rds_allocated_storage,
DBInstanceClass=args.rds_instance_class,
Engine="postgres",
MultiAZ=args.production,
StorageEncrypted=True,
VPCSecurityGroups=[GetAtt(database_security_group, "GroupId")]
)
)
s3_policy = PolicyDocument(
Version="2012-10-17",
Id="{0}Policy".format(args.s3_name),
Statement=[
Statement(
Effect="Allow",
Action=[S3Action("*")],
Resource=[
Join("", [GetAtt(bucket, "Arn"), "/*"])
]
),
]
)
sqs_policy = PolicyDocument(
Version="2012-10-17",
Id="{0}Policy".format(args.s3_name),
Statement=[
Statement(
Effect="Allow",
Action=[
SQSAction("*")
],
Resource=[GetAtt(queue, "Arn")]
)
]
)
role = Role(
'{0}Role'.format(args.iam_role),
RoleName='{0}Role'.format(args.iam_role),
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
}
}
]
},
Policies=[
Policy(
PolicyName="KalaS3Policy",
PolicyDocument=s3_policy
),
Policy(
PolicyName="KalaSQSPolicy",
PolicyDocument=sqs_policy
)
]
)
template.add_resource(
role
)
template.add_resource(
InstanceProfile(
"{0}InstanceProfile".format(args.iam_role),
Roles=[Ref(role)],
InstanceProfileName="{0}InstanceProfile".format(args.iam_role)
)
)
return template
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--output',
help='Specify an output format',
choices=['json', 'yaml'],
type=str,
default='json'
)
    parser.add_argument(
        '--production',
        help='Specify if this is production. The default is False',
        action='store_true',
        default=False
    )
parser.add_argument(
'--iam_role',
help='Give the IAM role a different name. The default is KalaInstanceProfile',
type=str,
default='KalaInstanceProfile'
)
parser.add_argument(
'--kala_security_group',
help='Give the kala security group a different name. The default is KalaSecurityGroup',
type=str,
default='KalaSecurityGroup'
)
parser.add_argument(
'--database_security_group',
help='Give the database security group a different name. The default is DatabaseSecurityGroup',
type=str,
default='DatabaseSecurityGroup'
)
parser.add_argument(
'--sqs_name',
help='Change the export queue name to something else. The default is ExportQueue',
type=str,
default='ExportQueue'
)
parser.add_argument(
'--s3_name',
help='Change the export bucket name to something else. The default is exports{random_string}',
type=str,
default='exports'
)
parser.add_argument(
'--elb_name',
help='Give the ELB a different name. The default is KalaELB',
type=str,
default='KalaELB'
)
parser.add_argument(
'--rds_username',
help='Database username. The default is kala',
type=str,
default='kala'
)
parser.add_argument(
'--rds_password',
help='Database password',
type=str,
required=True
)
parser.add_argument(
'--rds_allocated_storage',
        help='Database storage size in GB',
type=str,
default='100'
)
parser.add_argument(
'--rds_instance_class',
help='Database instance type. The default is db.t2.medium',
type=str,
default='db.t2.medium'
)
parser.add_argument(
'--rds_instance_name',
        help='Database instance identifier. The default is kala',
type=str,
default='kala'
)
parser.add_argument(
'--rds_name',
help='Database name',
type=str,
default='kala'
)
args = parser.parse_args()
if args.s3_name == 'exports':
args.s3_name += ''.join([random.choice(string.ascii_lowercase) for n in range(10)])
print("Export Queue Name: {0}".format(args.s3_name))
template = init_cloud(args)
os.makedirs('../build', exist_ok=True)
with open('../build/initial.{0}'.format(args.output), 'w') as _file:
data = template.to_json() if args.output == 'json' else template.to_yaml()
_file.write(data)
|
|
# Copyright 2011 The greplin-exception-catcher Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AppEngine server for collecting exceptions."""
# pylint: disable=E0611
from google.appengine.dist import use_library
use_library('django', '1.2')
# pylint: disable=E0611
from google.appengine.api import users
# pylint: disable=E0611
from google.appengine.ext import db, webapp
# pylint: disable=E0611
from google.appengine.ext.webapp import template
# pylint: disable=E0611
from google.appengine.ext.webapp.util import run_wsgi_app
import config
import queue
from datetime import datetime, timedelta
try:
from django.utils import simplejson as json
except ImportError:
import json
import logging
import random
import sys
import time
import traceback
from common import getProject, getTemplatePath
from datamodel import LoggedError, LoggedErrorInstance, AggregatedStats
####### Parse the configuration. #######
NAME = config.get('name')
SECRET_KEY = config.get('secretKey')
REQUIRE_AUTH = config.get('requireAuth', True)
####### Utility methods. #######
INSTANCE_FILTERS = ('environment', 'server', 'affectedUser')
INTEGER_FILTERS = ('affectedUser',)
def getFilters(request):
"""Gets the filters applied to the given request."""
filters = {}
for key, value in request.params.items():
if key in INSTANCE_FILTERS or key in ('project', 'errorLevel', 'maxAgeHours'):
filters[key] = value
return filters
def filterInstances(dataSet, key, value):
"""Filters a data set."""
if key in INTEGER_FILTERS:
return dataSet.filter(key + ' =', int(value))
elif key == 'maxAgeHours':
return dataSet
else:
return dataSet.filter(key + ' =', value)
def getErrors(filters, limit, offset):
"""Gets a list of errors, filtered by the given filters."""
for key in filters:
if key in INSTANCE_FILTERS:
return None, getInstances(filters, limit=limit, offset=offset)
errors = LoggedError.all().filter('active =', True)
for key, value in filters.items():
if key == 'maxAgeHours':
errors = errors.filter('firstOccurrence >', datetime.now() - timedelta(hours = int(value)))
elif key == 'project':
errors = errors.filter('project =', getProject(value))
else:
errors = errors.filter(key, value)
if 'maxAgeHours' in filters:
errors = errors.order('-firstOccurrence')
else:
errors = errors.order('-lastOccurrence')
return errors.fetch(limit, offset), None
def getInstances(filters, parent = None, limit = None, offset = None):
"""Gets a list of instances of the given parent error, filtered by the given filters."""
query = LoggedErrorInstance.all()
if parent:
query = query.filter('error =', parent)
if filters:
for key, value in filters.items():
if key in INSTANCE_FILTERS:
query = filterInstances(query, key, value)
elif key == 'project' and not parent:
query = query.filter('project =', getProject(value))
return query.order('-date').fetch(limit or 51, offset or 0)
####### Pages #######
class AuthPage(webapp.RequestHandler):
"""Base class for pages that require authentication."""
def __getUser(self):
"""Gets a user."""
return users.get_current_user()
def get(self, *args):
"""Handles a get, ensuring the user is authenticated."""
user = self.__getUser()
if user or not REQUIRE_AUTH:
self.doAuthenticatedGet(user, *args)
else:
self.redirect(users.create_login_url(self.request.uri))
def doAuthenticatedGet(self, _, *__):
"""Performs a get with an authenticated user."""
self.error(500)
def post(self, *args):
"""Handles a post, ensuring the user is authenticated."""
user = self.__getUser()
if user or not REQUIRE_AUTH:
self.doAuthenticatedPost(user, *args)
else:
self.redirect(users.create_login_url(self.request.uri))
def doAuthenticatedPost(self, _, *__):
"""Performs a post with an authenticated user."""
self.error(500)
class ReportPage(webapp.RequestHandler):
"""Page handler for reporting a new exception."""
def post(self):
"""Handles a new error report via POST."""
key = self.request.get('key')
if key != SECRET_KEY:
self.error(403)
return
# Add the task to the instances queue.
queue.queueException(self.request.body)
class StatPage(webapp.RequestHandler):
"""Page handler for collecting error instance stats."""
def get(self):
"""Handles a new error report via POST."""
key = self.request.get('key')
if key != SECRET_KEY:
self.error(403)
return
counts = []
project = self.request.get('project')
if project:
project = getProject(project)
      if not project:
        # Unknown project: emit a zero per requested time window and stop.
        self.response.out.write(' '.join(['0' for _ in self.request.get('minutes').split()]))
        return
for minutes in self.request.get('minutes').split():
query = LoggedErrorInstance.all()
if project:
query = query.ancestor(project)
counts.append(query.filter('date >=', datetime.now() - timedelta(minutes = int(minutes))).count())
self.response.out.write(' '.join((str(count) for count in counts)))
class AggregateViewPage(webapp.RequestHandler):
"""Page handler for collecting error instance stats."""
def get(self, viewLength):
"""Handles a new error report via POST."""
if viewLength != 'day':
# TODO(robbyw): For viewLength == week or viewLength == month, aggregate the aggregates.
viewLength = 'day'
data = AggregatedStats.all().order('-date').get()
data = json.loads(data.json)[:25]
for _, row in data:
logging.info(row)
row['servers'] = sorted(row['servers'].items(), key = lambda x: x[1], reverse=True)
row['environments'] = sorted(row['environments'].items(), key = lambda x: x[1], reverse=True)
keys, values = zip(*data)
errors = LoggedError.get([db.Key(key) for key in keys])
context = {
'title': 'Top 25 exceptions over the last %s' % viewLength,
'errors': zip(errors, values),
'total': len(data)
}
self.response.out.write(template.render(getTemplatePath('aggregation.html'), context))
class ListPage(AuthPage):
"""Page displaying a list of exceptions."""
def doAuthenticatedGet(self, user):
self.response.headers['Content-Type'] = 'text/html'
filters = getFilters(self.request)
page = int(self.request.get('page', 0))
errors, instances = getErrors(filters, limit = 51, offset = page * 50)
if errors is not None:
hasMore = len(errors) == 51
errors = errors[:50]
else:
hasMore = len(instances) == 51
instances = instances[:50]
context = {
'title': NAME,
'extraScripts': ['list'],
'user': user,
'filters': filters.items(),
'errors': errors,
'instances': instances,
'hasMore': hasMore,
'nextPage': page + 1
}
self.response.out.write(template.render(getTemplatePath('list.html'), context))
class ViewPage(AuthPage):
"""Page displaying a single exception."""
def doAuthenticatedGet(self, user, *args):
key, = args
self.response.headers['Content-Type'] = 'text/html'
error = LoggedError.get(key)
filters = getFilters(self.request)
context = {
'title': '%s - %s' % (error.lastMessage, NAME),
'extraScripts': ['view'],
'user': user,
'error': error,
'filters': filters.items(),
'instances': getInstances(filters, parent=error)[:100]
}
self.response.out.write(template.render(getTemplatePath('view.html'), context))
class ResolvePage(AuthPage):
"""Page that resolves an exception."""
def doAuthenticatedGet(self, _, *args):
key, = args
self.response.headers['Content-Type'] = 'text/plain'
error = LoggedError.get(key)
error.active = False
error.put()
self.response.out.write('ok')
class ClearDatabasePage(AuthPage):
"""Page for clearing the database."""
def doAuthenticatedGet(self, _):
if users.is_current_user_admin():
for error in LoggedError.all():
error.delete()
for instance in LoggedErrorInstance.all():
instance.delete()
self.response.out.write('Done')
else:
self.redirect(users.create_login_url(self.request.uri))
class ErrorPage(webapp.RequestHandler):
"""Page that generates demonstration errors."""
def get(self):
"""Handles page get for the error page."""
for _ in range(10):
error = random.choice(range(4))
errorLevel = 'error'
project = 'frontend'
try:
if error == 0:
project = 'backend'
x = 10 / 0
elif error == 1:
errorLevel = 'warning'
json.loads('{"abc", [1, 2')
elif error == 2:
x = {}
x = x['y']
elif error == 3:
x = {}
x = x['z']
except (KeyError, ZeroDivisionError, ValueError):
excInfo = sys.exc_info()
stack = traceback.format_exc()
env = random.choice(['dev', 'prod'])
exception = {
'timestamp': time.time(),
'project': project,
'serverName':'%s %s %d' % (env, project, random.choice(range(3))),
'type': excInfo[0].__module__ + '.' + excInfo[0].__name__,
'environment': env,
'errorLevel': errorLevel,
'message': str(excInfo[1]),
'logMessage': 'Log message goes here',
'backtrace': stack,
'context':{'userId':random.choice(range(20))}
}
queue.queueException(json.dumps(exception))
self.response.out.write('Done!')
####### Application. #######
def main():
"""Runs the server."""
endpoints = [
('/', ListPage),
('/clear', ClearDatabasePage),
('/report', ReportPage),
('/view/(.*)', ViewPage),
('/resolve/(.*)', ResolvePage),
('/stats', StatPage),
('/review/(.*)', AggregateViewPage),
] + queue.getEndpoints()
if config.get('demo'):
endpoints.append(('/error', ErrorPage))
application = webapp.WSGIApplication(endpoints, debug=True)
run_wsgi_app(application)
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
# Copyright (c) 2013. Mark E. Madsen <[email protected]>
#
# This work is licensed under the terms of the Apache Software License, Version 2.0. See the file LICENSE for details.
import networkx as nx
import pprint as pp
import logging as log
import subprocess
import math_functions as m
import re
class BalancedTreeAutomorphismStatistics(object):
"""
Calculates statistics relating to the symmetries of a graph, by determination
of the graph's automorphism group and orbit structure. This class relies
upon Brendan McKay and Adolfo Piperno's nauty and Traces package.
http://pallini.di.uniroma1.it/index.html
This class requires that the "dreadnaut" executable from nauty/Traces be
available somewhere on the execution search path for the simulation process. This
code does not install nauty itself.
The code takes graphs as NetworkX objects, and handles reformatting them for
nauty internally.
Statistics are returned in the form of a dict, as in this example:
    { 'orbits' : 5,
      'groupsize' : 2.079483819622e117,
      'graphorder' : 341,
      'orbitcounts' : [1, 4, 16, 64, 256],
      'remainingdensity' : 1.0
    }
    Most of these come from nauty output, but the "remainingdensity" statistic is
    the ratio of the measured vertex count to the number of vertices in the
    originally configured set of balanced trees.
"""
# TODO: This code will require additional
# work when we switch to general graphs for trait structures, because we'll need to be
# able to identify an "original" graph for every sampled trait graph an individual
# possesses, and identify them regardless of shape (perhaps with a graph attribute in networkx).
def __init__(self, simconfig):
self.simconfig = simconfig
self.r = int(self.simconfig.branching_factor)
self.h = int(self.simconfig.depth_factor)
self.num_trees = self.simconfig.num_trees
self.n_per_tree = m.num_nodes_balanced_tree(self.r, self.h)
# EXAMPLE OUTPUT
#
# 5 orbits; grpsize=2.079483819622e117; 255 gens; 30388 nodes; maxlev=205
# cpu time = 0.04 seconds
# 0; 1:4 (4); 5:20 (16); 21:84 (64); 85:340 (256);
def _parse_nauty_output(self, raw, graph):
results = {}
lines = raw.split('\n')
numlines = len(lines)
# orbit number is in the first line, as is grpsize
        onum_pat = re.compile(r'(\d+) orbit.*;')
exp_pat = re.compile(r"grpsize=([\d|\.|e|E]+);")
m = onum_pat.search(lines[0])
num_orbits = None
if m:
num_orbits = int(m.group(1))
#log.debug("num orbits: %s", num_orbits)
else:
log.error("Could not parse number of orbits from nauty output")
results['orbits'] = num_orbits
m2 = exp_pat.search(lines[0])
groupsize = None
if m2:
groupsize = float(m2.group(1))
#log.debug("groupsize: %s", groupsize)
else:
log.error("Could not parse groupsize from nauty output")
results['groupsize'] = groupsize
        single_num = re.compile(r'\w*(\d+)$')
        multiple_num = re.compile(r'.*\((\d+)\).*')
orbit_multiplicites = []
for lineno in range(2, numlines):
raw_orbits = lines[lineno].split(';')
for o in raw_orbits:
m = single_num.search(o)
if m:
orbit_multiplicites.append(1)
else:
m2 = multiple_num.search(o)
if m2:
mult = m2.group(1)
orbit_multiplicites.append(int(mult))
#log.debug("multiplicites: %s", orbit_multiplicites)
results['orbitcounts'] = orbit_multiplicites
return results
def calculate_graph_symmetries(self, graph):
"""
        Public API for calculating graph symmetries. Takes a single NetworkX graph, usually
        representing a graph of traits and usually a single component (although there is no
        particular reason why disconnected graphs would not work). Returns a dict with symmetry statistics,
in the format described in the class docstring.
"""
# we reformat the vertex labels
g = nx.convert_node_labels_to_integers(graph)
dread_graph = self._get_dreadnaught_for_graph(g)
num_vertices = g.number_of_nodes()
#log.debug("dread: %s", dread_graph)
raw = self._get_raw_nauty_output(dread_graph)
#log.debug("raw: %s", raw)
results = self._parse_nauty_output(raw, g)
# TODO: Figure out how to handle density and radius for multi-component graphs
results['remainingdensity'] = float(g.number_of_nodes()) / (float(self.n_per_tree) * float(self.num_trees))
return results
def _get_dreadnaught_for_graph(self,graph):
"""
Constructs a representation of the adjacency structure of the graph in the format
        that dreadnaut/nauty understands. This employs the networkx "adjlist" format but
extends it slightly.
Only adjacency information is preserved in this format -- no additional vertex or edge
attributes, so "primary" storage of graphs should use the GraphML format.
"""
linefeed = chr(10)
n = graph.number_of_nodes()
dn = "n="
dn += str(n)
dn += ' g'
dn += linefeed
for line in nx.generate_adjlist(graph):
edges = line.split()
#if len(edges) == 1:
# dn += ';\n'
if len(edges) == 1:
dn += ";"
dn += linefeed
else:
dn += " ".join(edges[1:])
dn += ";"
dn += linefeed
        dn += 'x o'
dn += linefeed
return dn
def _format_graph_as_nauty(self, graph):
"""
Constructs a representation of the adjacency structure of the graph in the format
        that dreadnaut/nauty understands. This employs the networkx "adjlist" format but
extends it slightly.
Only adjacency information is preserved in this format -- no additional vertex or edge
attributes, so "primary" storage of graphs should use the GraphML format.
"""
linefeed = chr(10)
n = graph.number_of_nodes()
dn = "n="
dn += str(n)
dn += ' g'
dn += linefeed
for i in range(0, n):
nlist = graph.neighbors(i)
# we want to list only vertices which are greater than our own index;
# any vertices S less than our own index T would have resulted in (S,T)
# being mentioned when S was processed.
nlist_greater = [j for j in nlist if j > i]
if len(nlist_greater) == 1:
dn += str(i)
dn += ": "
dn += str(nlist_greater[0])
dn += ";"
dn += linefeed
elif len(nlist_greater) > 1:
dn += str(i)
dn += ": "
dn += " ".join(map(str,nlist_greater))
dn += ";"
dn += linefeed
else:
# we don't have to do anything
pass
dn += "."
dn += linefeed
dn += "x o"
dn += linefeed
return dn
def _get_raw_nauty_output(self,formatted):
"""
Uses Brendan McKay's nauty/traces package, and specifically the "dreadnaut" program,
        to calculate the raw orbits (and orbit multiplicities) of a graph. This information can
then be post-processed for a variety of statistics.
This method assumes that dreadnaut is on the path, and throws an error otherwise.
"""
try:
proc = subprocess.Popen(['dreadnaut', '-o', '-m -a'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
raw_output = proc.communicate(formatted)[0]
except OSError:
print "This program needs Brendan McKay's nauty program (dreadnaut, specifically) on the path"
exit(1)
return raw_output
|
|
#
# yosys -- Yosys Open SYnthesis Suite
#
# Copyright (C) 2012 Clifford Wolf <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import sys, re, os
import resource, subprocess
from copy import deepcopy
from select import select
from time import time
from queue import Queue, Empty
from threading import Thread
# This is needed so that the recursive SMT2 S-expression parser
# does not run out of stack frames when parsing large expressions
smtio_reclimit = 64 * 1024
smtio_stacksize = 128 * 1024 * 1024
if sys.getrecursionlimit() < smtio_reclimit:
sys.setrecursionlimit(smtio_reclimit)
if resource.getrlimit(resource.RLIMIT_STACK)[0] < smtio_stacksize:
resource.setrlimit(resource.RLIMIT_STACK, (smtio_stacksize, -1))
hex_dict = {
"0": "0000", "1": "0001", "2": "0010", "3": "0011",
"4": "0100", "5": "0101", "6": "0110", "7": "0111",
"8": "1000", "9": "1001", "A": "1010", "B": "1011",
"C": "1100", "D": "1101", "E": "1110", "F": "1111",
"a": "1010", "b": "1011", "c": "1100", "d": "1101",
"e": "1110", "f": "1111"
}
class SmtModInfo:
def __init__(self):
self.inputs = set()
self.outputs = set()
self.registers = set()
self.memories = dict()
self.wires = set()
self.wsize = dict()
self.clocks = dict()
self.cells = dict()
self.asserts = dict()
self.covers = dict()
self.anyconsts = dict()
self.anyseqs = dict()
self.allconsts = dict()
self.allseqs = dict()
self.asize = dict()
class SmtIo:
def __init__(self, opts=None):
self.logic = None
self.logic_qf = True
self.logic_ax = True
self.logic_uf = True
self.logic_bv = True
self.logic_dt = False
self.forall = False
self.produce_models = True
self.smt2cache = [list()]
self.p = None
if opts is not None:
self.logic = opts.logic
self.solver = opts.solver
self.solver_opts = opts.solver_opts
self.debug_print = opts.debug_print
self.debug_file = opts.debug_file
self.dummy_file = opts.dummy_file
self.timeinfo = opts.timeinfo
self.unroll = opts.unroll
self.noincr = opts.noincr
self.info_stmts = opts.info_stmts
self.nocomments = opts.nocomments
else:
self.solver = "yices"
self.solver_opts = list()
self.debug_print = False
self.debug_file = None
self.dummy_file = None
self.timeinfo = os.name != "nt"
self.unroll = False
self.noincr = False
self.info_stmts = list()
self.nocomments = False
self.start_time = time()
self.modinfo = dict()
self.curmod = None
self.topmod = None
self.setup_done = False
def setup(self):
assert not self.setup_done
if self.forall:
self.unroll = False
if self.solver == "yices":
self.popen_vargs = ['yices-smt2', '--incremental'] + self.solver_opts
if self.solver == "z3":
self.popen_vargs = ['z3', '-smt2', '-in'] + self.solver_opts
if self.solver == "cvc4":
self.popen_vargs = ['cvc4', '--incremental', '--lang', 'smt2.6' if self.logic_dt else 'smt2'] + self.solver_opts
if self.solver == "mathsat":
self.popen_vargs = ['mathsat'] + self.solver_opts
if self.solver == "boolector":
self.popen_vargs = ['boolector', '--smt2', '-i'] + self.solver_opts
self.unroll = True
if self.solver == "abc":
if len(self.solver_opts) > 0:
self.popen_vargs = ['yosys-abc', '-S', '; '.join(self.solver_opts)]
else:
self.popen_vargs = ['yosys-abc', '-S', '%blast; &sweep -C 5000; &syn4; &cec -s -m -C 2000']
self.logic_ax = False
self.unroll = True
self.noincr = True
if self.solver == "dummy":
assert self.dummy_file is not None
self.dummy_fd = open(self.dummy_file, "r")
else:
if self.dummy_file is not None:
self.dummy_fd = open(self.dummy_file, "w")
if not self.noincr:
self.p_open()
if self.unroll:
assert not self.forall
self.logic_uf = False
self.unroll_idcnt = 0
self.unroll_buffer = ""
self.unroll_sorts = set()
self.unroll_objs = set()
self.unroll_decls = dict()
self.unroll_cache = dict()
self.unroll_stack = list()
if self.logic is None:
self.logic = ""
if self.logic_qf: self.logic += "QF_"
if self.logic_ax: self.logic += "A"
if self.logic_uf: self.logic += "UF"
if self.logic_bv: self.logic += "BV"
if self.logic_dt: self.logic = "ALL"
self.setup_done = True
for stmt in self.info_stmts:
self.write(stmt)
if self.produce_models:
self.write("(set-option :produce-models true)")
self.write("(set-logic %s)" % self.logic)
def timestamp(self):
secs = int(time() - self.start_time)
return "## %6d %3d:%02d:%02d " % (secs, secs // (60*60), (secs // 60) % 60, secs % 60)
def replace_in_stmt(self, stmt, pat, repl):
if stmt == pat:
return repl
if isinstance(stmt, list):
return [self.replace_in_stmt(s, pat, repl) for s in stmt]
return stmt
def unroll_stmt(self, stmt):
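        # Recursively rewrites applications of functions declared over unrolled
        # sorts into fresh |UNROLL#<n>| constants/definitions, so that solvers
        # without (full) support for uninterpreted functions can be used.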
if not isinstance(stmt, list):
return stmt
stmt = [self.unroll_stmt(s) for s in stmt]
if len(stmt) >= 2 and not isinstance(stmt[0], list) and stmt[0] in self.unroll_decls:
assert stmt[1] in self.unroll_objs
key = tuple(stmt)
if key not in self.unroll_cache:
decl = deepcopy(self.unroll_decls[key[0]])
self.unroll_cache[key] = "|UNROLL#%d|" % self.unroll_idcnt
decl[1] = self.unroll_cache[key]
self.unroll_idcnt += 1
if decl[0] == "declare-fun":
if isinstance(decl[3], list) or decl[3] not in self.unroll_sorts:
self.unroll_objs.add(decl[1])
decl[2] = list()
else:
self.unroll_objs.add(decl[1])
decl = list()
elif decl[0] == "define-fun":
arg_index = 1
for arg_name, arg_sort in decl[2]:
decl[4] = self.replace_in_stmt(decl[4], arg_name, key[arg_index])
arg_index += 1
decl[2] = list()
if len(decl) > 0:
decl = self.unroll_stmt(decl)
self.write(self.unparse(decl), unroll=False)
return self.unroll_cache[key]
return stmt
def p_thread_main(self):
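        # Background reader thread: forwards solver stdout lines into p_queue
        # until the solver closes its output stream.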
while True:
data = self.p.stdout.readline().decode("ascii")
if data == "": break
self.p_queue.put(data)
self.p_running = False
def p_open(self):
assert self.p is None
self.p = subprocess.Popen(self.popen_vargs, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
self.p_running = True
self.p_next = None
self.p_queue = Queue()
self.p_thread = Thread(target=self.p_thread_main)
self.p_thread.start()
def p_write(self, data, flush):
assert self.p is not None
self.p.stdin.write(bytes(data, "ascii"))
if flush: self.p.stdin.flush()
def p_read(self):
assert self.p is not None
assert self.p_running
if self.p_next is not None:
data = self.p_next
self.p_next = None
return data
return self.p_queue.get()
def p_poll(self):
assert self.p is not None
assert self.p_running
if self.p_next is not None:
return False
try:
self.p_next = self.p_queue.get(True, 0.1)
return False
except Empty:
return True
def p_close(self):
assert self.p is not None
self.p.stdin.close()
self.p_thread.join()
assert not self.p_running
self.p = None
self.p_next = None
self.p_queue = None
self.p_thread = None
def write(self, stmt, unroll=True):
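        # Statements are buffered until their parentheses balance, optionally
        # rewritten by the unroller, recorded in smt2cache so --noincr mode can
        # replay them after a solver restart, and finally written to the solver
        # process and/or the debug dump file.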
if stmt.startswith(";"):
self.info(stmt)
if not self.setup_done:
self.info_stmts.append(stmt)
return
elif not self.setup_done:
self.setup()
stmt = stmt.strip()
if self.nocomments or self.unroll:
stmt = re.sub(r" *;.*", "", stmt)
if stmt == "": return
if unroll and self.unroll:
stmt = self.unroll_buffer + stmt
self.unroll_buffer = ""
s = re.sub(r"\|[^|]*\|", "", stmt)
if s.count("(") != s.count(")"):
self.unroll_buffer = stmt + " "
return
s = self.parse(stmt)
if self.debug_print:
print("-> %s" % s)
if len(s) == 3 and s[0] == "declare-sort" and s[2] == "0":
self.unroll_sorts.add(s[1])
return
elif len(s) == 4 and s[0] == "declare-fun" and s[2] == [] and s[3] in self.unroll_sorts:
self.unroll_objs.add(s[1])
return
elif len(s) >= 4 and s[0] == "declare-fun":
for arg_sort in s[2]:
if arg_sort in self.unroll_sorts:
self.unroll_decls[s[1]] = s
return
elif len(s) >= 4 and s[0] == "define-fun":
for arg_name, arg_sort in s[2]:
if arg_sort in self.unroll_sorts:
self.unroll_decls[s[1]] = s
return
stmt = self.unparse(self.unroll_stmt(s))
if stmt == "(push 1)":
self.unroll_stack.append((
deepcopy(self.unroll_sorts),
deepcopy(self.unroll_objs),
deepcopy(self.unroll_decls),
deepcopy(self.unroll_cache),
))
if stmt == "(pop 1)":
self.unroll_sorts, self.unroll_objs, self.unroll_decls, self.unroll_cache = self.unroll_stack.pop()
if self.debug_print:
print("> %s" % stmt)
if self.debug_file:
print(stmt, file=self.debug_file)
self.debug_file.flush()
if self.solver != "dummy":
if self.noincr:
if self.p is not None and not stmt.startswith("(get-"):
self.p_close()
if stmt == "(push 1)":
self.smt2cache.append(list())
elif stmt == "(pop 1)":
self.smt2cache.pop()
else:
if self.p is not None:
self.p_write(stmt + "\n", True)
self.smt2cache[-1].append(stmt)
else:
self.p_write(stmt + "\n", True)
def info(self, stmt):
if not stmt.startswith("; yosys-smt2-"):
return
fields = stmt.split()
if fields[1] == "yosys-smt2-nomem":
if self.logic is None:
self.logic_ax = False
if fields[1] == "yosys-smt2-nobv":
if self.logic is None:
self.logic_bv = False
if fields[1] == "yosys-smt2-stdt":
if self.logic is None:
self.logic_dt = True
if fields[1] == "yosys-smt2-forall":
if self.logic is None:
self.logic_qf = False
self.forall = True
if fields[1] == "yosys-smt2-module":
self.curmod = fields[2]
self.modinfo[self.curmod] = SmtModInfo()
if fields[1] == "yosys-smt2-cell":
self.modinfo[self.curmod].cells[fields[3]] = fields[2]
if fields[1] == "yosys-smt2-topmod":
self.topmod = fields[2]
if fields[1] == "yosys-smt2-input":
self.modinfo[self.curmod].inputs.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-output":
self.modinfo[self.curmod].outputs.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-register":
self.modinfo[self.curmod].registers.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-memory":
self.modinfo[self.curmod].memories[fields[2]] = (int(fields[3]), int(fields[4]), int(fields[5]), int(fields[6]), fields[7] == "async")
if fields[1] == "yosys-smt2-wire":
self.modinfo[self.curmod].wires.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-clock":
for edge in fields[3:]:
if fields[2] not in self.modinfo[self.curmod].clocks:
self.modinfo[self.curmod].clocks[fields[2]] = edge
elif self.modinfo[self.curmod].clocks[fields[2]] != edge:
self.modinfo[self.curmod].clocks[fields[2]] = "event"
if fields[1] == "yosys-smt2-assert":
self.modinfo[self.curmod].asserts["%s_a %s" % (self.curmod, fields[2])] = fields[3]
if fields[1] == "yosys-smt2-cover":
self.modinfo[self.curmod].covers["%s_c %s" % (self.curmod, fields[2])] = fields[3]
if fields[1] == "yosys-smt2-anyconst":
self.modinfo[self.curmod].anyconsts[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-anyseq":
self.modinfo[self.curmod].anyseqs[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-allconst":
self.modinfo[self.curmod].allconsts[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-allseq":
self.modinfo[self.curmod].allseqs[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
def hiernets(self, top, regs_only=False):
def hiernets_worker(nets, mod, cursor):
for netname in sorted(self.modinfo[mod].wsize.keys()):
if not regs_only or netname in self.modinfo[mod].registers:
nets.append(cursor + [netname])
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
hiernets_worker(nets, celltype, cursor + [cellname])
nets = list()
hiernets_worker(nets, top, [])
return nets
def hieranyconsts(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].anyconsts.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hieranyseqs(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].anyseqs.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hierallconsts(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].allconsts.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hierallseqs(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].allseqs.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hiermems(self, top):
def hiermems_worker(mems, mod, cursor):
for memname in sorted(self.modinfo[mod].memories.keys()):
mems.append(cursor + [memname])
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
hiermems_worker(mems, celltype, cursor + [cellname])
mems = list()
hiermems_worker(mems, top, [])
return mems
def read(self):
stmt = []
count_brackets = 0
while True:
if self.solver == "dummy":
line = self.dummy_fd.readline().strip()
else:
line = self.p_read().strip()
if self.dummy_file is not None:
self.dummy_fd.write(line + "\n")
count_brackets += line.count("(")
count_brackets -= line.count(")")
stmt.append(line)
if self.debug_print:
print("< %s" % line)
if count_brackets == 0:
break
if self.solver != "dummy" and self.p.poll():
print("SMT Solver terminated unexpectedly: %s" % "".join(stmt))
sys.exit(1)
stmt = "".join(stmt)
if stmt.startswith("(error"):
print("SMT Solver Error: %s" % stmt, file=sys.stderr)
sys.exit(1)
return stmt
def check_sat(self):
if self.debug_print:
print("> (check-sat)")
if self.debug_file and not self.nocomments:
print("; running check-sat..", file=self.debug_file)
self.debug_file.flush()
if self.solver != "dummy":
if self.noincr:
if self.p is not None:
self.p_close()
self.p_open()
for cache_ctx in self.smt2cache:
for cache_stmt in cache_ctx:
self.p_write(cache_stmt + "\n", False)
self.p_write("(check-sat)\n", True)
if self.timeinfo:
i = 0
s = "/-\|"
count = 0
num_bs = 0
while self.p_poll():
count += 1
if count < 25:
continue
if count % 10 == 0 or count == 25:
secs = count // 10
if secs < 60:
m = "(%d seconds)" % secs
elif secs < 60*60:
m = "(%d seconds -- %d:%02d)" % (secs, secs // 60, secs % 60)
else:
m = "(%d seconds -- %d:%02d:%02d)" % (secs, secs // (60*60), (secs // 60) % 60, secs % 60)
print("%s %s %c" % ("\b \b" * num_bs, m, s[i]), end="", file=sys.stderr)
num_bs = len(m) + 3
else:
print("\b" + s[i], end="", file=sys.stderr)
sys.stderr.flush()
i = (i + 1) % len(s)
if num_bs != 0:
print("\b \b" * num_bs, end="", file=sys.stderr)
sys.stderr.flush()
result = self.read()
if self.debug_file:
print("(set-info :status %s)" % result, file=self.debug_file)
print("(check-sat)", file=self.debug_file)
self.debug_file.flush()
return result
def parse(self, stmt):
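        # Parses an SMT2 S-expression string into nested Python lists; atoms are
        # returned as strings and |quoted| symbols keep their bars.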
def worker(stmt):
if stmt[0] == '(':
expr = []
cursor = 1
while stmt[cursor] != ')':
el, le = worker(stmt[cursor:])
expr.append(el)
cursor += le
return expr, cursor+1
if stmt[0] == '|':
expr = "|"
cursor = 1
while stmt[cursor] != '|':
expr += stmt[cursor]
cursor += 1
expr += "|"
return expr, cursor+1
if stmt[0] in [" ", "\t", "\r", "\n"]:
el, le = worker(stmt[1:])
return el, le+1
expr = ""
cursor = 0
while stmt[cursor] not in ["(", ")", "|", " ", "\t", "\r", "\n"]:
expr += stmt[cursor]
cursor += 1
return expr, cursor
return worker(stmt)[0]
def unparse(self, stmt):
if isinstance(stmt, list):
return "(" + " ".join([self.unparse(s) for s in stmt]) + ")"
return stmt
def bv2hex(self, v):
h = ""
v = self.bv2bin(v)
while len(v) > 0:
d = 0
if len(v) > 0 and v[-1] == "1": d += 1
if len(v) > 1 and v[-2] == "1": d += 2
if len(v) > 2 and v[-3] == "1": d += 4
if len(v) > 3 and v[-4] == "1": d += 8
h = hex(d)[2:] + h
if len(v) < 4: break
v = v[:-4]
return h
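    # e.g. bv2hex("#b1010") == "a" and bv2bin("#x1f") == "00011111" (illustrative).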
def bv2bin(self, v):
if v == "true": return "1"
if v == "false": return "0"
if v.startswith("#b"):
return v[2:]
if v.startswith("#x"):
return "".join(hex_dict.get(x) for x in v[2:])
assert False
def bv2int(self, v):
return int(self.bv2bin(v), 2)
def get(self, expr):
self.write("(get-value (%s))" % (expr))
return self.parse(self.read())[0][1]
def get_list(self, expr_list):
if len(expr_list) == 0:
return []
self.write("(get-value (%s))" % " ".join(expr_list))
return [n[1] for n in self.parse(self.read())]
def get_path(self, mod, path):
assert mod in self.modinfo
path = path.split(".")
for i in range(len(path)-1):
first = ".".join(path[0:i+1])
second = ".".join(path[i+1:])
if first in self.modinfo[mod].cells:
nextmod = self.modinfo[mod].cells[first]
return [first] + self.get_path(nextmod, second)
return [".".join(path)]
def net_expr(self, mod, base, path):
if len(path) == 0:
return base
if len(path) == 1:
assert mod in self.modinfo
if path[0] == "":
return base
if path[0] in self.modinfo[mod].cells:
return "(|%s_h %s| %s)" % (mod, path[0], base)
if path[0] in self.modinfo[mod].wsize:
return "(|%s_n %s| %s)" % (mod, path[0], base)
if path[0] in self.modinfo[mod].memories:
return "(|%s_m %s| %s)" % (mod, path[0], base)
assert 0
assert mod in self.modinfo
assert path[0] in self.modinfo[mod].cells
nextmod = self.modinfo[mod].cells[path[0]]
nextbase = "(|%s_h %s| %s)" % (mod, path[0], base)
return self.net_expr(nextmod, nextbase, path[1:])
def net_width(self, mod, net_path):
for i in range(len(net_path)-1):
assert mod in self.modinfo
assert net_path[i] in self.modinfo[mod].cells
mod = self.modinfo[mod].cells[net_path[i]]
assert mod in self.modinfo
assert net_path[-1] in self.modinfo[mod].wsize
return self.modinfo[mod].wsize[net_path[-1]]
def net_clock(self, mod, net_path):
for i in range(len(net_path)-1):
assert mod in self.modinfo
assert net_path[i] in self.modinfo[mod].cells
mod = self.modinfo[mod].cells[net_path[i]]
assert mod in self.modinfo
if net_path[-1] not in self.modinfo[mod].clocks:
return None
return self.modinfo[mod].clocks[net_path[-1]]
def net_exists(self, mod, net_path):
for i in range(len(net_path)-1):
if mod not in self.modinfo: return False
if net_path[i] not in self.modinfo[mod].cells: return False
mod = self.modinfo[mod].cells[net_path[i]]
if mod not in self.modinfo: return False
if net_path[-1] not in self.modinfo[mod].wsize: return False
return True
def mem_exists(self, mod, mem_path):
for i in range(len(mem_path)-1):
if mod not in self.modinfo: return False
if mem_path[i] not in self.modinfo[mod].cells: return False
mod = self.modinfo[mod].cells[mem_path[i]]
if mod not in self.modinfo: return False
if mem_path[-1] not in self.modinfo[mod].memories: return False
return True
def mem_expr(self, mod, base, path, port=None, infomode=False):
if len(path) == 1:
assert mod in self.modinfo
assert path[0] in self.modinfo[mod].memories
if infomode:
return self.modinfo[mod].memories[path[0]]
return "(|%s_m%s %s| %s)" % (mod, "" if port is None else ":%s" % port, path[0], base)
assert mod in self.modinfo
assert path[0] in self.modinfo[mod].cells
nextmod = self.modinfo[mod].cells[path[0]]
nextbase = "(|%s_h %s| %s)" % (mod, path[0], base)
return self.mem_expr(nextmod, nextbase, path[1:], port=port, infomode=infomode)
def mem_info(self, mod, path):
return self.mem_expr(mod, "", path, infomode=True)
def get_net(self, mod_name, net_path, state_name):
return self.get(self.net_expr(mod_name, state_name, net_path))
def get_net_list(self, mod_name, net_path_list, state_name):
return self.get_list([self.net_expr(mod_name, state_name, n) for n in net_path_list])
def get_net_hex(self, mod_name, net_path, state_name):
return self.bv2hex(self.get_net(mod_name, net_path, state_name))
def get_net_hex_list(self, mod_name, net_path_list, state_name):
return [self.bv2hex(v) for v in self.get_net_list(mod_name, net_path_list, state_name)]
def get_net_bin(self, mod_name, net_path, state_name):
return self.bv2bin(self.get_net(mod_name, net_path, state_name))
def get_net_bin_list(self, mod_name, net_path_list, state_name):
return [self.bv2bin(v) for v in self.get_net_list(mod_name, net_path_list, state_name)]
def wait(self):
if self.p is not None:
self.p.wait()
self.p_close()
class SmtOpts:
def __init__(self):
self.shortopts = "s:S:v"
self.longopts = ["unroll", "noincr", "noprogress", "dump-smt2=", "logic=", "dummy=", "info=", "nocomments"]
self.solver = "yices"
self.solver_opts = list()
self.debug_print = False
self.debug_file = None
self.dummy_file = None
self.unroll = False
self.noincr = False
self.timeinfo = os.name != "nt"
self.logic = None
self.info_stmts = list()
self.nocomments = False
def handle(self, o, a):
if o == "-s":
self.solver = a
elif o == "-S":
self.solver_opts.append(a)
elif o == "-v":
self.debug_print = True
elif o == "--unroll":
self.unroll = True
elif o == "--noincr":
self.noincr = True
elif o == "--noprogress":
self.timeinfo = False
elif o == "--dump-smt2":
self.debug_file = open(a, "w")
elif o == "--logic":
self.logic = a
elif o == "--dummy":
self.dummy_file = a
elif o == "--info":
self.info_stmts.append(a)
elif o == "--nocomments":
self.nocomments = True
else:
return False
return True
def helpmsg(self):
return """
-s <solver>
set SMT solver: z3, yices, boolector, cvc4, mathsat, dummy
default: yices
-S <opt>
pass <opt> as command line argument to the solver
--logic <smt2_logic>
use the specified SMT2 logic (e.g. QF_AUFBV)
--dummy <filename>
if solver is "dummy", read solver output from that file
otherwise: write solver output to that file
-v
enable debug output
--unroll
unroll uninterpreted functions
--noincr
don't use incremental solving, instead restart solver for
each (check-sat). This also avoids (push) and (pop).
--noprogress
disable timer display during solving
(this option is set implicitly on Windows)
--dump-smt2 <filename>
write smt2 statements to file
--info <smt2-info-stmt>
include the specified smt2 info statement in the smt2 output
--nocomments
strip all comments from the generated smt2 code
"""
class MkVcd:
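    """Minimal writer for VCD (Value Change Dump) waveform files."""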
def __init__(self, f):
self.f = f
self.t = -1
self.nets = dict()
self.clocks = dict()
def add_net(self, path, width):
path = tuple(path)
assert self.t == -1
key = "n%d" % len(self.nets)
self.nets[path] = (key, width)
def add_clock(self, path, edge):
path = tuple(path)
assert self.t == -1
key = "n%d" % len(self.nets)
self.nets[path] = (key, 1)
self.clocks[path] = (key, edge)
def set_net(self, path, bits):
path = tuple(path)
assert self.t >= 0
assert path in self.nets
if path not in self.clocks:
print("b%s %s" % (bits, self.nets[path][0]), file=self.f)
def set_time(self, t):
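        # The first call emits the VCD header ($scope/$var declarations for all
        # registered nets); each new timestamp then emits time markers, the
        # smt_step value and clock edge toggles.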
assert t >= self.t
if t != self.t:
if self.t == -1:
print("$var integer 32 t smt_step $end", file=self.f)
print("$var event 1 ! smt_clock $end", file=self.f)
scope = []
for path in sorted(self.nets):
while len(scope)+1 > len(path) or (len(scope) > 0 and scope[-1] != path[len(scope)-1]):
print("$upscope $end", file=self.f)
scope = scope[:-1]
while len(scope)+1 < len(path):
print("$scope module %s $end" % path[len(scope)], file=self.f)
scope.append(path[len(scope)-1])
key, width = self.nets[path]
if path in self.clocks and self.clocks[path][1] == "event":
print("$var event 1 %s %s $end" % (key, path[-1]), file=self.f)
else:
print("$var wire %d %s %s $end" % (width, key, path[-1]), file=self.f)
for i in range(len(scope)):
print("$upscope $end", file=self.f)
print("$enddefinitions $end", file=self.f)
self.t = t
assert self.t >= 0
if self.t > 0:
print("#%d" % (10 * self.t - 5), file=self.f)
for path in sorted(self.clocks.keys()):
if self.clocks[path][1] == "posedge":
print("b0 %s" % self.nets[path][0], file=self.f)
elif self.clocks[path][1] == "negedge":
print("b1 %s" % self.nets[path][0], file=self.f)
print("#%d" % (10 * self.t), file=self.f)
print("1!", file=self.f)
print("b%s t" % format(self.t, "032b"), file=self.f)
for path in sorted(self.clocks.keys()):
if self.clocks[path][1] == "negedge":
print("b0 %s" % self.nets[path][0], file=self.f)
else:
print("b1 %s" % self.nets[path][0], file=self.f)
|
|
""" generic mechanism for marking and selecting python functions. """
import typing
from typing import AbstractSet
from typing import List
from typing import Optional
from typing import Union
import attr
from .expression import Expression
from .expression import ParseError
from .structures import EMPTY_PARAMETERSET_OPTION
from .structures import get_empty_parameterset_mark
from .structures import Mark
from .structures import MARK_GEN
from .structures import MarkDecorator
from .structures import MarkGenerator
from .structures import ParameterSet
from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.config import ExitCode
from _pytest.config import hookimpl
from _pytest.config import UsageError
from _pytest.config.argparsing import Parser
from _pytest.store import StoreKey
if TYPE_CHECKING:
from _pytest.nodes import Item
__all__ = ["Mark", "MarkDecorator", "MarkGenerator", "get_empty_parameterset_mark"]
old_mark_config_key = StoreKey[Optional[Config]]()
def param(
*values: object,
marks: "Union[MarkDecorator, typing.Collection[Union[MarkDecorator, Mark]]]" = (),
id: Optional[str] = None
) -> ParameterSet:
"""Specify a parameter in `pytest.mark.parametrize`_ calls or
:ref:`parametrized fixtures <fixture-parametrize-marks>`.
.. code-block:: python
@pytest.mark.parametrize(
"test_input,expected",
[("3+5", 8), pytest.param("6*9", 42, marks=pytest.mark.xfail),],
)
def test_eval(test_input, expected):
assert eval(test_input) == expected
:param values: variable args of the values of the parameter set, in order.
:keyword marks: a single mark or a list of marks to be applied to this parameter set.
:keyword str id: the id to attribute to this parameter set.
"""
return ParameterSet.param(*values, marks=marks, id=id)
def pytest_addoption(parser: Parser) -> None:
group = parser.getgroup("general")
group._addoption(
"-k",
action="store",
dest="keyword",
default="",
metavar="EXPRESSION",
help="only run tests which match the given substring expression. "
"An expression is a python evaluatable expression "
"where all names are substring-matched against test names "
"and their parent classes. Example: -k 'test_method or test_"
"other' matches all test functions and classes whose name "
"contains 'test_method' or 'test_other', while -k 'not test_method' "
"matches those that don't contain 'test_method' in their names. "
"-k 'not test_method and not test_other' will eliminate the matches. "
"Additionally keywords are matched to classes and functions "
"containing extra names in their 'extra_keyword_matches' set, "
"as well as functions which have names assigned directly to them. "
"The matching is case-insensitive.",
)
group._addoption(
"-m",
action="store",
dest="markexpr",
default="",
metavar="MARKEXPR",
help="only run tests matching given mark expression.\n"
"For example: -m 'mark1 and not mark2'.",
)
group.addoption(
"--markers",
action="store_true",
help="show markers (builtin, plugin and per-project ones).",
)
parser.addini("markers", "markers for test functions", "linelist")
parser.addini(EMPTY_PARAMETERSET_OPTION, "default marker for empty parametersets")
@hookimpl(tryfirst=True)
def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]:
import _pytest.config
if config.option.markers:
config._do_configure()
tw = _pytest.config.create_terminal_writer(config)
for line in config.getini("markers"):
parts = line.split(":", 1)
name = parts[0]
rest = parts[1] if len(parts) == 2 else ""
tw.write("@pytest.mark.%s:" % name, bold=True)
tw.line(rest)
tw.line()
config._ensure_unconfigure()
return 0
return None
@attr.s(slots=True)
class KeywordMatcher:
"""A matcher for keywords.
Given a list of names, matches any substring of one of these names. The
string inclusion check is case-insensitive.
Will match on the name of colitem, including the names of its parents.
Only matches names of items which are either a :class:`Class` or a
:class:`Function`.
Additionally, matches on names in the 'extra_keyword_matches' set of
any item, as well as names directly assigned to test functions.
"""
_names = attr.ib(type=AbstractSet[str])
@classmethod
def from_item(cls, item: "Item") -> "KeywordMatcher":
mapped_names = set()
# Add the names of the current item and any parent items
import pytest
for node in item.listchain():
if not isinstance(node, (pytest.Instance, pytest.Session)):
mapped_names.add(node.name)
# Add the names added as extra keywords to current or parent items
mapped_names.update(item.listextrakeywords())
# Add the names attached to the current function through direct assignment
function_obj = getattr(item, "function", None)
if function_obj:
mapped_names.update(function_obj.__dict__)
# add the markers to the keywords as we no longer handle them correctly
mapped_names.update(mark.name for mark in item.iter_markers())
return cls(mapped_names)
def __call__(self, subname: str) -> bool:
subname = subname.lower()
names = (name.lower() for name in self._names)
for name in names:
if subname in name:
return True
return False
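# Illustrative use (hypothetical names):
#   matcher = KeywordMatcher({"TestHttp", "test_http_get"})
#   Expression.compile("http and not slow").evaluate(matcher)  # -> True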
def deselect_by_keyword(items: "List[Item]", config: Config) -> None:
keywordexpr = config.option.keyword.lstrip()
if not keywordexpr:
return
if keywordexpr.startswith("-"):
# To be removed in pytest 7.0.0.
# Uncomment this after 6.0 release (#7361)
# warnings.warn(MINUS_K_DASH, stacklevel=2)
keywordexpr = "not " + keywordexpr[1:]
selectuntil = False
if keywordexpr[-1:] == ":":
# To be removed in pytest 7.0.0.
# Uncomment this after 6.0 release (#7361)
# warnings.warn(MINUS_K_COLON, stacklevel=2)
selectuntil = True
keywordexpr = keywordexpr[:-1]
try:
expression = Expression.compile(keywordexpr)
except ParseError as e:
raise UsageError(
"Wrong expression passed to '-k': {}: {}".format(keywordexpr, e)
) from None
remaining = []
deselected = []
for colitem in items:
if keywordexpr and not expression.evaluate(KeywordMatcher.from_item(colitem)):
deselected.append(colitem)
else:
if selectuntil:
keywordexpr = None
remaining.append(colitem)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining
@attr.s(slots=True)
class MarkMatcher:
"""A matcher for markers which are present.
Tries to match on any marker names, attached to the given colitem.
"""
own_mark_names = attr.ib()
@classmethod
def from_item(cls, item) -> "MarkMatcher":
mark_names = {mark.name for mark in item.iter_markers()}
return cls(mark_names)
def __call__(self, name: str) -> bool:
return name in self.own_mark_names
def deselect_by_mark(items: "List[Item]", config: Config) -> None:
matchexpr = config.option.markexpr
if not matchexpr:
return
try:
expression = Expression.compile(matchexpr)
except ParseError as e:
raise UsageError(
"Wrong expression passed to '-m': {}: {}".format(matchexpr, e)
) from None
remaining = []
deselected = []
for item in items:
if expression.evaluate(MarkMatcher.from_item(item)):
remaining.append(item)
else:
deselected.append(item)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining
def pytest_collection_modifyitems(items: "List[Item]", config: Config) -> None:
deselect_by_keyword(items, config)
deselect_by_mark(items, config)
def pytest_configure(config: Config) -> None:
config._store[old_mark_config_key] = MARK_GEN._config
MARK_GEN._config = config
empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION)
if empty_parameterset not in ("skip", "xfail", "fail_at_collect", None, ""):
raise UsageError(
"{!s} must be one of skip, xfail or fail_at_collect"
" but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset)
)
def pytest_unconfigure(config: Config) -> None:
MARK_GEN._config = config._store.get(old_mark_config_key, None)
|
|
#!/usr/bin/env python3
import os
import random
import bpy
import bpy_extras
from mathutils import Matrix, Vector
import math
import numpy as np
import scipy.io
import pickle
import png
RENDERING_PATH = './'
MAX_CAMERA_DIST = 2
MAX_DEPTH = 1e8
FACTOR_DEPTH = 10000
g_shape_synset_name_pairs = [('02691156', 'aeroplane'),
('02747177', 'ashtray'),
('02773838', 'backpack'),
('02801938', 'basket'),
('02808440', 'tub'), # bathtub
('02818832', 'bed'),
('02828884', 'bench'),
('02834778', 'bicycle'),
('02843684', 'mailbox'), # missing in objectnet3d, birdhouse, use view distribution of mailbox
('02858304', 'boat'),
('02871439', 'bookshelf'),
('02876657', 'bottle'),
('02880940', 'plate'), # missing in objectnet3d, bowl, use view distribution of plate
('02924116', 'bus'),
('02933112', 'cabinet'),
('02942699', 'camera'),
('02946921', 'can'),
('02954340', 'cap'),
('02958343', 'car'),
('02992529', 'cellphone'),
('03001627', 'chair'),
('03046257', 'clock'),
('03085013', 'keyboard'),
('03207941', 'dishwasher'),
('03211117', 'tvmonitor'),
('03261776', 'headphone'),
('03325088', 'faucet'),
('03337140', 'filing_cabinet'),
('03467517', 'guitar'),
('03513137', 'helmet'),
('03593526', 'jar'),
('03624134', 'knife'),
('03636649', 'lamp'),
('03642806', 'laptop'),
('03691459', 'speaker'),
('03710193', 'mailbox'),
('03759954', 'microphone'),
('03761084', 'microwave'),
('03790512', 'motorbike'),
('03797390', 'cup'), # missing in objectnet3d, mug, use view distribution of cup
('03928116', 'piano'),
('03938244', 'pillow'),
('03948459', 'rifle'), # missing in objectnet3d, pistol, use view distribution of rifle
('03991062', 'pot'),
('04004475', 'printer'),
('04074963', 'remote_control'),
('04090263', 'rifle'),
('04099429', 'road_pole'), # missing in objectnet3d, rocket, use view distribution of road_pole
('04225987', 'skateboard'),
('04256520', 'sofa'),
('04330267', 'stove'),
('04379243', 'diningtable'), # use view distribution of dining_table
('04401088', 'telephone'),
('04460130', 'road_pole'), # missing in objectnet3d, tower, use view distribution of road_pole
('04468005', 'train'),
('04530566', 'washing_machine'),
('04554684', 'dishwasher')] # washer, use view distribution of dishwasher
g_shape_synsets = [x[0] for x in g_shape_synset_name_pairs]
g_shape_names = [x[1] for x in g_shape_synset_name_pairs]
g_view_distribution_files = dict(zip(g_shape_synsets, [name+'.txt' for name in g_shape_names]))
g_syn_light_num_lowbound = 4
g_syn_light_num_highbound = 6
g_syn_light_dist_lowbound = 8
g_syn_light_dist_highbound = 12
g_syn_light_azimuth_degree_lowbound = 0
g_syn_light_azimuth_degree_highbound = 360
g_syn_light_elevation_degree_lowbound = 0
g_syn_light_elevation_degree_highbound = 90
g_syn_light_energy_mean = 3
g_syn_light_energy_std = 0.5
g_syn_light_environment_energy_lowbound = 0
g_syn_light_environment_energy_highbound = 1
def camPosToQuaternion(cx, cy, cz):
camDist = math.sqrt(cx * cx + cy * cy + cz * cz)
cx = cx / camDist
cy = cy / camDist
cz = cz / camDist
axis = (-cz, 0, cx)
angle = math.acos(cy)
a = math.sqrt(2) / 2
b = math.sqrt(2) / 2
w1 = axis[0]
w2 = axis[1]
w3 = axis[2]
c = math.cos(angle / 2)
d = math.sin(angle / 2)
q1 = a * c - b * d * w1
q2 = b * c + a * d * w1
q3 = a * d * w2 + b * d * w3
q4 = -b * d * w2 + a * d * w3
return (q1, q2, q3, q4)
def quaternionFromYawPitchRoll(yaw, pitch, roll):
c1 = math.cos(yaw / 2.0)
c2 = math.cos(pitch / 2.0)
c3 = math.cos(roll / 2.0)
s1 = math.sin(yaw / 2.0)
s2 = math.sin(pitch / 2.0)
s3 = math.sin(roll / 2.0)
q1 = c1 * c2 * c3 + s1 * s2 * s3
q2 = c1 * c2 * s3 - s1 * s2 * c3
q3 = c1 * s2 * c3 + s1 * c2 * s3
q4 = s1 * c2 * c3 - c1 * s2 * s3
return (q1, q2, q3, q4)
def camPosToQuaternion(cx, cy, cz):
q1a = 0
q1b = 0
q1c = math.sqrt(2) / 2
q1d = math.sqrt(2) / 2
camDist = math.sqrt(cx * cx + cy * cy + cz * cz)
cx = cx / camDist
cy = cy / camDist
cz = cz / camDist
t = math.sqrt(cx * cx + cy * cy)
tx = cx / t
ty = cy / t
yaw = math.acos(ty)
if tx > 0:
yaw = 2 * math.pi - yaw
pitch = 0
tmp = min(max(tx*cx + ty*cy, -1),1)
#roll = math.acos(tx * cx + ty * cy)
roll = math.acos(tmp)
if cz < 0:
roll = -roll
print("%f %f %f" % (yaw, pitch, roll))
q2a, q2b, q2c, q2d = quaternionFromYawPitchRoll(yaw, pitch, roll)
q1 = q1a * q2a - q1b * q2b - q1c * q2c - q1d * q2d
q2 = q1b * q2a + q1a * q2b + q1d * q2c - q1c * q2d
q3 = q1c * q2a - q1d * q2b + q1a * q2c + q1b * q2d
q4 = q1d * q2a + q1c * q2b - q1b * q2c + q1a * q2d
return (q1, q2, q3, q4)
def camRotQuaternion(cx, cy, cz, theta):
theta = theta / 180.0 * math.pi
camDist = math.sqrt(cx * cx + cy * cy + cz * cz)
cx = -cx / camDist
cy = -cy / camDist
cz = -cz / camDist
q1 = math.cos(theta * 0.5)
q2 = -cx * math.sin(theta * 0.5)
q3 = -cy * math.sin(theta * 0.5)
q4 = -cz * math.sin(theta * 0.5)
return (q1, q2, q3, q4)
def quaternionProduct(qx, qy):
a = qx[0]
b = qx[1]
c = qx[2]
d = qx[3]
e = qy[0]
f = qy[1]
g = qy[2]
h = qy[3]
q1 = a * e - b * f - c * g - d * h
q2 = a * f + b * e + c * h - d * g
q3 = a * g - b * h + c * e + d * f
q4 = a * h + b * g - c * f + d * e
return (q1, q2, q3, q4)
def obj_centened_camera_pos(dist, azimuth_deg, elevation_deg):
phi = float(elevation_deg) / 180 * math.pi
theta = float(azimuth_deg) / 180 * math.pi
x = (dist * math.cos(theta) * math.cos(phi))
y = (dist * math.sin(theta) * math.cos(phi))
z = (dist * math.sin(phi))
return (x, y, z)
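# Hedged usage sketch, not part of the original script: it shows how the helpers above
# compose into a full camera pose, mirroring what BlenderRenderer.setViewpoint does
# below. The function name and the default angles/distance are illustrative only.
def _example_camera_pose(azimuth_deg=45.0, elevation_deg=30.0, tilt_deg=0.0, dist=1.2):
    # camera location on a sphere of radius `dist` around the origin
    cx, cy, cz = obj_centened_camera_pos(dist, azimuth_deg, elevation_deg)
    # quaternion that points the camera at the origin
    q_look = camPosToQuaternion(cx, cy, cz)
    # extra in-plane (tilt) rotation about the viewing axis
    q_tilt = camRotQuaternion(cx, cy, cz, tilt_deg)
    # compose the rotations: tilt applied on top of the look-at orientation
    return (cx, cy, cz), quaternionProduct(q_tilt, q_look)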
class BlenderRenderer(object):
def __init__(self, viewport_size_x, viewport_size_y):
'''
viewport_size_x, viewport_size_y: rendering viewport resolution
'''
# remove the default cube
bpy.ops.object.select_pattern(pattern="Cube")
bpy.ops.object.delete()
render_context = bpy.context.scene.render
world = bpy.context.scene.world
camera = bpy.data.objects['Camera']
# set the camera position and orientation so that it is in
# front of the object
camera.location = (1, 0, 0)
# render setting
render_context.resolution_percentage = 100
world.horizon_color = (1, 1, 1) # set background color to be white
# set file name for storing temporary rendering result
self.result_fn = '%s/render_result_%d.png' % (RENDERING_PATH, os.getpid())
bpy.context.scene.render.filepath = self.result_fn
# switch on nodes
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
links = tree.links
# clear default nodes
for n in tree.nodes:
tree.nodes.remove(n)
# create input render layer node
rl = tree.nodes.new('CompositorNodeRLayers')
# create output node
v = tree.nodes.new('CompositorNodeViewer')
# Links
links.new(rl.outputs[2], v.inputs[0]) # link Image output to Viewer input
self.render_context = render_context
self.camera = camera
self.model_loaded = False
self.render_context.resolution_x = viewport_size_x
self.render_context.resolution_y = viewport_size_y
self.render_context.use_antialiasing = False
self.pngWriter = png.Writer(viewport_size_x, viewport_size_y, greyscale=True, alpha=False, bitdepth=16)
def _set_lighting(self):
# clear default lights
bpy.ops.object.select_by_type(type='LAMP')
bpy.ops.object.delete(use_global=False)
# set environment lighting
bpy.context.scene.world.light_settings.use_environment_light = True
bpy.context.scene.world.light_settings.environment_energy = np.random.uniform(g_syn_light_environment_energy_lowbound, g_syn_light_environment_energy_highbound)
bpy.context.scene.world.light_settings.environment_color = 'PLAIN'
# set point lights
num_light = random.randint(g_syn_light_num_lowbound,g_syn_light_num_highbound)
print(num_light)
light_info = np.zeros((num_light, 4), dtype=np.float32)
for i in range(num_light):
light_azimuth_deg = np.random.uniform(g_syn_light_azimuth_degree_lowbound, g_syn_light_azimuth_degree_highbound)
light_elevation_deg = np.random.uniform(g_syn_light_elevation_degree_lowbound, g_syn_light_elevation_degree_highbound)
light_dist = np.random.uniform(g_syn_light_dist_lowbound, g_syn_light_dist_highbound)
lx, ly, lz = obj_centened_camera_pos(light_dist, light_azimuth_deg, light_elevation_deg)
bpy.ops.object.lamp_add(type='POINT', view_align = False, location=(lx, ly, lz))
light_energy = np.random.normal(g_syn_light_energy_mean, g_syn_light_energy_std)
bpy.data.objects['Point'].data.energy = light_energy
light_info[i, 0] = light_azimuth_deg
light_info[i, 1] = light_elevation_deg
light_info[i, 2] = light_dist
light_info[i, 3] = light_energy
self.light_info = light_info
def setViewpoint(self, azimuth, altitude, yaw, distance_ratio, fov):
cx, cy, cz = obj_centened_camera_pos(distance_ratio * MAX_CAMERA_DIST, azimuth, altitude)
q1 = camPosToQuaternion(cx, cy, cz)
q2 = camRotQuaternion(cx, cy, cz, yaw)
q = quaternionProduct(q2, q1)
self.camera.location[0] = cx
self.camera.location[1] = cy
self.camera.location[2] = cz
self.camera.rotation_mode = 'QUATERNION'
self.camera.rotation_quaternion[0] = q[0]
self.camera.rotation_quaternion[1] = q[1]
self.camera.rotation_quaternion[2] = q[2]
self.camera.rotation_quaternion[3] = q[3]
self.azimuth = azimuth
self.elevation = altitude
self.tilt = yaw
self.distance = distance_ratio * MAX_CAMERA_DIST
def setTransparency(self, transparency='SKY'):
""" transparency is either 'SKY', 'TRANSPARENT'
If set 'SKY', render background using sky color."""
self.render_context.alpha_mode = transparency
def makeMaterial(self, name, diffuse, specular, alpha):
mat = bpy.data.materials.new(name)
mat.diffuse_color = diffuse
mat.diffuse_shader = 'LAMBERT'
mat.diffuse_intensity = 1.0
mat.specular_color = specular
mat.specular_shader = 'COOKTORR'
mat.specular_intensity = 0.5
mat.alpha = alpha
mat.ambient = 1
mat.use_transparency = True
mat.transparency_method = 'Z_TRANSPARENCY'
mat.use_shadeless = True
mat.use_face_texture = False
return mat
def selectModel(self):
bpy.ops.object.select_all(action='DESELECT')
bpy.ops.object.select_pattern(pattern="Camera")
bpy.ops.object.select_all(action='INVERT')
def printSelection(self):
print(bpy.context.selected_objects)
def clearModel(self):
self.selectModel()
bpy.ops.object.delete()
# The meshes and materials are still present after the delete, so remove them explicitly
for item in bpy.data.meshes:
bpy.data.meshes.remove(item)
for item in bpy.data.materials:
bpy.data.materials.remove(item)
def loadModel(self, file_path):
self.model_loaded = True
try:
if file_path.endswith('obj'):
bpy.ops.import_scene.obj(filepath=file_path)
elif file_path.endswith('3ds'):
bpy.ops.import_scene.autodesk_3ds(filepath=file_path)
elif file_path.endswith('dae'):
# Must install OpenCollada. Please read README.md
bpy.ops.wm.collada_import(filepath=file_path)
else:
self.model_loaded = False
raise Exception("Loading failed: %s" % (file_path))
except Exception:
self.model_loaded = False
def loadModels(self, file_paths, scales, classes, filename):
self.model_loaded = True
mesh = dict()
num = len(file_paths)
height_max = -np.inf * np.ones((num,), dtype=np.float32)
height_min = np.inf * np.ones((num,), dtype=np.float32)
for i in range(num):
file_path = file_paths[i]
try:
if file_path.endswith('obj'):
bpy.ops.import_scene.obj(filepath=file_path)
elif file_path.endswith('3ds'):
bpy.ops.import_scene.autodesk_3ds(filepath=file_path)
elif file_path.endswith('dae'):
# Must install OpenCollada. Please read README.md for installation
bpy.ops.wm.collada_import(filepath=file_path)
else:
# TODO
# Other formats not supported yet
self.model_loaded = False
raise Exception("Loading failed: %s" % (file_path))
for item in bpy.data.objects:
if item.type == 'MESH':
if item.name not in mesh:
mesh[item.name] = i
for vertex in item.data.vertices:
height_max[i] = max(height_max[i], scales[i] * vertex.co[1])
height_min[i] = min(height_min[i], scales[i] * vertex.co[1])
except Exception:
self.model_loaded = False
self.mesh = mesh
print(height_max)
print(height_min)
# rotate the objects
thetas = np.zeros((num,), dtype=np.float32)
for i in range(1, num):
# sample a rotation angle
thetas[i] = (2 * np.random.rand(1) - 1) * math.pi
for item in bpy.data.objects:
if item.type == 'MESH':
ind = mesh[item.name]
theta = thetas[ind]
R = np.array([[math.cos(theta), 0, math.sin(theta)], [0, 1, 0], [-math.sin(theta), 0, math.cos(theta)]])
for vertex in item.data.vertices:
rv = np.dot(R, np.array(vertex.co).reshape((3,1)))
vertex.co[0] = rv[0]
vertex.co[1] = rv[1]
vertex.co[2] = rv[2]
# os.sys.exit(1)
# rescale the meshes
for item in bpy.data.objects:
if item.type == 'MESH':
ind = mesh[item.name]
for vertex in item.data.vertices:
vertex.co *= scales[ind]
# make sure chairs and sofas rest on the ground (the table's lowest point defines the ground)
table_height_min = height_min[0]
for item in bpy.data.objects:
if item.type == 'MESH':
ind = mesh[item.name]
if classes[ind] == 'chair' or classes[ind] == 'sofa':
for vertex in item.data.vertices:
vertex.co[1] += table_height_min - height_min[ind]
# move the remaining objects onto the table top
table_height_max = height_max[0]
for item in bpy.data.objects:
if item.type == 'MESH':
ind = mesh[item.name]
if classes[ind] != 'chair' and classes[ind] != 'sofa' and classes[ind] != 'table':
for vertex in item.data.vertices:
vertex.co[1] += table_height_max - height_min[ind]
# move all the objects down
for item in bpy.data.objects:
if item.type == 'MESH':
ind = mesh[item.name]
for vertex in item.data.vertices:
vertex.co[1] -= 0.2
# collect the vertices
vertices = []
for i in range(num):
vertices.append(np.zeros((0, 3), dtype=np.float32))
for item in bpy.data.objects:
if item.type == 'MESH':
ind = mesh[item.name]
for vertex in item.data.vertices:
vertices[ind] = np.append(vertices[ind], np.array(vertex.co).reshape((1,3)), axis = 0)
# compute the boundary of objects
Xlim = np.zeros((num, 2), dtype=np.float32)
Ylim = np.zeros((num, 2), dtype=np.float32)
Zlim = np.zeros((num, 2), dtype=np.float32)
for i in range(num):
Xlim[i, 0] = vertices[i][:, 0].min()
Xlim[i, 1] = vertices[i][:, 0].max()
Ylim[i, 0] = vertices[i][:, 1].min()
Ylim[i, 1] = vertices[i][:, 1].max()
Zlim[i, 0] = vertices[i][:, 2].min()
Zlim[i, 1] = vertices[i][:, 2].max()
# sampling locations of objects
locations = np.zeros((num, 2), dtype=np.float32)
success = True
for i in range(1, num):
if classes[i] == 'chair' or classes[i] == 'sofa':
table_top = False
else:
table_top = True
count = 0
while True:
count += 1
if table_top:
lx = Xlim[0, 1] - Xlim[0, 0]
lz = Zlim[0, 1] - Zlim[0, 0]
x = Xlim[0, 0] + np.random.rand(1) * lx
z = Zlim[0, 0] + np.random.rand(1) * lz
# check if object is inside the table or not
a = [x-(Xlim[i,1]-Xlim[i,0])/2, z-(Zlim[i,1]-Zlim[i,0])/2, x+(Xlim[i,1]-Xlim[i,0])/2, z+(Zlim[i,1]-Zlim[i,0])/2]
if a[2] < Xlim[0, 1] - 0.02 and \
a[0] > Xlim[0, 0] + 0.02 and \
a[3] < Zlim[0, 1] - 0.02 and \
a[1] > Zlim[0, 0] + 0.02:
flag = 1
else:
flag = 0
else:
lx = Xlim[0, 1] - Xlim[0, 0]
lz = Zlim[0, 1] - Zlim[0, 0]
if i == 1:
x = Xlim[0, 0] - 0.5
z = Zlim[0, 0] + np.random.rand(1) * lz
elif i == 2:
x = Xlim[0, 1] + 0.5
z = Zlim[0, 0] + np.random.rand(1) * lz
elif i == 3:
x = Xlim[0, 0] + np.random.rand(1) * lx
z = Zlim[0, 0] - 0.5
elif i == 4:
x = Xlim[0, 0] + np.random.rand(1) * lx
z = Zlim[0, 1] + 0.5
a = [x-(Xlim[i,1]-Xlim[i,0])/2, z-(Zlim[i,1]-Zlim[i,0])/2, x+(Xlim[i,1]-Xlim[i,0])/2, z+(Zlim[i,1]-Zlim[i,0])/2]
flag = 1
if flag == 1:
# check collision with other objects
if table_top:
r = range(1, i)
else:
r = range(0, i)
for j in r:
b = [locations[j,0]-(Xlim[j,1]-Xlim[j,0])/2, locations[j,1]-(Zlim[j,1]-Zlim[j,0])/2, \
locations[j,0]+(Xlim[j,1]-Xlim[j,0])/2, locations[j,1]+(Zlim[j,1]-Zlim[j,0])/2]
x1 = max(a[0], b[0])
y1 = max(a[1], b[1])
x2 = min(a[2], b[2])
y2 = min(a[3], b[3])
w = x2 - x1
h = y2 - y1
inter = w * h
if w > 0 and h > 0 and inter > 0:
print('object {:d} collision with object {:d}'.format(i, j))
flag = 0
if flag == 1:
print('Sampled location for object %d' % i)
break
else:
if count > 1000:
print('Fail: cannot find location for object %d' % i)
break
if flag == 1:
locations[i, 0] = x
locations[i, 1] = z
else:
success = False
break
if success:
# move the meshes
for item in bpy.data.objects:
if item.type == 'MESH':
ind = mesh[item.name]
if ind > 0:
for vertex in item.data.vertices:
vertex.co[0] += locations[ind, 0]
vertex.co[2] += locations[ind, 1]
# add a transparent plane
V = np.zeros((0, 3), dtype=np.float32)
for item in bpy.data.objects:
if item.type == 'MESH':
for vertex in item.data.vertices:
V = np.append(V, np.array(vertex.co).reshape((1,3)), axis = 0)
factor = 3
x1 = factor * np.min(V[:,0])
x2 = factor * np.max(V[:,0])
y1 = factor * np.min(V[:,2])
y2 = factor * np.max(V[:,2])
z = np.min(V[:,1])
verts = [(x1, y1, z), (x2, y1, z), (x2, y2, z), (x1, y2, z)]
faces = [(0, 1, 2, 3)]
mesh_data = bpy.data.meshes.new("cube_mesh_data")
obj = bpy.data.objects.new("plane", mesh_data)
bpy.context.scene.objects.link(obj)
bpy.context.scene.objects.active = obj
obj.select = True
mesh_data.from_pydata(verts, [], faces)
mesh_data.update()
mat = self.makeMaterial('transparent', (0.5,0.5,0.5), (0,0,0), 1)
obj.data.materials.append(mat)
if success:
# save model
if filename:
bpy.ops.export_scene.obj(filepath=filename, use_selection=False)
return success
# Build intrinsic camera parameters from Blender camera data
def compute_intrinsic(self):
w = self.render_context.resolution_x * self.render_context.resolution_percentage / 100.
h = self.render_context.resolution_y * self.render_context.resolution_percentage / 100.
K = Matrix().to_3x3()
K[0][0] = w/2. / math.tan(self.camera.data.angle/2)
ratio = w/h
K[1][1] = h/2. / math.tan(self.camera.data.angle/2) * ratio
K[0][2] = w / 2.
K[1][2] = h / 2.
K[2][2] = 1.
return K
# Returns camera rotation and translation matrices from Blender.
# There are 3 coordinate systems involved:
# 1. The World coordinates: "world"
# - right-handed
# 2. The Blender camera coordinates: "bcam"
# - x is horizontal
# - y is up
# - right-handed: negative z look-at direction
# 3. The desired computer vision camera coordinates: "cv"
# - x is horizontal
# - y is down (to align to the actual pixel coordinates
# used in digital images)
# - right-handed: positive z look-at direction
def compute_rotation_translation(self):
# bcam stands for blender camera
R_bcam2cv = Matrix(
((1, 0, 0),
(0, -1, 0),
(0, 0, -1)))
# Transpose since the rotation is object rotation,
# and we want coordinate rotation
# R_world2bcam = cam.rotation_euler.to_matrix().transposed()
# T_world2bcam = -1*R_world2bcam * location
#
# Use matrix_world instead to account for all constraints
location, rotation = self.camera.matrix_world.decompose()[0:2]
R_world2bcam = rotation.to_matrix().transposed()
# Convert camera location to translation vector used in coordinate changes
# T_world2bcam = -1*R_world2bcam*cam.location
# Use location from matrix_world to account for constraints:
T_world2bcam = -1*R_world2bcam * location
# Build the coordinate transform matrix from world to computer vision camera
R_world2cv = R_bcam2cv*R_world2bcam
T_world2cv = R_bcam2cv*T_world2bcam
# put into 3x4 matrix
RT = Matrix((
R_world2cv[0][:] + (T_world2cv[0],),
R_world2cv[1][:] + (T_world2cv[1],),
R_world2cv[2][:] + (T_world2cv[2],)
))
return RT
def compute_projection_matrix(self):
K = self.compute_intrinsic()
RT = self.compute_rotation_translation()
return K*RT, RT, K
# backproject pixels into 3D points
def backproject(self, depth):
# compute projection matrix
P, RT, K = self.compute_projection_matrix()
P = np.matrix(P)
Pinv = np.linalg.pinv(P)
# compute the 3D points
width = depth.shape[1]
height = depth.shape[0]
points = np.zeros((height, width, 3), dtype=np.float32)
# camera location
C = self.camera.location
C = np.matrix(C).transpose()
Cmat = np.tile(C, (1, width*height))
# construct the 2D points matrix
x, y = np.meshgrid(np.arange(width), np.arange(height))
ones = np.ones((height, width), dtype=np.float32)
x2d = np.stack((x, y, ones), axis=2).reshape(width*height, 3)
# backprojection
x3d = Pinv * x2d.transpose()
x3d[0,:] = x3d[0,:] / x3d[3,:]
x3d[1,:] = x3d[1,:] / x3d[3,:]
x3d[2,:] = x3d[2,:] / x3d[3,:]
x3d = x3d[:3,:]
# compute the ray
R = x3d - Cmat
# compute the norm
N = np.linalg.norm(R, axis=0)
# normalization
R = np.divide(R, np.tile(N, (3,1)))
# compute the 3D points
X = Cmat + np.multiply(np.tile(depth.reshape(1, width*height), (3, 1)), R)
points[y, x, 0] = X[0,:].reshape(height, width)
points[y, x, 1] = X[2,:].reshape(height, width)
points[y, x, 2] = X[1,:].reshape(height, width)
if 0:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
perm = np.random.permutation(np.arange(height*width))
index = perm[:10000]
X = points[:,:,0].flatten()
Y = points[:,:,1].flatten()
Z = points[:,:,2].flatten()
ax.scatter(X[index], Y[index], Z[index], c='r', marker='o')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_aspect('equal')
plt.show()
# naive way of computing the 3D points
#for x in range(width):
# for y in range(height):
# if (depth[y, x] < MAX_DEPTH):
# x2d = np.matrix([x, y, 1]).transpose()
# x3d = Pinv * x2d
# x3d = x3d / x3d[3]
# x3d = x3d[:3]
# # compute the ray
# R = x3d - C
# # normalization
# R = R / np.linalg.norm(R)
# # point in 3D
# X = C + depth[y, x] * R
# # reverse y and z
# points[y, x, 0] = X[0]
# points[y, x, 1] = X[2]
# points[y, x, 2] = X[1]
return points
def render(self, return_depth=True,
image_path=os.path.join(RENDERING_PATH, 'tmp.png')):
'''
Render the object
'''
if not self.model_loaded:
print('Model not loaded.')
return
self.result_fn = image_path
bpy.context.scene.render.filepath = image_path
bpy.ops.render.render(write_still=True) # save straight to file
# get viewer pixels
pixels = bpy.data.images['Viewer Node'].pixels
# compute depth map
depth = np.array(pixels[:])
width = bpy.data.images['Viewer Node'].size[0]
height = bpy.data.images['Viewer Node'].size[1]
depth = depth.reshape((height, width, 4))
depth = depth[::-1,:,0]
ind = np.where(depth > MAX_DEPTH)
depth[ind] = 0
# convert depth map
depth = depth * FACTOR_DEPTH
depth = depth.astype(np.uint16)
if return_depth:
return depth
def save_meta_data(self, filename):
P, RT, K = self.compute_projection_matrix()
meta_data = {'projection_matrix' : np.array(P),
'rotation_translation_matrix': np.array(RT),
'intrinsic_matrix': np.array(K),
'azimuth': self.azimuth,
'elevation': self.elevation,
'tilt': self.tilt,
'distance': self.distance,
'viewport_size_x': self.render_context.resolution_x,
'viewport_size_y': self.render_context.resolution_y,
'camera_location': np.array(self.camera.location),
'factor_depth': FACTOR_DEPTH,
'light_info': self.light_info}
scipy.io.savemat(filename+'.mat', meta_data)
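# Hedged sketch, not part of the original script: reading back what save_meta_data
# writes. The key names follow the meta_data dict above; the file path is hypothetical.
def _example_load_meta(filename='0000_meta.mat'):
    meta = scipy.io.loadmat(filename)
    # depth PNGs are written as uint16 scaled by FACTOR_DEPTH, so metric depth can be
    # recovered as depth.astype(np.float32) / meta['factor_depth']
    return meta['projection_matrix'], meta['intrinsic_matrix'], meta['factor_depth']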
def render_all():
'''Test function'''
# table, chair, sofa, 'lamp', tvmonitor, bottle, mug, bowl, can, keyboard, cap
synsets = ['04379243', '03001627', '04256520', '03636649', '03211117', '02876657', '03797390', '02880940', '02946921', '03085013', '02954340']
shapenet_root = '/var/Projects/ShapeNetCore.v1'
results_root = '/var/Projects/Deep_ISM/Rendering/images'
if not os.path.exists(results_root):
os.makedirs(results_root)
# load 3D shape paths
file_paths = []
model_ids = []
for i in range(len(synsets)):
synset = synsets[i]
dn = os.path.join(shapenet_root, synset)
model_id = [line.strip('\n') for line in open(dn + '/models.txt')]
file_paths.append( [os.path.join(dn, line, 'model.obj') for line in model_id] )
model_ids.append(model_id)
# initialize the blender render
renderer = BlenderRenderer(640, 480)
for i in range(len(synsets)):
synset = synsets[i]
dirname = os.path.join(results_root, synset)
if not os.path.exists(dirname):
os.makedirs(dirname)
file_path = file_paths[i]
model_id = model_ids[i]
for j in range(len(model_id)):
id = model_id[j]
n = len(id)
id = id[:n-1]
renderer.loadModel(file_path[j])
# set viewpoint
renderer.setViewpoint(45, 30, 0, 0.6, 25)
# set transparency
renderer.setTransparency('TRANSPARENT')
# rendering
filename = dirname + '/' + id + '.png'
print(filename)
renderer.render_context.use_textures = True
renderer.render(False, filename)
renderer.clearModel()
os.sys.exit(1)
def main():
'''Test function'''
synsets = ['04379243', '03211117', '02876657', '03797390', '02946921', '03085013', '02954340']
synset_names = ['table', 'tvmonitor', 'bottle', 'mug', 'can', 'keyboard', 'cap']
synset_scales = [1.0, 0.4, 0.2, 0.2, 0.2, 0.4, 0.3]
synset_colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 0), (1, 0, 1), (0, 1, 1), (0.5, 0, 0)]
num_scene = 100
view_num = 100
delta = 2.5
distance = 0.6
shapenet_root = '/var/Projects/ShapeNetCore.v1'
models_root = '/var/Projects/Deep_ISM/Rendering/images_selected'
view_dists_root = '/var/Projects/Deep_ISM/ObjectNet3D/view_distributions'
results_root = '/var/Projects/Deep_ISM/Rendering/data'
if not os.path.exists(results_root):
os.makedirs(results_root)
# load 3D shape paths
file_paths = []
for i in range(len(synsets)):
synset = synsets[i]
dn = os.path.join(shapenet_root, synset)
model_dir = os.path.join(models_root, synset)
model_id = [line.strip('\n') for line in open(model_dir + '/models.txt')]
file_paths.append( [os.path.join(dn, line, 'model.obj') for line in model_id] )
# load viewpoint distributions
synset = synsets[0] # table
filename = os.path.join(view_dists_root, g_view_distribution_files[synset])
if not os.path.exists(filename):
print('Failed to read view distribution files from %s for synset %s' %
(filename, synset))
exit()
view_params = open(filename).readlines()
view_params = [[float(x) for x in line.strip().split(' ')] for line in view_params]
# initialize the blender render
renderer = BlenderRenderer(640, 480)
# for each scene
# for k in range(num_scene):
for k in range(80, 100):
renderer._set_lighting()
paths = []
scales = []
classes = []
class_indexes = []
# create materials
materials = []
for i in range(len(synsets)):
materials.append(renderer.makeMaterial('transparent', synset_colors[i], (0,0,0), 1))
material_plane = renderer.makeMaterial('transparent', (1,1,1), (0,0,0), 0)
# create output directory
dirname = os.path.join(results_root, '%04d' % k)
print(dirname)
if not os.path.exists(dirname):
os.makedirs(dirname)
# sample objects
while True:
# choose a table
id = 0
model_id = file_paths[id]
if k < num_scene / 2:
index = random.randint(0, int(len(model_id) / 2))
else:
index = random.randint(int(len(model_id) / 2) + 1, len(model_id)-1)
paths.append(model_id[index])
scales.append(synset_scales[id])
classes.append(synset_names[id])
class_indexes.append(id)
# choose the number of objects on the table
num = 5
index_all = np.random.permutation(6) + 1
# choose objects
for i in range(num):
index = index_all[i]
model_id = file_paths[index]
scales.append(synset_scales[index])
classes.append(synset_names[index])
class_indexes.append(index)
if k < num_scene / 2:
index = random.randint(0, int(len(model_id) / 2))
else:
index = random.randint(int(len(model_id) / 2) + 1, len(model_id)-1)
paths.append(model_id[index])
print(classes)
# load model
filename = dirname + '/model.obj'
success = renderer.loadModels(paths, scales, classes, filename)
if success:
break
else:
paths = []
scales = []
classes = []
class_indexes = []
renderer.clearModel()
renderer._set_lighting()
# create materials
materials = []
for i in range(len(synsets)):
materials.append(renderer.makeMaterial('transparent', synset_colors[i], (0,0,0), 1))
material_plane = renderer.makeMaterial('transparent', (1,1,1), (0,0,0), 0)
# sample viewpoints
viewpoints = np.zeros((view_num, 3), dtype=np.float32)
while True:
index = random.randint(0, len(view_params)-1)
azimuth = view_params[index][0]
elevation = view_params[index][1]
tilt = view_params[index][2]
tilt = 0
if elevation > 30 and elevation < 40:
break
viewpoints[0, 0] = azimuth
viewpoints[0, 1] = elevation
viewpoints[0, 2] = tilt
for i in range(1, view_num):
azimuth += delta + 0.1 * np.random.randn(1)
elevation += 0.1 * np.random.randn(1)
tilt += 0.1 * np.random.randn(1)
viewpoints[i, 0] = azimuth
viewpoints[i, 1] = elevation
viewpoints[i, 2] = tilt
# render rgb images
for i in range(view_num):
azimuth = viewpoints[i, 0]
elevation = viewpoints[i, 1]
tilt = viewpoints[i, 2]
# set viewpoint
renderer.setViewpoint(azimuth, elevation, tilt, distance, 25)
# set transparency
renderer.setTransparency('TRANSPARENT')
# rendering
filename = dirname + '/%04d_rgba.png' % i
renderer.render_context.use_textures = True
depth = renderer.render(True, filename)
# save depth image
filename = dirname + '/%04d_depth.png' % i
pngfile = open(filename, 'wb')
renderer.pngWriter.write(pngfile, depth)
# save meta data
filename = dirname + '/%04d_meta' % i
renderer.save_meta_data(filename)
# assign materials to models
for item in bpy.data.objects:
if item.type == 'MESH':
if item.name == 'plane':
mat = material_plane
else:
ind = renderer.mesh[item.name]
mat = materials[class_indexes[ind]]
if item.data.materials:
for i in range(len(item.data.materials)):
# item.data.materials[i] = mat
item.data.materials[i].diffuse_color = mat.diffuse_color
item.data.materials[i].diffuse_shader = mat.diffuse_shader
item.data.materials[i].diffuse_intensity = mat.diffuse_intensity
item.data.materials[i].specular_color = mat.specular_color
item.data.materials[i].specular_shader = mat.specular_shader
item.data.materials[i].specular_intensity = mat.specular_intensity
item.data.materials[i].alpha = mat.alpha
item.data.materials[i].ambient = mat.ambient
item.data.materials[i].use_transparency = mat.use_transparency
item.data.materials[i].transparency_method = mat.transparency_method
item.data.materials[i].use_shadeless = mat.use_shadeless
item.data.materials[i].use_face_texture = mat.use_face_texture
else:
item.data.materials.append(mat)
# render label image
for i in range(view_num):
azimuth = viewpoints[i][0]
elevation = viewpoints[i][1]
tilt = viewpoints[i][2]
# set viewpoint
renderer.setViewpoint(azimuth, elevation, tilt, distance, 25)
# set transparency
renderer.setTransparency('TRANSPARENT')
# rendering
filename = dirname + '/%04d_label.png' % i
renderer.render_context.use_textures = False
depth = renderer.render(True, filename)
renderer.clearModel()
os.sys.exit(1)
if __name__ == "__main__":
main()
# render_all()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python wrapper for Android uiautomator tool."""
import sys
import os
import subprocess
import time
import itertools
import json
import hashlib
import socket
import re
import collections
import xml.dom.minidom
DEVICE_PORT = int(os.environ.get('UIAUTOMATOR_DEVICE_PORT', '9008'))
LOCAL_PORT = int(os.environ.get('UIAUTOMATOR_LOCAL_PORT', '9008'))
if 'localhost' not in os.environ.get('no_proxy', ''):
os.environ['no_proxy'] = "localhost,%s" % os.environ.get('no_proxy', '')
try:
import urllib2
except ImportError:
import urllib.request as urllib2
try:
from httplib import HTTPException
except:
from http.client import HTTPException
try:
if os.name == 'nt':
import urllib3
except: # to fix python setup error on Windows.
pass
__author__ = "Xiaocong He"
__all__ = ["device", "Device", "rect", "point", "Selector", "JsonRPCError"]
def U(x):
if sys.version_info.major == 2:
return x.decode('utf-8') if type(x) is str else x
elif sys.version_info.major == 3:
return x
def param_to_property(*props, **kwprops):
if props and kwprops:
raise SyntaxError("Can not set both props and kwprops at the same time.")
class Wrapper(object):
def __init__(self, func):
self.func = func
self.kwargs, self.args = {}, []
def __getattr__(self, attr):
if kwprops:
for prop_name, prop_values in kwprops.items():
if attr in prop_values and prop_name not in self.kwargs:
self.kwargs[prop_name] = attr
return self
elif attr in props:
self.args.append(attr)
return self
raise AttributeError("%s parameter is duplicated or not allowed!" % attr)
def __call__(self, *args, **kwargs):
if kwprops:
kwargs.update(self.kwargs)
self.kwargs = {}
return self.func(*args, **kwargs)
else:
new_args, self.args = self.args + list(args), []
return self.func(*new_args, **kwargs)
return Wrapper
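# Hedged illustration, not part of the original module: param_to_property turns chained
# attribute access into accumulated call arguments, which is how d.press.home() and
# d.wait.idle() are built further down. The decorated function below is hypothetical.
def _param_to_property_example():
    @param_to_property(action=["start", "stop"])
    def _control(action, timeout=10):
        return (action, timeout)
    assert _control.start(timeout=5) == ("start", 5)  # attribute access supplies the 'action' kwarg
    assert _control(action="stop") == ("stop", 10)    # a direct call still works
    return _control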
class JsonRPCError(Exception):
def __init__(self, code, message):
self.code = int(code)
self.message = message
def __str__(self):
return "JsonRPC Error code: %d, Message: %s" % (self.code, self.message)
class JsonRPCMethod(object):
if os.name == 'nt':
try:
pool = urllib3.PoolManager()
except:
pass
def __init__(self, url, method, timeout=30):
self.url, self.method, self.timeout = url, method, timeout
def __call__(self, *args, **kwargs):
if args and kwargs:
raise SyntaxError("Could not accept both *args and **kwargs as JSONRPC parameters.")
data = {"jsonrpc": "2.0", "method": self.method, "id": self.id()}
if args:
data["params"] = args
elif kwargs:
data["params"] = kwargs
jsonresult = {"result": ""}
if os.name == "nt":
res = self.pool.urlopen("POST",
self.url,
headers={"Content-Type": "application/json"},
body=json.dumps(data).encode("utf-8"),
timeout=self.timeout)
jsonresult = json.loads(res.data.decode("utf-8"))
else:
result = None
try:
req = urllib2.Request(self.url,
json.dumps(data).encode("utf-8"),
{"Content-type": "application/json"})
result = urllib2.urlopen(req, timeout=self.timeout)
jsonresult = json.loads(result.read().decode("utf-8"))
finally:
if result is not None:
result.close()
if "error" in jsonresult and jsonresult["error"]:
raise JsonRPCError(
jsonresult["error"]["code"],
"%s: %s" % (jsonresult["error"]["data"]["exceptionTypeName"], jsonresult["error"]["message"])
)
return jsonresult["result"]
def id(self):
m = hashlib.md5()
m.update(("%s at %f" % (self.method, time.time())).encode("utf-8"))
return m.hexdigest()
class JsonRPCClient(object):
def __init__(self, url, timeout=30, method_class=JsonRPCMethod):
self.url = url
self.timeout = timeout
self.method_class = method_class
def __getattr__(self, method):
return self.method_class(self.url, method, timeout=self.timeout)
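# Hedged sketch, not part of the original module: JsonRPCClient resolves any attribute
# into a JsonRPCMethod bound to that name, so calling it issues a JSON-RPC 2.0 POST
# such as {"jsonrpc": "2.0", "method": "deviceInfo", ...}. The URL is a placeholder
# and the call only succeeds with the device-side server running:
#
#   client = JsonRPCClient("http://127.0.0.1:9008/jsonrpc/0")
#   info = client.deviceInfo()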
class Selector(dict):
"""The class is to build parameters for UiSelector passed to Android device.
"""
__fields = {
"text": (0x01, None), # MASK_TEXT,
"textContains": (0x02, None), # MASK_TEXTCONTAINS,
"textMatches": (0x04, None), # MASK_TEXTMATCHES,
"textStartsWith": (0x08, None), # MASK_TEXTSTARTSWITH,
"className": (0x10, None), # MASK_CLASSNAME
"classNameMatches": (0x20, None), # MASK_CLASSNAMEMATCHES
"description": (0x40, None), # MASK_DESCRIPTION
"descriptionContains": (0x80, None), # MASK_DESCRIPTIONCONTAINS
"descriptionMatches": (0x0100, None), # MASK_DESCRIPTIONMATCHES
"descriptionStartsWith": (0x0200, None), # MASK_DESCRIPTIONSTARTSWITH
"checkable": (0x0400, False), # MASK_CHECKABLE
"checked": (0x0800, False), # MASK_CHECKED
"clickable": (0x1000, False), # MASK_CLICKABLE
"longClickable": (0x2000, False), # MASK_LONGCLICKABLE,
"scrollable": (0x4000, False), # MASK_SCROLLABLE,
"enabled": (0x8000, False), # MASK_ENABLED,
"focusable": (0x010000, False), # MASK_FOCUSABLE,
"focused": (0x020000, False), # MASK_FOCUSED,
"selected": (0x040000, False), # MASK_SELECTED,
"packageName": (0x080000, None), # MASK_PACKAGENAME,
"packageNameMatches": (0x100000, None), # MASK_PACKAGENAMEMATCHES,
"resourceId": (0x200000, None), # MASK_RESOURCEID,
"resourceIdMatches": (0x400000, None), # MASK_RESOURCEIDMATCHES,
"index": (0x800000, 0), # MASK_INDEX,
"instance": (0x01000000, 0) # MASK_INSTANCE,
}
__mask, __childOrSibling, __childOrSiblingSelector = "mask", "childOrSibling", "childOrSiblingSelector"
def __init__(self, **kwargs):
super(Selector, self).__setitem__(self.__mask, 0)
super(Selector, self).__setitem__(self.__childOrSibling, [])
super(Selector, self).__setitem__(self.__childOrSiblingSelector, [])
for k in kwargs:
self[k] = kwargs[k]
def __setitem__(self, k, v):
if k in self.__fields:
super(Selector, self).__setitem__(U(k), U(v))
super(Selector, self).__setitem__(self.__mask, self[self.__mask] | self.__fields[k][0])
else:
raise ReferenceError("%s is not allowed." % k)
def __delitem__(self, k):
if k in self.__fields:
super(Selector, self).__delitem__(k)
super(Selector, self).__setitem__(self.__mask, self[self.__mask] & ~self.__fields[k][0])
def clone(self):
kwargs = dict((k, self[k]) for k in self
if k not in [self.__mask, self.__childOrSibling, self.__childOrSiblingSelector])
selector = Selector(**kwargs)
for v in self[self.__childOrSibling]:
selector[self.__childOrSibling].append(v)
for s in self[self.__childOrSiblingSelector]:
selector[self.__childOrSiblingSelector].append(s.clone())
return selector
def child(self, **kwargs):
self[self.__childOrSibling].append("child")
self[self.__childOrSiblingSelector].append(Selector(**kwargs))
return self
def sibling(self, **kwargs):
self[self.__childOrSibling].append("sibling")
self[self.__childOrSiblingSelector].append(Selector(**kwargs))
return self
child_selector, from_parent = child, sibling
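# Hedged sketch, not part of the original module: how a Selector accumulates its bit
# mask and nested child selectors. The field values below are arbitrary examples.
def _selector_example():
    sel = Selector(text="OK", clickable=True)  # sets MASK_TEXT | MASK_CLICKABLE in sel["mask"]
    nested = sel.clone().child(className="android.widget.Button")  # adds a child UiSelector
    return sel["mask"], nested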
def rect(top=0, left=0, bottom=100, right=100):
return {"top": top, "left": left, "bottom": bottom, "right": right}
def intersect(rect1, rect2):
top = rect1["top"] if rect1["top"] > rect2["top"] else rect2["top"]
bottom = rect1["bottom"] if rect1["bottom"] < rect2["bottom"] else rect2["bottom"]
left = rect1["left"] if rect1["left"] > rect2["left"] else rect2["left"]
right = rect1["right"] if rect1["right"] < rect2["right"] else rect2["right"]
return left, top, right, bottom
def point(x=0, y=0):
return {"x": x, "y": y}
class Adb(object):
def __init__(self, serial=None, adb_server_host=None, adb_server_port=None):
self.__adb_cmd = None
self.default_serial = serial if serial else os.environ.get("ANDROID_SERIAL", None)
self.adb_server_host = str(adb_server_host if adb_server_host else 'localhost')
self.adb_server_port = str(adb_server_port if adb_server_port else '5037')
self.adbHostPortOptions = []
if self.adb_server_host not in ['localhost', '127.0.0.1']:
self.adbHostPortOptions += ["-H", self.adb_server_host]
if self.adb_server_port != '5037':
self.adbHostPortOptions += ["-P", self.adb_server_port]
def adb(self):
if self.__adb_cmd is None:
if "ANDROID_HOME" in os.environ:
filename = "adb.exe" if os.name == 'nt' else "adb"
adb_cmd = os.path.join(os.environ["ANDROID_HOME"], "platform-tools", filename)
if not os.path.exists(adb_cmd):
raise EnvironmentError(
"Adb not found in $ANDROID_HOME path: %s." % os.environ["ANDROID_HOME"])
else:
import distutils
if "spawn" not in dir(distutils):
import distutils.spawn
adb_cmd = distutils.spawn.find_executable("adb")
if adb_cmd:
adb_cmd = os.path.realpath(adb_cmd)
else:
raise EnvironmentError("$ANDROID_HOME environment not set.")
self.__adb_cmd = adb_cmd
return self.__adb_cmd
def cmd(self, *args, **kwargs):
'''adb command, add -s serial by default. return the subprocess.Popen object.'''
serial = self.device_serial()
if serial:
if " " in serial: # TODO how to include special chars on command line
serial = "'%s'" % serial
return self.raw_cmd(*["-s", serial] + list(args))
else:
return self.raw_cmd(*args)
def raw_cmd(self, *args):
'''adb command. return the subprocess.Popen object.'''
cmd_line = [self.adb()] + self.adbHostPortOptions + list(args)
if os.name != "nt":
cmd_line = [" ".join(cmd_line)]
return subprocess.Popen(cmd_line, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def device_serial(self):
if not self.default_serial:
devices = self.devices()
if devices:
if len(devices) == 1:
self.default_serial = list(devices.keys())[0]
else:
raise EnvironmentError("Multiple devices attached but default android serial not set.")
else:
raise EnvironmentError("Device not attached.")
return self.default_serial
def devices(self):
'''get a dict of attached devices. key is the device serial, value is device name.'''
out = self.raw_cmd("devices").communicate()[0].decode("utf-8")
match = "List of devices attached"
index = out.find(match)
if index < 0:
raise EnvironmentError("adb is not working.")
return dict([s.split("\t") for s in out[index + len(match):].strip().splitlines() if s.strip()])
def forward(self, local_port, device_port):
'''adb port forward. return 0 if success, else non-zero.'''
return self.cmd("forward", "tcp:%d" % local_port, "tcp:%d" % device_port).wait()
def forward_list(self):
'''adb forward --list'''
version = self.version()
if int(version[1]) <= 1 and int(version[2]) <= 0 and int(version[3]) < 31:
raise EnvironmentError("Low adb version.")
lines = self.raw_cmd("forward", "--list").communicate()[0].decode("utf-8").strip().splitlines()
return [line.strip().split() for line in lines]
def version(self):
'''adb version'''
match = re.search(r"(\d+)\.(\d+)\.(\d+)", self.raw_cmd("version").communicate()[0].decode("utf-8"))
return [match.group(i) for i in range(4)]
_init_local_port = LOCAL_PORT - 1
def next_local_port(adbHost=None):
def is_port_listening(port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = s.connect_ex((str(adbHost) if adbHost else '127.0.0.1', port))
s.close()
return result == 0
global _init_local_port
_init_local_port = _init_local_port + 1 if _init_local_port < 32764 else LOCAL_PORT
while is_port_listening(_init_local_port):
_init_local_port += 1
return _init_local_port
class NotFoundHandler(object):
'''
Handler for UI Object Not Found exception.
It's a replacement of UiAutomator watcher on device side.
'''
def __init__(self):
self.__handlers = collections.defaultdict(lambda: {'on': True, 'handlers': []})
def __get__(self, instance, type):
return self.__handlers[instance.adb.device_serial()]
class AutomatorServer(object):
"""start and quit rpc server on device.
"""
__jar_files = {
"bundle.jar": "libs/bundle.jar",
"uiautomator-stub.jar": "libs/uiautomator-stub.jar"
}
__apk_files = ["libs/app-uiautomator.apk", "libs/app-uiautomator-test.apk"]
__androidx_apk_files = ["libs/app-uiautomator-androidx.apk", "libs/app-uiautomator-test-androidx.apk"]
__sdk = 0
handlers = NotFoundHandler() # handler UI Not Found exception
def __init__(self, serial=None, local_port=None, device_port=None, adb_server_host=None, adb_server_port=None):
self.uiautomator_process = None
self.adb = Adb(serial=serial, adb_server_host=adb_server_host, adb_server_port=adb_server_port)
self.device_port = int(device_port) if device_port else DEVICE_PORT
if local_port:
self.local_port = local_port
else:
try: # first we will try to use the local port already adb forwarded
for s, lp, rp in self.adb.forward_list():
if s == self.adb.device_serial() and rp == 'tcp:%d' % self.device_port:
self.local_port = int(lp[4:])
break
else:
self.local_port = next_local_port(adb_server_host)
except:
self.local_port = next_local_port(adb_server_host)
def push(self):
base_dir = os.path.dirname(__file__)
for jar, url in self.__jar_files.items():
filename = os.path.join(base_dir, url)
self.adb.cmd("push", filename, "/data/local/tmp/").wait()
return list(self.__jar_files.keys())
def install(self):
base_dir = os.path.dirname(__file__)
for apk in self.__apk_files:
self.adb.cmd("install", "-r -t", os.path.join(base_dir, apk)).wait()
def install_androidx(self):
base_dir = os.path.dirname(__file__)
for apk in self.__androidx_apk_files:
self.adb.cmd("install", "-r -t", os.path.join(base_dir, apk)).wait()
@property
def jsonrpc(self):
return self.jsonrpc_wrap(timeout=int(os.environ.get("jsonrpc_timeout", 90)))
def jsonrpc_wrap(self, timeout):
server = self
ERROR_CODE_BASE = -32000
def _JsonRPCMethod(url, method, timeout, restart=True):
_method_obj = JsonRPCMethod(url, method, timeout)
def wrapper(*args, **kwargs):
URLError = urllib3.exceptions.HTTPError if os.name == "nt" else urllib2.URLError
try:
return _method_obj(*args, **kwargs)
except (URLError, socket.error, HTTPException) as e:
if restart:
server.stop()
server.start(timeout=30)
return _JsonRPCMethod(url, method, timeout, False)(*args, **kwargs)
else:
raise
except JsonRPCError as e:
if e.code >= ERROR_CODE_BASE - 1:
server.stop()
server.start()
return _method_obj(*args, **kwargs)
elif e.code == ERROR_CODE_BASE - 2 and self.handlers['on']: # Not Found
try:
self.handlers['on'] = False
# any handler returns True will break the left handlers
any(handler(self.handlers.get('device', None)) for handler in self.handlers['handlers'])
finally:
self.handlers['on'] = True
return _method_obj(*args, **kwargs)
raise
return wrapper
return JsonRPCClient(self.rpc_uri,
timeout=timeout,
method_class=_JsonRPCMethod)
def __jsonrpc(self):
return JsonRPCClient(self.rpc_uri, timeout=int(os.environ.get("JSONRPC_TIMEOUT", 90)))
def sdk_version(self):
'''sdk version of connected device.'''
if self.__sdk == 0:
try:
self.__sdk = int(self.adb.cmd("shell", "getprop", "ro.build.version.sdk").communicate()[0].decode("utf-8").strip())
except:
pass
return self.__sdk
def start(self, timeout=5):
if self.sdk_version() < 18:
files = self.push()
cmd = list(itertools.chain(
["shell", "uiautomator", "runtest"],
files,
["-c", "com.github.uiautomatorstub.Stub"]
))
elif self.sdk_version() >= 28:
self.install_androidx()
cmd = ["shell", "am", "instrument", "-w",
"com.github.uiautomator.test/androidx.test.runner.AndroidJUnitRunner"]
else:
self.install()
cmd = ["shell", "am", "instrument", "-w",
"com.github.uiautomator.test/android.support.test.runner.AndroidJUnitRunner"]
self.uiautomator_process = self.adb.cmd(*cmd)
self.adb.forward(self.local_port, self.device_port)
while not self.alive and timeout > 0:
time.sleep(0.1)
timeout -= 0.1
if not self.alive:
raise IOError("RPC server not started!")
def ping(self):
try:
return self.__jsonrpc().ping()
except:
return None
@property
def alive(self):
'''Check if the rpc server is alive.'''
return self.ping() == "pong"
def stop(self):
'''Stop the rpc server.'''
if self.uiautomator_process and self.uiautomator_process.poll() is None:
res = None
try:
res = urllib2.urlopen(self.stop_uri)
self.uiautomator_process.wait()
except:
self.uiautomator_process.kill()
finally:
if res is not None:
res.close()
self.uiautomator_process = None
try:
out = self.adb.cmd("shell", "ps", "-C", "uiautomator").communicate()[0].decode("utf-8").strip().splitlines()
if out:
index = out[0].split().index("PID")
for line in out[1:]:
if len(line.split()) > index:
self.adb.cmd("shell", "kill", "-9", line.split()[index]).wait()
except:
pass
@property
def stop_uri(self):
return "http://%s:%d/stop" % (self.adb.adb_server_host, self.local_port)
@property
def rpc_uri(self):
return "http://%s:%d/jsonrpc/0" % (self.adb.adb_server_host, self.local_port)
@property
def screenshot_uri(self):
return "http://%s:%d/screenshot/0" % (self.adb.adb_server_host, self.local_port)
def screenshot(self, filename=None, scale=1.0, quality=100):
if self.sdk_version() >= 18:
try:
req = urllib2.Request("%s?scale=%f&quality=%f" % (self.screenshot_uri, scale, quality))
result = urllib2.urlopen(req, timeout=30)
if filename:
with open(filename, 'wb') as f:
f.write(result.read())
return filename
else:
return result.read()
except:
pass
return None
class AutomatorDevice(object):
'''uiautomator wrapper of android device'''
__orientation = ( # device orientation
(0, "natural", "n", 0),
(1, "left", "l", 90),
(2, "upsidedown", "u", 180),
(3, "right", "r", 270)
)
__alias = {
"width": "displayWidth",
"height": "displayHeight"
}
def __init__(self, serial=None, local_port=None, adb_server_host=None, adb_server_port=None):
self.server = AutomatorServer(
serial=serial,
local_port=local_port,
adb_server_host=adb_server_host,
adb_server_port=adb_server_port
)
def __call__(self, **kwargs):
return AutomatorDeviceObject(self, Selector(**kwargs))
def __getattr__(self, attr):
'''alias of fields in info property.'''
info = self.info
if attr in info:
return info[attr]
elif attr in self.__alias:
return info[self.__alias[attr]]
else:
raise AttributeError("%s attribute not found!" % attr)
@property
def info(self):
'''Get the device info.'''
return self.server.jsonrpc.deviceInfo()
def click(self, x, y):
'''click at arbitrary coordinates.'''
return self.server.jsonrpc.click(x, y)
def long_click(self, x, y):
'''long click at arbitrary coordinates.'''
# a minimal-length swipe is used to emulate a long click at (x, y)
return self.swipe(x, y, x + 1, y + 1)
def swipe(self, sx, sy, ex, ey, steps=100):
return self.server.jsonrpc.swipe(sx, sy, ex, ey, steps)
def swipePoints(self, points, steps=100):
ppoints = []
for p in points:
ppoints.append(p[0])
ppoints.append(p[1])
return self.server.jsonrpc.swipePoints(ppoints, steps)
def drag(self, sx, sy, ex, ey, steps=100):
'''Drag from one point to another point.'''
return self.server.jsonrpc.drag(sx, sy, ex, ey, steps)
def dump(self, filename=None, compressed=True, pretty=True):
'''dump device window and pull to local file.'''
content = self.server.jsonrpc.dumpWindowHierarchy(compressed, None)
if filename:
with open(filename, "wb") as f:
f.write(content.encode("utf-8"))
if pretty and "\n " not in content:
xml_text = xml.dom.minidom.parseString(content.encode("utf-8"))
content = U(xml_text.toprettyxml(indent=' '))
return content
def screenshot(self, filename, scale=1.0, quality=100):
'''take screenshot.'''
result = self.server.screenshot(filename, scale, quality)
if result:
return result
device_file = self.server.jsonrpc.takeScreenshot("screenshot.png",
scale, quality)
if not device_file:
return None
p = self.server.adb.cmd("pull", device_file, filename)
p.wait()
self.server.adb.cmd("shell", "rm", device_file).wait()
return filename if p.returncode == 0 else None
def freeze_rotation(self, freeze=True):
'''freeze or unfreeze the device rotation in current status.'''
self.server.jsonrpc.freezeRotation(freeze)
@property
def orientation(self):
'''
orientation of the device: natural, left, right, or upsidedown.
left/l: rotation=90 , displayRotation=1
right/r: rotation=270, displayRotation=3
natural/n: rotation=0 , displayRotation=0
upsidedown/u: rotation=180, displayRotation=2
'''
return self.__orientation[self.info["displayRotation"]][1]
@orientation.setter
def orientation(self, value):
'''setter of orientation property.'''
for values in self.__orientation:
if value in values:
# can not set upside-down until api level 18.
self.server.jsonrpc.setOrientation(values[1])
break
else:
raise ValueError("Invalid orientation.")
@property
def last_traversed_text(self):
'''get last traversed text. used in webview for highlighted text.'''
return self.server.jsonrpc.getLastTraversedText()
def clear_traversed_text(self):
'''clear the last traversed text.'''
self.server.jsonrpc.clearLastTraversedText()
@property
def open(self):
'''
Open notification or quick settings.
Usage:
d.open.notification()
d.open.quick_settings()
'''
@param_to_property(action=["notification", "quick_settings"])
def _open(action):
if action == "notification":
return self.server.jsonrpc.openNotification()
else:
return self.server.jsonrpc.openQuickSettings()
return _open
@property
def handlers(self):
obj = self
class Handlers(object):
def on(self, fn):
if fn not in obj.server.handlers['handlers']:
obj.server.handlers['handlers'].append(fn)
obj.server.handlers['device'] = obj
return fn
def off(self, fn):
if fn in obj.server.handlers['handlers']:
obj.server.handlers['handlers'].remove(fn)
return Handlers()
@property
def watchers(self):
obj = self
class Watchers(list):
def __init__(self):
for watcher in obj.server.jsonrpc.getWatchers():
self.append(watcher)
@property
def triggered(self):
return obj.server.jsonrpc.hasAnyWatcherTriggered()
def remove(self, name=None):
if name:
obj.server.jsonrpc.removeWatcher(name)
else:
for name in self:
obj.server.jsonrpc.removeWatcher(name)
def reset(self):
obj.server.jsonrpc.resetWatcherTriggers()
return self
def run(self):
obj.server.jsonrpc.runWatchers()
return self
return Watchers()
def watcher(self, name):
obj = self
class Watcher(object):
def __init__(self):
self.__selectors = []
@property
def triggered(self):
return obj.server.jsonrpc.hasWatcherTriggered(name)
def remove(self):
obj.server.jsonrpc.removeWatcher(name)
def when(self, **kwargs):
self.__selectors.append(Selector(**kwargs))
return self
def click(self, **kwargs):
obj.server.jsonrpc.registerClickUiObjectWatcher(name, self.__selectors, Selector(**kwargs))
@property
def press(self):
@param_to_property(
"home", "back", "left", "right", "up", "down", "center",
"search", "enter", "delete", "del", "recent", "volume_up",
"menu", "volume_down", "volume_mute", "camera", "power")
def _press(*args):
obj.server.jsonrpc.registerPressKeyskWatcher(name, self.__selectors, args)
return _press
return Watcher()
@property
def press(self):
'''
press key via name or key code. Supported key name includes:
home, back, left, right, up, down, center, menu, search, enter,
delete(or del), recent(recent apps), volume_up, volume_down,
volume_mute, camera, power.
Usage:
d.press.back() # press back key
d.press.menu() # press menu key
d.press(89) # press keycode
'''
@param_to_property(
key=["home", "back", "left", "right", "up", "down", "center",
"menu", "search", "enter", "delete", "del", "recent",
"volume_up", "volume_down", "volume_mute", "camera", "power"]
)
def _press(key, meta=None):
if isinstance(key, int):
return self.server.jsonrpc.pressKeyCode(key, meta) if meta else self.server.jsonrpc.pressKeyCode(key)
else:
return self.server.jsonrpc.pressKey(str(key))
return _press
def wakeup(self):
'''turn on screen in case of screen off.'''
self.server.jsonrpc.wakeUp()
def sleep(self):
'''turn off screen in case of screen on.'''
self.server.jsonrpc.sleep()
@property
def screen(self):
'''
Turn on/off screen.
Usage:
d.screen.on()
d.screen.off()
d.screen == 'on' # Check if the screen is on, same as 'd.screenOn'
d.screen == 'off' # Check if the screen is off, same as 'not d.screenOn'
'''
devive_self = self
class _Screen(object):
def on(self):
return devive_self.wakeup()
def off(self):
return devive_self.sleep()
def __call__(self, action):
if action == "on":
return self.on()
elif action == "off":
return self.off()
else:
raise AttributeError("Invalid parameter: %s" % action)
def __eq__(self, value):
info = devive_self.info
if "screenOn" not in info:
raise EnvironmentError("Not supported on Android 4.3 and belows.")
if value in ["on", "On", "ON"]:
return info["screenOn"]
elif value in ["off", "Off", "OFF"]:
return not info["screenOn"]
raise ValueError("Invalid parameter. It can only be compared with on/off.")
def __ne__(self, value):
return not self.__eq__(value)
return _Screen()
@property
def wait(self):
'''
Wait for the current application to become idle or for a window update event to occur.
Usage:
d.wait.idle(timeout=1000)
d.wait.update(timeout=1000, package_name="com.android.settings")
'''
@param_to_property(action=["idle", "update"])
def _wait(action, timeout=1000, package_name=None):
if timeout / 1000 + 5 > int(os.environ.get("JSONRPC_TIMEOUT", 90)):
http_timeout = timeout / 1000 + 5
else:
http_timeout = int(os.environ.get("JSONRPC_TIMEOUT", 90))
if action == "idle":
return self.server.jsonrpc_wrap(timeout=http_timeout).waitForIdle(timeout)
elif action == "update":
return self.server.jsonrpc_wrap(timeout=http_timeout).waitForWindowUpdate(package_name, timeout)
return _wait
def exists(self, **kwargs):
'''Check if the specified ui object by kwargs exists.'''
return self(**kwargs).exists
Device = AutomatorDevice
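# Hedged usage sketch, not part of the original module: the typical flow with the
# classes above. The selector text is a placeholder, and this assumes an Android
# device is attached with the uiautomator RPC server reachable through adb.
def _device_usage_example():
    d = Device()                      # serial from ANDROID_SERIAL, or the single attached device
    d.screen.on()                     # wake the screen
    d.press.home()                    # press the home key
    if d(text="Settings").exists:     # Selector-based lookup (see AutomatorDeviceObject below)
        d(text="Settings").click()
    return d.info                     # raw deviceInfo() from the RPC server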
class AutomatorDeviceUiObject(object):
'''Represent a UiObject, on which user can perform actions, such as click, set text
'''
__alias = {'description': "contentDescription"}
def __init__(self, device, selector):
self.device = device
self.jsonrpc = device.server.jsonrpc
self.selector = selector
@property
def exists(self):
'''check if the object exists in current window.'''
return self.jsonrpc.exist(self.selector)
def __getattr__(self, attr):
'''alias of fields in info property.'''
info = self.info
if attr in info:
return info[attr]
elif attr in self.__alias:
return info[self.__alias[attr]]
else:
raise AttributeError("%s attribute not found!" % attr)
@property
def info(self):
'''ui object info.'''
return self.jsonrpc.objInfo(self.selector)
def set_text(self, text):
'''set the text field.'''
if text in [None, ""]:
return self.jsonrpc.clearTextField(self.selector) # TODO no return
else:
return self.jsonrpc.setText(self.selector, text)
def clear_text(self):
'''clear text. alias for set_text(None).'''
self.set_text(None)
@property
def click(self):
'''
click on the ui object.
Usage:
d(text="Clock").click() # click on the center of the ui object
d(text="OK").click.wait(timeout=3000) # click and wait for the new window update
d(text="John").click.topleft() # click on the topleft of the ui object
d(text="John").click.bottomright() # click on the bottomright of the ui object
'''
@param_to_property(action=["tl", "topleft", "br", "bottomright", "wait"])
def _click(action=None, timeout=3000):
if action is None:
return self.jsonrpc.click(self.selector)
elif action in ["tl", "topleft", "br", "bottomright"]:
return self.jsonrpc.click(self.selector, action)
else:
return self.jsonrpc.clickAndWaitForNewWindow(self.selector, timeout)
return _click
@property
def long_click(self):
'''
Perform a long click action on the object.
Usage:
d(text="Image").long_click() # long click on the center of the ui object
d(text="Image").long_click.topleft() # long click on the topleft of the ui object
d(text="Image").long_click.bottomright() # long click on the topleft of the ui object
'''
@param_to_property(corner=["tl", "topleft", "br", "bottomright"])
def _long_click(corner=None):
info = self.info
if info["longClickable"]:
if corner:
return self.jsonrpc.longClick(self.selector, corner)
else:
return self.jsonrpc.longClick(self.selector)
else:
bounds = info.get("visibleBounds") or info.get("bounds")
if corner in ["tl", "topleft"]:
x = (5 * bounds["left"] + bounds["right"]) / 6
y = (5 * bounds["top"] + bounds["bottom"]) / 6
elif corner in ["br", "bottomright"]:
x = (bounds["left"] + 5 * bounds["right"]) / 6
y = (bounds["top"] + 5 * bounds["bottom"]) / 6
else:
x = (bounds["left"] + bounds["right"]) / 2
y = (bounds["top"] + bounds["bottom"]) / 2
return self.device.long_click(x, y)
return _long_click
@property
def drag(self):
'''
Drag the ui object to other point or ui object.
Usage:
d(text="Clock").drag.to(x=100, y=100) # drag to point (x,y)
d(text="Clock").drag.to(text="Remove") # drag to another object
'''
def to(obj, *args, **kwargs):
if len(args) >= 2 or "x" in kwargs or "y" in kwargs:
drag_to = lambda x, y, steps=100: self.jsonrpc.dragTo(self.selector, x, y, steps)
else:
drag_to = lambda steps=100, **kwargs: self.jsonrpc.dragTo(self.selector, Selector(**kwargs), steps)
return drag_to(*args, **kwargs)
return type("Drag", (object,), {"to": to})()
def gesture(self, start1, start2, *args, **kwargs):
'''
perform two point gesture.
Usage:
d().gesture(startPoint1, startPoint2).to(endPoint1, endPoint2, steps)
d().gesture(startPoint1, startPoint2, endPoint1, endPoint2, steps)
'''
def to(obj_self, end1, end2, steps=100):
ctp = lambda pt: point(*pt) if type(pt) == tuple else pt # convert tuple to point
s1, s2, e1, e2 = ctp(start1), ctp(start2), ctp(end1), ctp(end2)
return self.jsonrpc.gesture(self.selector, s1, s2, e1, e2, steps)
obj = type("Gesture", (object,), {"to": to})()
return obj if len(args) == 0 else to(None, *args, **kwargs)
def gestureM(self, start1, start2, start3, *args, **kwargs):
'''
perform 3 point gesture.
Usage:
d().gestureM((100,200),(100,300),(100,400),(100,400),(100,400),(100,400))
d().gestureM((100,200),(100,300),(100,400)).to((100,400),(100,400),(100,400))
'''
def to(obj_self, end1, end2, end3, steps=100):
ctp = lambda pt: point(*pt) if type(pt) == tuple else pt # convert tuple to point
s1, s2, s3, e1, e2, e3 = ctp(start1), ctp(start2), ctp(start3), ctp(end1), ctp(end2), ctp(end3)
return self.jsonrpc.gesture(self.selector, s1, s2, s3, e1, e2, e3, steps)
obj = type("Gesture", (object,), {"to": to})()
return obj if len(args) == 0 else to(None, *args, **kwargs)
@property
def pinch(self):
'''
Perform a two-point gesture from edge to center (in) or center to edge (out).
Usages:
d().pinch.In(percent=100, steps=10)
d().pinch.Out(percent=100, steps=100)
'''
@param_to_property(in_or_out=["In", "Out"])
def _pinch(in_or_out="Out", percent=100, steps=50):
if in_or_out in ["Out", "out"]:
return self.jsonrpc.pinchOut(self.selector, percent, steps)
elif in_or_out in ["In", "in"]:
return self.jsonrpc.pinchIn(self.selector, percent, steps)
return _pinch
@property
def swipe(self):
'''
Perform a swipe action. On devices above API level 18, percent can be used with a value between 0 and 1.
Usages:
d().swipe.right()
d().swipe.left(steps=10)
d().swipe.up(steps=10)
d().swipe.down()
d().swipe("right", steps=20)
d().swipe("right", steps=20, percent=0.5)
'''
@param_to_property(direction=["up", "down", "right", "left"])
def _swipe(direction="left", steps=10, percent=1):
if percent == 1:
return self.jsonrpc.swipe(self.selector, direction, steps)
else:
return self.jsonrpc.swipe(self.selector, direction, percent, steps)
return _swipe
@property
def wait(self):
'''
Wait until the UI object is gone or until it exists.
Usage:
d(text="Clock").wait.gone() # wait until it's gone.
d(text="Settings").wait.exists() # wait until it appears.
'''
@param_to_property(action=["exists", "gone"])
def _wait(action, timeout=3000):
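# The HTTP timeout must outlive the device-side wait: use the wait timeout
# (milliseconds) converted to seconds plus a 5 second margin, unless the
# JSONRPC_TIMEOUT environment value is already larger.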
if timeout / 1000 + 5 > int(os.environ.get("JSONRPC_TIMEOUT", 90)):
http_timeout = timeout / 1000 + 5
else:
http_timeout = int(os.environ.get("JSONRPC_TIMEOUT", 90))
method = self.device.server.jsonrpc_wrap(
timeout=http_timeout
).waitUntilGone if action == "gone" else self.device.server.jsonrpc_wrap(timeout=http_timeout).waitForExists
return method(self.selector, timeout)
return _wait
class AutomatorDeviceNamedUiObject(AutomatorDeviceUiObject):
def __init__(self, device, name):
super(AutomatorDeviceNamedUiObject, self).__init__(device, name)
def child(self, **kwargs):
return AutomatorDeviceNamedUiObject(
self.device,
self.jsonrpc.getChild(self.selector, Selector(**kwargs))
)
def sibling(self, **kwargs):
return AutomatorDeviceNamedUiObject(
self.device,
self.jsonrpc.getFromParent(self.selector, Selector(**kwargs))
)
class AutomatorDeviceObject(AutomatorDeviceUiObject):
'''Represent a generic UiObject/UiScrollable/UiCollection,
on which the user can perform actions such as click and set text.
'''
def __init__(self, device, selector):
super(AutomatorDeviceObject, self).__init__(device, selector)
def child(self, **kwargs):
'''set childSelector.'''
return AutomatorDeviceObject(
self.device,
self.selector.clone().child(**kwargs)
)
def sibling(self, **kwargs):
'''set fromParent selector.'''
return AutomatorDeviceObject(
self.device,
self.selector.clone().sibling(**kwargs)
)
child_selector, from_parent = child, sibling
def child_by_text(self, txt, **kwargs):
if "allow_scroll_search" in kwargs:
allow_scroll_search = kwargs.pop("allow_scroll_search")
name = self.jsonrpc.childByText(
self.selector,
Selector(**kwargs),
txt,
allow_scroll_search
)
else:
name = self.jsonrpc.childByText(
self.selector,
Selector(**kwargs),
txt
)
return AutomatorDeviceNamedUiObject(self.device, name)
def child_by_description(self, txt, **kwargs):
if "allow_scroll_search" in kwargs:
allow_scroll_search = kwargs.pop("allow_scroll_search")
name = self.jsonrpc.childByDescription(
self.selector,
Selector(**kwargs),
txt,
allow_scroll_search
)
else:
name = self.jsonrpc.childByDescription(
self.selector,
Selector(**kwargs),
txt
)
return AutomatorDeviceNamedUiObject(self.device, name)
def child_by_instance(self, inst, **kwargs):
return AutomatorDeviceNamedUiObject(
self.device,
self.jsonrpc.childByInstance(self.selector, Selector(**kwargs), inst)
)
@property
def count(self):
return self.jsonrpc.count(self.selector)
def __len__(self):
return self.count
def __getitem__(self, index):
count = self.count
if index >= count:
raise IndexError()
elif count == 1:
return self
else:
selector = self.selector.clone()
selector["instance"] = index
return AutomatorDeviceObject(self.device, selector)
def __iter__(self):
obj, length = self, self.count
class Iter(object):
def __init__(self):
self.index = -1
def next(self):
self.index += 1
if self.index < length:
return obj[self.index]
else:
raise StopIteration()
__next__ = next
return Iter()
def right(self, **kwargs):
def onrightof(rect1, rect2):
left, top, right, bottom = intersect(rect1, rect2)
return rect2["left"] - rect1["right"] if top < bottom else -1
return self.__view_beside(onrightof, **kwargs)
def left(self, **kwargs):
def onleftof(rect1, rect2):
left, top, right, bottom = intersect(rect1, rect2)
return rect1["left"] - rect2["right"] if top < bottom else -1
return self.__view_beside(onleftof, **kwargs)
def up(self, **kwargs):
def above(rect1, rect2):
left, top, right, bottom = intersect(rect1, rect2)
return rect1["top"] - rect2["bottom"] if left < right else -1
return self.__view_beside(above, **kwargs)
def down(self, **kwargs):
def under(rect1, rect2):
left, top, right, bottom = intersect(rect1, rect2)
return rect2["top"] - rect1["bottom"] if left < right else -1
return self.__view_beside(under, **kwargs)
def __view_beside(self, onsideof, **kwargs):
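# Iterate every object matching the selector and keep the one with the smallest
# non-negative distance reported by the onsideof metric (right/left/up/down above).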
bounds = self.info["bounds"]
min_dist, found = -1, None
for ui in AutomatorDeviceObject(self.device, Selector(**kwargs)):
dist = onsideof(bounds, ui.info["bounds"])
if dist >= 0 and (min_dist < 0 or dist < min_dist):
min_dist, found = dist, ui
return found
@property
def fling(self):
'''
Perform fling action.
Usage:
d().fling() # default vertically, forward
d().fling.horiz.forward()
d().fling.vert.backward()
d().fling.toBeginning(max_swipes=100) # vertically
d().fling.horiz.toEnd()
'''
@param_to_property(
dimention=["vert", "vertically", "vertical", "horiz", "horizental", "horizentally"],
action=["forward", "backward", "toBeginning", "toEnd"]
)
def _fling(dimention="vert", action="forward", max_swipes=1000):
vertical = dimention in ["vert", "vertically", "vertical"]
if action == "forward":
return self.jsonrpc.flingForward(self.selector, vertical)
elif action == "backward":
return self.jsonrpc.flingBackward(self.selector, vertical)
elif action == "toBeginning":
return self.jsonrpc.flingToBeginning(self.selector, vertical, max_swipes)
elif action == "toEnd":
return self.jsonrpc.flingToEnd(self.selector, vertical, max_swipes)
return _fling
@property
def scroll(self):
'''
Perform scroll action.
Usage:
d().scroll(steps=50) # default vertically and forward
d().scroll.horiz.forward(steps=100)
d().scroll.vert.backward(steps=100)
d().scroll.horiz.toBeginning(steps=100, max_swipes=100)
d().scroll.vert.toEnd(steps=100)
d().scroll.horiz.to(text="Clock")
'''
def __scroll(vertical, forward, steps=100):
method = self.jsonrpc.scrollForward if forward else self.jsonrpc.scrollBackward
return method(self.selector, vertical, steps)
def __scroll_to_beginning(vertical, steps=100, max_swipes=1000):
return self.jsonrpc.scrollToBeginning(self.selector, vertical, max_swipes, steps)
def __scroll_to_end(vertical, steps=100, max_swipes=1000):
return self.jsonrpc.scrollToEnd(self.selector, vertical, max_swipes, steps)
def __scroll_to(vertical, **kwargs):
return self.jsonrpc.scrollTo(self.selector, Selector(**kwargs), vertical)
@param_to_property(
dimention=["vert", "vertically", "vertical", "horiz", "horizental", "horizentally"],
action=["forward", "backward", "toBeginning", "toEnd", "to"])
def _scroll(dimention="vert", action="forward", **kwargs):
vertical = dimention in ["vert", "vertically", "vertical"]
if action in ["forward", "backward"]:
return __scroll(vertical, action == "forward", **kwargs)
elif action == "toBeginning":
return __scroll_to_beginning(vertical, **kwargs)
elif action == "toEnd":
return __scroll_to_end(vertical, **kwargs)
elif action == "to":
return __scroll_to(vertical, **kwargs)
return _scroll
device = AutomatorDevice()
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os.path
import boto3.session
import botocore.exceptions
import freezegun
import pretend
import pytest
import redis
from zope.interface.verify import verifyClass
from warehouse.packaging.interfaces import IDownloadStatService, IFileStorage
from warehouse.packaging.services import (
RedisDownloadStatService, LocalFileStorage, S3FileStorage,
)
@freezegun.freeze_time("2012-01-14")
class TestRedisDownloadStatService:
def test_verify_service(self):
assert verifyClass(IDownloadStatService, RedisDownloadStatService)
def test_creates_redis(self, monkeypatch):
redis_obj = pretend.stub()
redis_cls = pretend.stub(
from_url=pretend.call_recorder(lambda u: redis_obj),
)
monkeypatch.setattr(redis, "StrictRedis", redis_cls)
url = pretend.stub()
svc = RedisDownloadStatService(url)
assert svc.redis is redis_obj
assert redis_cls.from_url.calls == [pretend.call(url)]
@pytest.mark.parametrize(
("keys", "result"),
[
([], 0),
([5, 7, 8], 20),
]
)
def test_get_daily_stats(self, keys, result):
svc = RedisDownloadStatService("")
svc.redis = pretend.stub(mget=pretend.call_recorder(lambda *a: keys))
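# With time frozen at 2012-01-14 00:00, daily stats should read the current hour
# bucket, all 24 hourly buckets of the previous day, and the final hour of the
# day before that (26 keys in total).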
call_keys = (
["downloads:hour:12-01-14-00:foo"] +
[
"downloads:hour:12-01-13-{:02d}:foo".format(i)
for i in reversed(range(24))
] +
["downloads:hour:12-01-12-23:foo"]
)
assert svc.get_daily_stats("foo") == result
assert svc.redis.mget.calls == [pretend.call(*call_keys)]
@pytest.mark.parametrize(
("keys", "result"),
[
([], 0),
([5, 7, 8], 20),
]
)
def test_get_weekly_stats(self, keys, result):
svc = RedisDownloadStatService("")
svc.redis = pretend.stub(mget=pretend.call_recorder(lambda *a: keys))
call_keys = [
"downloads:daily:12-01-{:02d}:foo".format(i + 7)
for i in reversed(range(8))
]
assert svc.get_weekly_stats("foo") == result
assert svc.redis.mget.calls == [pretend.call(*call_keys)]
@pytest.mark.parametrize(
("keys", "result"),
[
([], 0),
([5, 7, 8], 20),
]
)
def test_get_monthly_stats(self, keys, result):
svc = RedisDownloadStatService("")
svc.redis = pretend.stub(mget=pretend.call_recorder(lambda *a: keys))
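# Monthly stats span 31 daily buckets: 2012-01-14 back through 2012-01-01, then
# 2011-12-31 back through 2011-12-15.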
call_keys = [
"downloads:daily:12-01-{:02d}:foo".format(i)
for i in reversed(range(1, 15))
] + [
"downloads:daily:11-12-{:02d}:foo".format(i + 15)
for i in reversed(range(17))
]
assert svc.get_monthly_stats("foo") == result
assert svc.redis.mget.calls == [pretend.call(*call_keys)]
class TestLocalFileStorage:
def test_verify_service(self):
assert verifyClass(IFileStorage, LocalFileStorage)
def test_basic_init(self):
storage = LocalFileStorage("/foo/bar/")
assert storage.base == "/foo/bar/"
def test_create_service(self):
request = pretend.stub(
registry=pretend.stub(
settings={"files.path": "/the/one/two/"},
),
)
storage = LocalFileStorage.create_service(None, request)
assert storage.base == "/the/one/two/"
def test_gets_file(self, tmpdir):
with open(str(tmpdir.join("file.txt")), "wb") as fp:
fp.write(b"my test file contents")
storage = LocalFileStorage(str(tmpdir))
file_object = storage.get("file.txt")
assert file_object.read() == b"my test file contents"
def test_raises_when_file_non_existant(self, tmpdir):
storage = LocalFileStorage(str(tmpdir))
with pytest.raises(FileNotFoundError):
storage.get("file.txt")
def test_stores_file(self, tmpdir):
storage = LocalFileStorage(str(tmpdir))
storage.store("foo/bar.txt", io.BytesIO(b"Test File!"))
with open(os.path.join(str(tmpdir), "foo/bar.txt"), "rb") as fp:
assert fp.read() == b"Test File!"
def test_stores_two_files(self, tmpdir):
storage = LocalFileStorage(str(tmpdir))
storage.store("foo/first.txt", io.BytesIO(b"First Test File!"))
storage.store("foo/second.txt", io.BytesIO(b"Second Test File!"))
with open(os.path.join(str(tmpdir), "foo/first.txt"), "rb") as fp:
assert fp.read() == b"First Test File!"
with open(os.path.join(str(tmpdir), "foo/second.txt"), "rb") as fp:
assert fp.read() == b"Second Test File!"
class TestS3FileStorage:
def test_verify_service(self):
assert verifyClass(IFileStorage, S3FileStorage)
def test_basic_init(self):
bucket = pretend.stub()
storage = S3FileStorage(bucket)
assert storage.bucket is bucket
def test_create_service(self):
session = boto3.session.Session()
request = pretend.stub(
find_service=pretend.call_recorder(lambda name: session),
registry=pretend.stub(settings={"files.bucket": "froblob"}),
)
storage = S3FileStorage.create_service(None, request)
assert request.find_service.calls == [pretend.call(name="aws.session")]
assert storage.bucket.name == "froblob"
def test_gets_file(self):
s3key = pretend.stub(get=lambda: {"Body": io.BytesIO(b"my contents")})
bucket = pretend.stub(Object=pretend.call_recorder(lambda path: s3key))
storage = S3FileStorage(bucket)
file_object = storage.get("file.txt")
assert file_object.read() == b"my contents"
assert bucket.Object.calls == [pretend.call("file.txt")]
def test_raises_when_key_non_existant(self):
def raiser():
raise botocore.exceptions.ClientError(
{"Error": {"Code": "NoSuchKey", "Message": "No Key!"}},
"some operation",
)
s3key = pretend.stub(get=raiser)
bucket = pretend.stub(Object=pretend.call_recorder(lambda path: s3key))
storage = S3FileStorage(bucket)
with pytest.raises(FileNotFoundError):
storage.get("file.txt")
assert bucket.Object.calls == [pretend.call("file.txt")]
def test_passes_up_error_when_not_no_such_key(self):
def raiser():
raise botocore.exceptions.ClientError(
{"Error": {"Code": "SomeOtherError", "Message": "Who Knows!"}},
"some operation",
)
s3key = pretend.stub(get=raiser)
bucket = pretend.stub(Object=lambda path: s3key)
storage = S3FileStorage(bucket)
with pytest.raises(botocore.exceptions.ClientError):
storage.get("file.txt")
def test_stores_file(self):
obj = pretend.stub(put=pretend.call_recorder(lambda Body: None))
bucket = pretend.stub(Object=pretend.call_recorder(lambda path: obj))
storage = S3FileStorage(bucket)
storage.store("foo/bar.txt", io.BytesIO(b"Test File!"))
assert bucket.Object.calls == [pretend.call("foo/bar.txt")]
assert obj.put.calls == [pretend.call(Body=b"Test File!")]
def test_stores_two_files(self):
obj = pretend.stub(put=pretend.call_recorder(lambda Body: None))
bucket = pretend.stub(Object=pretend.call_recorder(lambda path: obj))
storage = S3FileStorage(bucket)
storage.store("foo/first.txt", io.BytesIO(b"First Test File!"))
storage.store("foo/second.txt", io.BytesIO(b"Second Test File!"))
assert bucket.Object.calls == [
pretend.call("foo/first.txt"),
pretend.call("foo/second.txt"),
]
assert obj.put.calls == [
pretend.call(Body=b"First Test File!"),
pretend.call(Body=b"Second Test File!"),
]
|
|
#!/usr/bin/env python
"""
Sentry
======
Sentry is a realtime event logging and aggregation platform. It specializes
in monitoring errors and extracting all the information needed to do a proper
post-mortem without any of the hassle of the standard user feedback loop.
Sentry is a Server
------------------
The Sentry package, at its core, is just a simple server and web UI. It will
handle authentication clients (such as `Raven <https://github.com/getsentry/raven-python>`_)
and all of the logic behind storage and aggregation.
That said, Sentry is not limited to Python. The primary implementation is in
Python, but it contains a full API for sending events from any language, in
any application.
:copyright: (c) 2011-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import datetime
import json
import os.path
from distutils import log
from distutils.core import Command
from setuptools.command.install import install
from setuptools.command.develop import develop
from setuptools.command.sdist import sdist
from setuptools import setup, find_packages
from subprocess import check_output
# Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error
# in multiprocessing/util.py _exit_function when running `python
# setup.py test` (see
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
for m in ('multiprocessing', 'billiard'):
try:
__import__(m)
except ImportError:
pass
ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__)))
dev_requires = [
'flake8>=2.0,<2.1',
]
tests_require = [
'blist', # used by cassandra
'casscache',
'cqlsh',
'elasticsearch',
'httpretty',
'pytest-cov>=1.4',
'pytest-timeout',
'python-coveralls',
'responses',
'riak',
]
install_requires = [
'BeautifulSoup>=3.2.1,<3.3.0',
'celery>=3.1.8,<3.2.0',
'cssutils>=0.9.9,<0.10.0',
'Django>=1.6.0,<1.7',
'django-bitfield>=1.7.0,<1.8.0',
'django-crispy-forms>=1.4.0,<1.5.0',
'django-paging>=0.2.5,<0.3.0',
'django-jsonfield>=0.9.13,<0.9.14',
'django-picklefield>=0.3.0,<0.4.0',
'django-recaptcha>=1.0.0,<1.1.0',
'django-social-auth>=0.7.28,<0.8.0',
'django-statsd-mozilla>=0.3.14.0,<0.3.15.0',
'django-sudo>=1.1.3,<1.2.0',
'django-templatetag-sugar>=0.1.0',
'djangorestframework>=2.3.8,<2.4.0',
'email-reply-parser>=0.2.0,<0.3.0',
'enum34>=0.9.18,<0.10.0',
'exam>=0.5.1',
'gunicorn>=19.2.1,<20.0.0',
'ipaddr>=2.1.11,<2.2.0',
'logan>=0.7.1,<0.8.0',
'lxml>=3.4.1',
'mock>=0.8.0',
'nydus>=0.11.0,<0.12.0',
'markdown>=2.4.1,<2.5.0',
'petname>=1.7,<1.8',
'progressbar>=2.2,<2.4',
'pytest',
'pytest-django',
'python-dateutil>=2.0.0,<3.0.0',
'python-memcached>=1.53,<2.0.0',
'raven>=5.3.0',
'redis>=2.7.0,<2.11.0',
'requests[security]>=2.7.0,<2.8.0',
'simplejson>=3.1.0,<3.4.0',
'six>=1.6.0,<2.0.0',
'setproctitle>=1.1.7,<1.2.0',
'statsd>=3.1.0,<3.2.0',
'South==1.0.1',
'toronado>=0.0.4,<0.1.0',
'ua-parser>=0.3.5',
'urllib3>=1.7.1,<1.8.0',
]
postgres_requires = [
'psycopg2>=2.5.0,<2.6.0',
]
postgres_pypy_requires = [
'psycopg2cffi',
]
mysql_requires = [
'MySQL-python>=1.2.0,<1.3.0',
]
class DevelopWithBuildStatic(develop):
def install_for_development(self):
self.run_command('build_static')
return develop.install_for_development(self)
class SdistWithBuildStatic(sdist):
def make_release_tree(self, *a, **kw):
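# Build the static assets inside the release tree and drop a sentry-package.json
# marker there, so installing from the resulting sdist skips the rebuild (see
# SmartInstall._needs_static below).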
dist_path = self.distribution.get_fullname()
sdist.make_release_tree(self, *a, **kw)
self.reinitialize_command('build_static', work_path=dist_path)
self.run_command('build_static')
with open(os.path.join(dist_path, 'sentry-package.json'), 'w') as fp:
json.dump({
'createdAt': datetime.datetime.utcnow().isoformat() + 'Z',
}, fp)
class BuildStatic(Command):
user_options = [
('work-path=', 'w',
"The working directory for source files. Defaults to ."),
]
def initialize_options(self):
self.work_path = None
def finalize_options(self):
if self.work_path is None:
self.work_path = ROOT
def run(self):
work_path = self.work_path
log.info("initializing git submodules")
check_output(['git', 'submodule', 'init'], cwd=work_path)
check_output(['git', 'submodule', 'update'], cwd=work_path)
log.info("running [npm install --quiet]")
check_output(['npm', 'install', '--quiet'], cwd=work_path)
log.info("running [gulp dist]")
check_output([os.path.join('node_modules', '.bin', 'gulp'), 'dist'],
cwd=work_path)
class SmartInstall(install):
"""
Installs Sentry into the Python environment.
If the package indicator is missing, this will also force a run of
`build_static` which is required for JavaScript assets and other things.
"""
def _needs_static(self):
return not os.path.exists(os.path.join(ROOT, 'sentry-package.json'))
def run(self):
if self._needs_static():
self.run_command('build_static')
install.run(self)
setup(
name='sentry',
version='7.8.0.dev0',
author='David Cramer',
author_email='[email protected]',
url='https://www.getsentry.com',
description='A realtime logging and aggregation server.',
long_description=open('README.rst').read(),
package_dir={'': 'src'},
packages=find_packages('src'),
zip_safe=False,
install_requires=install_requires,
extras_require={
'tests': tests_require,
'dev': dev_requires,
'postgres': install_requires + postgres_requires,
'postgres_pypy': install_requires + postgres_pypy_requires,
'mysql': install_requires + mysql_requires,
},
cmdclass={
'build_static': BuildStatic,
'develop': DevelopWithBuildStatic,
'sdist': SdistWithBuildStatic,
'install': SmartInstall,
},
license='BSD',
include_package_data=True,
entry_points={
'console_scripts': [
'sentry = sentry.utils.runner:main',
],
},
classifiers=[
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
|
|
#!/usr/bin/python
import binascii, re, json, copy, sys
from bitcoin.main import *
from _functools import reduce
### Hex to bin converter and vice versa for objects
def json_is_base(obj, base):
if not is_python2 and isinstance(obj, bytes):
return False
alpha = get_code_string(base)
if isinstance(obj, string_types):
for i in range(len(obj)):
if alpha.find(obj[i]) == -1:
return False
return True
elif isinstance(obj, int_types) or obj is None:
return True
elif isinstance(obj, list):
for i in range(len(obj)):
if not json_is_base(obj[i], base):
return False
return True
else:
for x in obj:
if not json_is_base(obj[x], base):
return False
return True
def json_changebase(obj, changer):
if isinstance(obj, string_or_bytes_types):
return changer(obj)
elif isinstance(obj, int_types) or obj is None:
return obj
elif isinstance(obj, list):
return [json_changebase(x, changer) for x in obj]
return dict((x, json_changebase(obj[x], changer)) for x in obj)
# Transaction serialization and deserialization
def deserialize(tx):
if isinstance(tx, str) and re.match('^[0-9a-fA-F]*$', tx):
#tx = bytes(bytearray.fromhex(tx))
return json_changebase(deserialize(binascii.unhexlify(tx)),
lambda x: safe_hexlify(x))
# http://stackoverflow.com/questions/4851463/python-closure-write-to-variable-in-parent-scope
# Python's scoping rules are demented, requiring me to make pos an object
# so that it is call-by-reference
pos = [0]
def read_as_int(bytez):
pos[0] += bytez
return decode(tx[pos[0]-bytez:pos[0]][::-1], 256)
def read_var_int():
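# Bitcoin var-int: values below 253 fit in one byte; prefixes 253/254/255 are
# followed by a 2/4/8 byte little-endian integer (pow(2, val - 252)).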
pos[0] += 1
val = from_byte_to_int(tx[pos[0]-1])
if val < 253:
return val
return read_as_int(pow(2, val - 252))
def read_bytes(bytez):
pos[0] += bytez
return tx[pos[0]-bytez:pos[0]]
def read_var_string():
size = read_var_int()
return read_bytes(size)
obj = {"ins": [], "outs": []}
obj["version"] = read_as_int(4)
ins = read_var_int()
for i in range(ins):
obj["ins"].append({
"outpoint": {
"hash": read_bytes(32)[::-1],
"index": read_as_int(4)
},
"script": read_var_string(),
"sequence": read_as_int(4)
})
outs = read_var_int()
for i in range(outs):
obj["outs"].append({
"value": read_as_int(8),
"script": read_var_string()
})
obj["locktime"] = read_as_int(4)
return obj
def serialize(txobj):
#if isinstance(txobj, bytes):
# txobj = bytes_to_hex_string(txobj)
o = []
if json_is_base(txobj, 16):
json_changedbase = json_changebase(txobj, lambda x: binascii.unhexlify(x))
hexlified = safe_hexlify(serialize(json_changedbase))
return hexlified
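# Binary path: version, var-int input count, each input (reversed outpoint hash,
# index, length-prefixed script, sequence), var-int output count, each output
# (value, length-prefixed script), then locktime, little-endian throughout.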
o.append(encode(txobj["version"], 256, 4)[::-1])
o.append(num_to_var_int(len(txobj["ins"])))
for inp in txobj["ins"]:
o.append(inp["outpoint"]["hash"][::-1])
o.append(encode(inp["outpoint"]["index"], 256, 4)[::-1])
o.append(num_to_var_int(len(inp["script"]))+(inp["script"] if inp["script"] or is_python2 else bytes()))
o.append(encode(inp["sequence"], 256, 4)[::-1])
o.append(num_to_var_int(len(txobj["outs"])))
for out in txobj["outs"]:
o.append(encode(out["value"], 256, 8)[::-1])
o.append(num_to_var_int(len(out["script"]))+out["script"])
o.append(encode(txobj["locktime"], 256, 4)[::-1])
return ''.join(o) if is_python2 else reduce(lambda x,y: x+y, o, bytes())
# Hashing transactions for signing
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
# this works like SIGHASH_ANYONECANPAY | SIGHASH_ALL, might as well make it explicit while
# we fix the constant
SIGHASH_ANYONECANPAY = 0x81
def signature_form(tx, i, script, hashcode=SIGHASH_ALL):
i, hashcode = int(i), int(hashcode)
if isinstance(tx, string_or_bytes_types):
return serialize(signature_form(deserialize(tx), i, script, hashcode))
newtx = copy.deepcopy(tx)
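# Legacy sighash preparation: blank every input script, insert the subscript only
# at input i, then adjust per hashcode (NONE drops all outputs; SINGLE truncates
# outputs to the input count and blanks all but the last; ANYONECANPAY keeps only
# input i).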
for inp in newtx["ins"]:
inp["script"] = ""
newtx["ins"][i]["script"] = script
if hashcode == SIGHASH_NONE:
newtx["outs"] = []
elif hashcode == SIGHASH_SINGLE:
newtx["outs"] = newtx["outs"][:len(newtx["ins"])]
for out in newtx["outs"][:len(newtx["ins"]) - 1]:
out['value'] = 2**64 - 1
out['script'] = ""
elif hashcode == SIGHASH_ANYONECANPAY:
newtx["ins"] = [newtx["ins"][i]]
else:
pass
return newtx
# Making the actual signatures
def der_encode_sig(v, r, s):
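# DER-encode (r, s): prepend 0x00 when the leading hex digit is 8-f so the value
# is not read as negative, wrap each integer in an 0x02 INTEGER, then wrap the
# pair in an 0x30 SEQUENCE.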
b1, b2 = safe_hexlify(encode(r, 256)), safe_hexlify(encode(s, 256))
if len(b1) and b1[0] in '89abcdef':
b1 = '00' + b1
if len(b2) and b2[0] in '89abcdef':
b2 = '00' + b2
left = '02'+encode(len(b1)//2, 16, 2)+b1
right = '02'+encode(len(b2)//2, 16, 2)+b2
return '30'+encode(len(left+right)//2, 16, 2)+left+right
def der_decode_sig(sig):
leftlen = decode(sig[6:8], 16)*2
left = sig[8:8+leftlen]
rightlen = decode(sig[10+leftlen:12+leftlen], 16)*2
right = sig[12+leftlen:12+leftlen+rightlen]
return (None, decode(left, 16), decode(right, 16))
def is_bip66(sig):
"""Checks hex DER sig for BIP66 consistency"""
#https://raw.githubusercontent.com/bitcoin/bips/master/bip-0066.mediawiki
#0x30 [total-len] 0x02 [R-len] [R] 0x02 [S-len] [S] [sighash]
sig = bytearray.fromhex(sig) if re.match('^[0-9a-fA-F]*$', sig) else bytearray(sig)
if (sig[0] == 0x30) and (sig[1] == len(sig)-2): # check if sighash is missing
sig.extend(b"\1") # add SIGHASH_ALL for testing
#assert (sig[-1] & 124 == 0) and (not not sig[-1]), "Bad SIGHASH value"
if len(sig) < 9 or len(sig) > 73: return False
if (sig[0] != 0x30): return False
if (sig[1] != len(sig)-3): return False
rlen = sig[3]
if (5+rlen >= len(sig)): return False
slen = sig[5+rlen]
if (rlen + slen + 7 != len(sig)): return False
if (sig[2] != 0x02): return False
if (rlen == 0): return False
if (sig[4] & 0x80): return False
if (rlen > 1 and (sig[4] == 0x00) and not (sig[5] & 0x80)): return False
if (sig[4+rlen] != 0x02): return False
if (slen == 0): return False
if (sig[rlen+6] & 0x80): return False
if (slen > 1 and (sig[6+rlen] == 0x00) and not (sig[7+rlen] & 0x80)):
return False
return True
def txhash(tx, hashcode=None):
if isinstance(tx, str) and re.match('^[0-9a-fA-F]*$', tx):
tx = changebase(tx, 16, 256)
if hashcode:
return dbl_sha256(from_string_to_bytes(tx) + encode(int(hashcode), 256, 4)[::-1])
else:
return safe_hexlify(bin_dbl_sha256(tx)[::-1])
def bin_txhash(tx, hashcode=None):
return binascii.unhexlify(txhash(tx, hashcode))
def ecdsa_tx_sign(tx, priv, hashcode=SIGHASH_ALL):
rawsig = ecdsa_raw_sign(bin_txhash(tx, hashcode), priv)
return der_encode_sig(*rawsig)+encode(hashcode, 16, 2)
def ecdsa_tx_verify(tx, sig, pub, hashcode=SIGHASH_ALL):
return ecdsa_raw_verify(bin_txhash(tx, hashcode), der_decode_sig(sig), pub)
def ecdsa_tx_recover(tx, sig, hashcode=SIGHASH_ALL):
z = bin_txhash(tx, hashcode)
_, r, s = der_decode_sig(sig)
left = ecdsa_raw_recover(z, (0, r, s))
right = ecdsa_raw_recover(z, (1, r, s))
return (encode_pubkey(left, 'hex'), encode_pubkey(right, 'hex'))
# Scripts
def mk_pubkey_script(addr):
# Keep the auxiliary functions around for altcoins' sake
return '76a914' + b58check_to_hex(addr) + '88ac'
def mk_scripthash_script(addr):
return 'a914' + b58check_to_hex(addr) + '87'
# Address representation to output script
def address_to_script(addr):
if addr[0] == '3' or addr[0] == '2':
return mk_scripthash_script(addr)
else:
return mk_pubkey_script(addr)
# Output script to address representation
def script_to_address(script, vbyte=0):
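# P2PKH scripts (76 a9 14 <20-byte hash> 88 ac) map to pubkey-hash addresses;
# anything else is treated as P2SH, using version byte 196 on testnet.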
if re.match('^[0-9a-fA-F]*$', script):
script = binascii.unhexlify(script)
if script[:3] == b'\x76\xa9\x14' and script[-2:] == b'\x88\xac' and len(script) == 25:
return bin_to_b58check(script[3:-2], vbyte) # pubkey hash addresses
else:
if vbyte in [111, 196]:
# Testnet
scripthash_byte = 196
else:
scripthash_byte = vbyte
# BIP0016 scripthash addresses
return bin_to_b58check(script[2:-1], scripthash_byte)
def p2sh_scriptaddr(script, magicbyte=5):
if re.match('^[0-9a-fA-F]*$', script):
script = binascii.unhexlify(script)
return hex_to_b58check(hash160(script), magicbyte)
scriptaddr = p2sh_scriptaddr
def deserialize_script(script):
if isinstance(script, str) and re.match('^[0-9a-fA-F]*$', script):
return json_changebase(deserialize_script(binascii.unhexlify(script)),
lambda x: safe_hexlify(x))
out, pos = [], 0
while pos < len(script):
code = from_byte_to_int(script[pos])
if code == 0:
out.append(None)
pos += 1
elif code <= 75:
out.append(script[pos+1:pos+1+code])
pos += 1 + code
elif code <= 78:
szsz = pow(2, code - 76)
sz = decode(script[pos+szsz: pos:-1], 256)
out.append(script[pos + 1 + szsz:pos + 1 + szsz + sz])
pos += 1 + szsz + sz
elif code <= 96:
out.append(code - 80)
pos += 1
else:
out.append(code)
pos += 1
return out
def serialize_script_unit(unit):
if isinstance(unit, int):
if unit < 16:
return from_int_to_byte(unit + 80)
else:
return from_int_to_byte(unit)
elif unit is None:
return b'\x00'
else:
if len(unit) <= 75:
return from_int_to_byte(len(unit))+unit
elif len(unit) < 256:
return from_int_to_byte(76)+from_int_to_byte(len(unit))+unit
elif len(unit) < 65536:
return from_int_to_byte(77)+encode(len(unit), 256, 2)[::-1]+unit
else:
return from_int_to_byte(78)+encode(len(unit), 256, 4)[::-1]+unit
if is_python2:
def serialize_script(script):
if json_is_base(script, 16):
return binascii.hexlify(serialize_script(json_changebase(script,
lambda x: binascii.unhexlify(x))))
return ''.join(map(serialize_script_unit, script))
else:
def serialize_script(script):
if json_is_base(script, 16):
return safe_hexlify(serialize_script(json_changebase(script,
lambda x: binascii.unhexlify(x))))
result = bytes()
for b in map(serialize_script_unit, script):
result += b if isinstance(b, bytes) else bytes(b, 'utf-8')
return result
def mk_multisig_script(*args): # [pubs],k or pub1,pub2...pub[n],k
if isinstance(args[0], list):
pubs, k = args[0], int(args[1])
else:
pubs = list(filter(lambda x: len(str(x)) >= 32, args))
k = int(args[len(pubs)])
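# Resulting script: OP_k <pub1> ... <pubN> OP_N OP_CHECKMULTISIG (0xae).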
return serialize_script([k]+pubs+[len(pubs)]+[0xae])
# Signing and verifying
def verify_tx_input(tx, i, script, sig, pub):
if re.match('^[0-9a-fA-F]*$', tx):
tx = binascii.unhexlify(tx)
if re.match('^[0-9a-fA-F]*$', script):
script = binascii.unhexlify(script)
if not re.match('^[0-9a-fA-F]*$', sig):
sig = safe_hexlify(sig)
hashcode = decode(sig[-2:], 16)
modtx = signature_form(tx, int(i), script, hashcode)
return ecdsa_tx_verify(modtx, sig, pub, hashcode)
def sign(tx, i, priv, hashcode=SIGHASH_ALL):
i = int(i)
if (not is_python2 and isinstance(tx, bytes)) or not re.match('^[0-9a-fA-F]*$', tx):
return binascii.unhexlify(sign(safe_hexlify(tx), i, priv))
if len(priv) <= 33:
priv = safe_hexlify(priv)
pub = privkey_to_pubkey(priv)
address = pubkey_to_address(pub)
signing_tx = signature_form(tx, i, mk_pubkey_script(address), hashcode)
sig = ecdsa_tx_sign(signing_tx, priv, hashcode)
txobj = deserialize(tx)
txobj["ins"][i]["script"] = serialize_script([sig, pub])
return serialize(txobj)
def signall(tx, priv):
# if priv is a dictionary, assume format is
# { 'txinhash:txinidx' : privkey }
if isinstance(priv, dict):
for e, i in enumerate(deserialize(tx)["ins"]):
k = priv["%s:%d" % (i["outpoint"]["hash"], i["outpoint"]["index"])]
tx = sign(tx, e, k)
else:
for i in range(len(deserialize(tx)["ins"])):
tx = sign(tx, i, priv)
return tx
def multisign(tx, i, script, pk, hashcode=SIGHASH_ALL):
if re.match('^[0-9a-fA-F]*$', tx):
tx = binascii.unhexlify(tx)
if re.match('^[0-9a-fA-F]*$', script):
script = binascii.unhexlify(script)
modtx = signature_form(tx, i, script, hashcode)
return ecdsa_tx_sign(modtx, pk, hashcode)
def apply_multisignatures(*args):
# tx,i,script,sigs OR tx,i,script,sig1,sig2...,sig[n]
tx, i, script = args[0], int(args[1]), args[2]
sigs = args[3] if isinstance(args[3], list) else list(args[3:])
if isinstance(script, str) and re.match('^[0-9a-fA-F]*$', script):
script = binascii.unhexlify(script)
sigs = [binascii.unhexlify(x) if x[:2] == '30' else x for x in sigs]
if isinstance(tx, str) and re.match('^[0-9a-fA-F]*$', tx):
return safe_hexlify(apply_multisignatures(binascii.unhexlify(tx), i, script, sigs))
# Not pushing empty elements on the top of the stack if passing no
# script (in case of bare multisig inputs there is no script)
script_blob = [] if script.__len__() == 0 else [script]
txobj = deserialize(tx)
txobj["ins"][i]["script"] = serialize_script([None]+sigs+script_blob)
return serialize(txobj)
def is_inp(arg):
return len(arg) > 64 or "output" in arg or "outpoint" in arg
def mktx(*args):
# [in0, in1...],[out0, out1...] or in0, in1 ... out0 out1 ...
ins, outs = [], []
for arg in args:
if isinstance(arg, list):
for a in arg: (ins if is_inp(a) else outs).append(a)
else:
(ins if is_inp(arg) else outs).append(arg)
txobj = {"locktime": 0, "version": 1, "ins": [], "outs": []}
for i in ins:
if isinstance(i, dict) and "outpoint" in i:
txobj["ins"].append(i)
else:
if isinstance(i, dict) and "output" in i:
i = i["output"]
txobj["ins"].append({
"outpoint": {"hash": i[:64], "index": int(i[65:])},
"script": "",
"sequence": 4294967295
})
for o in outs:
if isinstance(o, string_or_bytes_types):
addr = o[:o.find(':')]
val = int(o[o.find(':')+1:])
o = {}
if re.match('^[0-9a-fA-F]*$', addr):
o["script"] = addr
else:
o["address"] = addr
o["value"] = val
outobj = {}
if "address" in o:
outobj["script"] = address_to_script(o["address"])
elif "script" in o:
outobj["script"] = o["script"]
else:
raise Exception("Could not find 'address' or 'script' in output.")
outobj["value"] = o["value"]
txobj["outs"].append(outobj)
return serialize(txobj)
def select(unspent, value):
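# Greedy coin selection: prefer the single smallest UTXO that covers the value;
# otherwise take the largest UTXOs until their total covers it.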
value = int(value)
high = [u for u in unspent if u["value"] >= value]
high.sort(key=lambda u: u["value"])
low = [u for u in unspent if u["value"] < value]
low.sort(key=lambda u: -u["value"])
if len(high):
return [high[0]]
i, tv = 0, 0
while tv < value and i < len(low):
tv += low[i]["value"]
i += 1
if tv < value:
raise Exception("Not enough funds")
return low[:i]
# Only takes inputs of the form { "output": blah, "value": foo }
def mksend(*args):
argz, change, fee = args[:-2], args[-2], int(args[-1])
ins, outs = [], []
for arg in argz:
if isinstance(arg, list):
for a in arg:
(ins if is_inp(a) else outs).append(a)
else:
(ins if is_inp(arg) else outs).append(arg)
isum = sum([i["value"] for i in ins])
osum, outputs2 = 0, []
for o in outs:
if isinstance(o, string_types):
o2 = {
"address": o[:o.find(':')],
"value": int(o[o.find(':')+1:])
}
else:
o2 = o
outputs2.append(o2)
osum += o2["value"]
if isum < osum+fee:
raise Exception("Not enough money")
elif isum > osum+fee+5430:
outputs2 += [{"address": change, "value": isum-osum-fee}]
return mktx(ins, outputs2)
|
|
import logging
import math
import os
import secrets
from datetime import datetime, timedelta
from decimal import Decimal
from functools import wraps
from typing import Any, Callable, Dict, Generator, Optional, Tuple, TypeVar, Union, cast
import orjson
import stripe
from django.conf import settings
from django.core.signing import Signer
from django.db import transaction
from django.urls import reverse
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy
from django.utils.translation import override as override_language
from corporate.models import (
Customer,
CustomerPlan,
LicenseLedger,
get_current_plan_by_customer,
get_current_plan_by_realm,
get_customer_by_realm,
)
from zerver.lib.exceptions import JsonableError
from zerver.lib.logging_util import log_to_file
from zerver.lib.send_email import FromAddress, send_email_to_billing_admins_and_realm_owners
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.lib.utils import assert_is_not_none
from zerver.models import Realm, RealmAuditLog, UserProfile, get_system_bot
from zilencer.models import RemoteZulipServer, RemoteZulipServerAuditLog
from zproject.config import get_secret
stripe.api_key = get_secret("stripe_secret_key")
BILLING_LOG_PATH = os.path.join(
"/var/log/zulip" if not settings.DEVELOPMENT else settings.DEVELOPMENT_LOG_DIRECTORY,
"billing.log",
)
billing_logger = logging.getLogger("corporate.stripe")
log_to_file(billing_logger, BILLING_LOG_PATH)
log_to_file(logging.getLogger("stripe"), BILLING_LOG_PATH)
CallableT = TypeVar("CallableT", bound=Callable[..., object])
MIN_INVOICED_LICENSES = 30
MAX_INVOICED_LICENSES = 1000
DEFAULT_INVOICE_DAYS_UNTIL_DUE = 30
# The version of Stripe API the billing system supports.
STRIPE_API_VERSION = "2020-08-27"
def get_latest_seat_count(realm: Realm) -> int:
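# Guests are counted at a 5:1 ratio: bill for whichever is larger, the non-guest
# count or ceil(guests / 5).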
non_guests = (
UserProfile.objects.filter(realm=realm, is_active=True, is_bot=False)
.exclude(role=UserProfile.ROLE_GUEST)
.count()
)
guests = UserProfile.objects.filter(
realm=realm, is_active=True, is_bot=False, role=UserProfile.ROLE_GUEST
).count()
return max(non_guests, math.ceil(guests / 5))
def sign_string(string: str) -> Tuple[str, str]:
salt = secrets.token_hex(32)
signer = Signer(salt=salt)
return signer.sign(string), salt
def unsign_string(signed_string: str, salt: str) -> str:
signer = Signer(salt=salt)
return signer.unsign(signed_string)
def validate_licenses(charge_automatically: bool, licenses: Optional[int], seat_count: int) -> None:
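# Automatic card charges may buy exactly the current seat count; manually
# invoiced plans must buy at least MIN_INVOICED_LICENSES and at most
# MAX_INVOICED_LICENSES.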
min_licenses = seat_count
max_licenses = None
if not charge_automatically:
min_licenses = max(seat_count, MIN_INVOICED_LICENSES)
max_licenses = MAX_INVOICED_LICENSES
if licenses is None or licenses < min_licenses:
raise BillingError(
"not enough licenses", _("You must invoice for at least {} users.").format(min_licenses)
)
if max_licenses is not None and licenses > max_licenses:
message = _(
"Invoices with more than {} licenses can't be processed from this page. To complete "
"the upgrade, please contact {}."
).format(max_licenses, settings.ZULIP_ADMINISTRATOR)
raise BillingError("too many licenses", message)
# Be extremely careful changing this function. Historical billing periods
# are not stored anywhere, and are just computed on the fly using this
# function. Any change you make here should return the same value (or be
# within a few seconds) for basically any value from when the billing system
# went online to within a year from now.
def add_months(dt: datetime, months: int) -> datetime:
assert months >= 0
# It's fine that the max day in Feb is 28 for leap years.
MAX_DAY_FOR_MONTH = {
1: 31,
2: 28,
3: 31,
4: 30,
5: 31,
6: 30,
7: 31,
8: 31,
9: 30,
10: 31,
11: 30,
12: 31,
}
year = dt.year
month = dt.month + months
while month > 12:
year += 1
month -= 12
day = min(dt.day, MAX_DAY_FOR_MONTH[month])
# datetimes don't support leap seconds, so don't need to worry about those
return dt.replace(year=year, month=month, day=day)
def next_month(billing_cycle_anchor: datetime, dt: datetime) -> datetime:
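# Estimate how many whole billing months have elapsed, then search the
# neighboring month counts for the anchor-aligned date that falls 20-40 days
# after dt.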
estimated_months = round((dt - billing_cycle_anchor).days * 12.0 / 365)
for months in range(max(estimated_months - 1, 0), estimated_months + 2):
proposed_next_month = add_months(billing_cycle_anchor, months)
if 20 < (proposed_next_month - dt).days < 40:
return proposed_next_month
raise AssertionError(
"Something wrong in next_month calculation with "
f"billing_cycle_anchor: {billing_cycle_anchor}, dt: {dt}"
)
def start_of_next_billing_cycle(plan: CustomerPlan, event_time: datetime) -> datetime:
months_per_period = {
CustomerPlan.ANNUAL: 12,
CustomerPlan.MONTHLY: 1,
}[plan.billing_schedule]
periods = 1
dt = plan.billing_cycle_anchor
while dt <= event_time:
dt = add_months(plan.billing_cycle_anchor, months_per_period * periods)
periods += 1
return dt
def next_invoice_date(plan: CustomerPlan) -> Optional[datetime]:
if plan.status == CustomerPlan.ENDED:
return None
assert plan.next_invoice_date is not None # for mypy
months_per_period = {
CustomerPlan.ANNUAL: 12,
CustomerPlan.MONTHLY: 1,
}[plan.billing_schedule]
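# Automanaged plans are invoiced every month, regardless of the billing schedule.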
if plan.automanage_licenses:
months_per_period = 1
periods = 1
dt = plan.billing_cycle_anchor
while dt <= plan.next_invoice_date:
dt = add_months(plan.billing_cycle_anchor, months_per_period * periods)
periods += 1
return dt
def renewal_amount(plan: CustomerPlan, event_time: datetime) -> int: # nocoverage: TODO
if plan.fixed_price is not None:
return plan.fixed_price
new_plan, last_ledger_entry = make_end_of_cycle_updates_if_needed(plan, event_time)
if last_ledger_entry is None:
return 0
if last_ledger_entry.licenses_at_next_renewal is None:
return 0
if new_plan is not None:
plan = new_plan
assert plan.price_per_license is not None # for mypy
return plan.price_per_license * last_ledger_entry.licenses_at_next_renewal
def get_idempotency_key(ledger_entry: LicenseLedger) -> Optional[str]:
if settings.TEST_SUITE:
return None
return f"ledger_entry:{ledger_entry.id}" # nocoverage
def cents_to_dollar_string(cents: int) -> str:
return f"{cents / 100.:,.2f}"
class BillingError(JsonableError):
data_fields = ["error_description"]
# error messages
CONTACT_SUPPORT = gettext_lazy("Something went wrong. Please contact {email}.")
TRY_RELOADING = gettext_lazy("Something went wrong. Please reload the page.")
# description is used only for tests
def __init__(self, description: str, message: Optional[str] = None) -> None:
self.error_description = description
if message is None:
message = BillingError.CONTACT_SUPPORT.format(email=settings.ZULIP_ADMINISTRATOR)
super().__init__(message)
class LicenseLimitError(Exception):
pass
class StripeCardError(BillingError):
pass
class StripeConnectionError(BillingError):
pass
class UpgradeWithExistingPlanError(BillingError):
def __init__(self) -> None:
super().__init__(
"subscribing with existing subscription",
"The organization is already subscribed to a plan. Please reload the billing page.",
)
class InvalidBillingSchedule(Exception):
def __init__(self, billing_schedule: int) -> None:
self.message = f"Unknown billing_schedule: {billing_schedule}"
super().__init__(self.message)
class InvalidTier(Exception):
def __init__(self, tier: int) -> None:
self.message = f"Unknown tier: {tier}"
super().__init__(self.message)
def catch_stripe_errors(func: CallableT) -> CallableT:
@wraps(func)
def wrapped(*args: object, **kwargs: object) -> object:
try:
return func(*args, **kwargs)
# See https://stripe.com/docs/api/python#error_handling, though
# https://stripe.com/docs/api/ruby#error_handling suggests there are additional fields, and
# https://stripe.com/docs/error-codes gives a more detailed set of error codes
except stripe.error.StripeError as e:
err = e.json_body.get("error", {})
if isinstance(e, stripe.error.CardError):
billing_logger.info(
"Stripe card error: %s %s %s %s",
e.http_status,
err.get("type"),
err.get("code"),
err.get("param"),
)
# TODO: Look into i18n for this
raise StripeCardError("card error", err.get("message"))
billing_logger.error(
"Stripe error: %s %s %s %s",
e.http_status,
err.get("type"),
err.get("code"),
err.get("param"),
)
if isinstance(
e, (stripe.error.RateLimitError, stripe.error.APIConnectionError)
): # nocoverage TODO
raise StripeConnectionError(
"stripe connection error",
_("Something went wrong. Please wait a few seconds and try again."),
)
raise BillingError("other stripe error")
return cast(CallableT, wrapped)
@catch_stripe_errors
def stripe_get_customer(stripe_customer_id: str) -> stripe.Customer:
return stripe.Customer.retrieve(
stripe_customer_id, expand=["invoice_settings", "invoice_settings.default_payment_method"]
)
@catch_stripe_errors
def do_create_stripe_customer(user: UserProfile, payment_method: Optional[str] = None) -> Customer:
realm = user.realm
# We could do a better job of handling race conditions here, but if two
# people from a realm try to upgrade at exactly the same time, the main
# bad thing that will happen is that we will create an extra stripe
# customer that we can delete or ignore.
stripe_customer = stripe.Customer.create(
description=f"{realm.string_id} ({realm.name})",
email=user.delivery_email,
metadata={"realm_id": realm.id, "realm_str": realm.string_id},
payment_method=payment_method,
)
stripe.Customer.modify(
stripe_customer.id, invoice_settings={"default_payment_method": payment_method}
)
event_time = timestamp_to_datetime(stripe_customer.created)
with transaction.atomic():
RealmAuditLog.objects.create(
realm=user.realm,
acting_user=user,
event_type=RealmAuditLog.STRIPE_CUSTOMER_CREATED,
event_time=event_time,
)
if payment_method is not None:
RealmAuditLog.objects.create(
realm=user.realm,
acting_user=user,
event_type=RealmAuditLog.STRIPE_CARD_CHANGED,
event_time=event_time,
)
customer, created = Customer.objects.update_or_create(
realm=realm, defaults={"stripe_customer_id": stripe_customer.id}
)
from zerver.lib.actions import do_make_user_billing_admin
do_make_user_billing_admin(user)
return customer
@catch_stripe_errors
def do_replace_payment_method(
user: UserProfile, payment_method: str, pay_invoices: bool = False
) -> None:
customer = get_customer_by_realm(user.realm)
assert customer is not None # for mypy
assert customer.stripe_customer_id is not None # for mypy
stripe.Customer.modify(
customer.stripe_customer_id, invoice_settings={"default_payment_method": payment_method}
)
RealmAuditLog.objects.create(
realm=user.realm,
acting_user=user,
event_type=RealmAuditLog.STRIPE_CARD_CHANGED,
event_time=timezone_now(),
)
if pay_invoices:
for stripe_invoice in stripe.Invoice.list(
collection_method="charge_automatically",
customer=customer.stripe_customer_id,
status="open",
):
# The user will get either a receipt or a "failed payment" email, but the in-app
# messaging could be clearer here (e.g. it could explicitly tell the user that there
# were payment(s) and that they succeeded or failed).
# Worth fixing if we notice that a lot of cards end up failing at this step.
stripe.Invoice.pay(stripe_invoice)
def stripe_customer_has_credit_card_as_default_payment_method(
stripe_customer: stripe.Customer,
) -> bool:
if not stripe_customer.invoice_settings.default_payment_method:
return False
return stripe_customer.invoice_settings.default_payment_method.type == "card"
def customer_has_credit_card_as_default_payment_method(customer: Customer) -> bool:
if not customer.stripe_customer_id:
return False
stripe_customer = stripe_get_customer(customer.stripe_customer_id)
return stripe_customer_has_credit_card_as_default_payment_method(stripe_customer)
# event_time should roughly be timezone_now(). Not designed to handle
# event_times in the past or future
@transaction.atomic
def make_end_of_cycle_updates_if_needed(
plan: CustomerPlan, event_time: datetime
) -> Tuple[Optional[CustomerPlan], Optional[LicenseLedger]]:
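# Runs lazily when the ledger is updated or invoiced: if the current billing
# cycle has ended, create the renewal ledger entry and apply any queued status
# change (free trial ending, monthly-to-annual switch, Standard-to-Plus switch,
# or end-of-cycle downgrade).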
last_ledger_entry = LicenseLedger.objects.filter(plan=plan).order_by("-id").first()
last_ledger_renewal = (
LicenseLedger.objects.filter(plan=plan, is_renewal=True).order_by("-id").first()
)
assert last_ledger_renewal is not None
last_renewal = last_ledger_renewal.event_time
if plan.is_free_trial() or plan.status == CustomerPlan.SWITCH_NOW_FROM_STANDARD_TO_PLUS:
assert plan.next_invoice_date is not None
next_billing_cycle = plan.next_invoice_date
else:
next_billing_cycle = start_of_next_billing_cycle(plan, last_renewal)
if next_billing_cycle <= event_time and last_ledger_entry is not None:
licenses_at_next_renewal = last_ledger_entry.licenses_at_next_renewal
assert licenses_at_next_renewal is not None
if plan.status == CustomerPlan.ACTIVE:
return None, LicenseLedger.objects.create(
plan=plan,
is_renewal=True,
event_time=next_billing_cycle,
licenses=licenses_at_next_renewal,
licenses_at_next_renewal=licenses_at_next_renewal,
)
if plan.is_free_trial():
plan.invoiced_through = last_ledger_entry
plan.billing_cycle_anchor = next_billing_cycle.replace(microsecond=0)
plan.status = CustomerPlan.ACTIVE
plan.save(update_fields=["invoiced_through", "billing_cycle_anchor", "status"])
return None, LicenseLedger.objects.create(
plan=plan,
is_renewal=True,
event_time=next_billing_cycle,
licenses=licenses_at_next_renewal,
licenses_at_next_renewal=licenses_at_next_renewal,
)
if plan.status == CustomerPlan.SWITCH_TO_ANNUAL_AT_END_OF_CYCLE:
if plan.fixed_price is not None: # nocoverage
raise NotImplementedError("Can't switch fixed priced monthly plan to annual.")
plan.status = CustomerPlan.ENDED
plan.save(update_fields=["status"])
discount = plan.customer.default_discount or plan.discount
_, _, _, price_per_license = compute_plan_parameters(
tier=plan.tier,
automanage_licenses=plan.automanage_licenses,
billing_schedule=CustomerPlan.ANNUAL,
discount=plan.discount,
)
new_plan = CustomerPlan.objects.create(
customer=plan.customer,
billing_schedule=CustomerPlan.ANNUAL,
automanage_licenses=plan.automanage_licenses,
charge_automatically=plan.charge_automatically,
price_per_license=price_per_license,
discount=discount,
billing_cycle_anchor=next_billing_cycle,
tier=plan.tier,
status=CustomerPlan.ACTIVE,
next_invoice_date=next_billing_cycle,
invoiced_through=None,
invoicing_status=CustomerPlan.INITIAL_INVOICE_TO_BE_SENT,
)
new_plan_ledger_entry = LicenseLedger.objects.create(
plan=new_plan,
is_renewal=True,
event_time=next_billing_cycle,
licenses=licenses_at_next_renewal,
licenses_at_next_renewal=licenses_at_next_renewal,
)
realm = new_plan.customer.realm
assert realm is not None
RealmAuditLog.objects.create(
realm=realm,
event_time=event_time,
event_type=RealmAuditLog.CUSTOMER_SWITCHED_FROM_MONTHLY_TO_ANNUAL_PLAN,
extra_data=orjson.dumps(
{
"monthly_plan_id": plan.id,
"annual_plan_id": new_plan.id,
}
).decode(),
)
return new_plan, new_plan_ledger_entry
if plan.status == CustomerPlan.SWITCH_NOW_FROM_STANDARD_TO_PLUS:
standard_plan = plan
standard_plan.end_date = next_billing_cycle
standard_plan.status = CustomerPlan.ENDED
standard_plan.save(update_fields=["status", "end_date"])
(_, _, _, plus_plan_price_per_license) = compute_plan_parameters(
CustomerPlan.PLUS,
standard_plan.automanage_licenses,
standard_plan.billing_schedule,
standard_plan.customer.default_discount,
)
plus_plan_billing_cycle_anchor = standard_plan.end_date.replace(microsecond=0)
plus_plan = CustomerPlan.objects.create(
customer=standard_plan.customer,
status=CustomerPlan.ACTIVE,
automanage_licenses=standard_plan.automanage_licenses,
charge_automatically=standard_plan.charge_automatically,
price_per_license=plus_plan_price_per_license,
discount=standard_plan.customer.default_discount,
billing_schedule=standard_plan.billing_schedule,
tier=CustomerPlan.PLUS,
billing_cycle_anchor=plus_plan_billing_cycle_anchor,
invoicing_status=CustomerPlan.INITIAL_INVOICE_TO_BE_SENT,
next_invoice_date=plus_plan_billing_cycle_anchor,
)
standard_plan_last_ledger = (
LicenseLedger.objects.filter(plan=standard_plan).order_by("id").last()
)
licenses_for_plus_plan = standard_plan_last_ledger.licenses_at_next_renewal
plus_plan_ledger_entry = LicenseLedger.objects.create(
plan=plus_plan,
is_renewal=True,
event_time=plus_plan_billing_cycle_anchor,
licenses=licenses_for_plus_plan,
licenses_at_next_renewal=licenses_for_plus_plan,
)
return plus_plan, plus_plan_ledger_entry
if plan.status == CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE:
process_downgrade(plan)
return None, None
return None, last_ledger_entry
# Returns Customer instead of stripe_customer so that we don't make a Stripe
# API call if there's nothing to update
@catch_stripe_errors
def update_or_create_stripe_customer(
user: UserProfile, payment_method: Optional[str] = None
) -> Customer:
realm = user.realm
customer = get_customer_by_realm(realm)
if customer is None or customer.stripe_customer_id is None:
return do_create_stripe_customer(user, payment_method=payment_method)
if payment_method is not None:
do_replace_payment_method(user, payment_method, True)
return customer
def calculate_discounted_price_per_license(
original_price_per_license: int, discount: Decimal
) -> int:
# There are no fractional cents in Stripe, so round down to nearest integer.
return int(float(original_price_per_license * (1 - discount / 100)) + 0.00001)
def get_price_per_license(
tier: int, billing_schedule: int, discount: Optional[Decimal] = None
) -> int:
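# Prices are in US cents per license: Standard is 8000/year or 800/month, Plus
# is 16000/year or 1600/month, before any discount.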
price_per_license: Optional[int] = None
if tier == CustomerPlan.STANDARD:
if billing_schedule == CustomerPlan.ANNUAL:
price_per_license = 8000
elif billing_schedule == CustomerPlan.MONTHLY:
price_per_license = 800
else: # nocoverage
raise InvalidBillingSchedule(billing_schedule)
elif tier == CustomerPlan.PLUS:
if billing_schedule == CustomerPlan.ANNUAL:
price_per_license = 16000
elif billing_schedule == CustomerPlan.MONTHLY:
price_per_license = 1600
else: # nocoverage
raise InvalidBillingSchedule(billing_schedule)
else:
raise InvalidTier(tier)
if discount is not None:
price_per_license = calculate_discounted_price_per_license(price_per_license, discount)
return price_per_license
def compute_plan_parameters(
tier: int,
automanage_licenses: bool,
billing_schedule: int,
discount: Optional[Decimal],
free_trial: bool = False,
) -> Tuple[datetime, datetime, datetime, int]:
# Everything in Stripe is stored as timestamps with 1 second resolution,
# so standardize on 1 second resolution.
# TODO talk about leapseconds?
billing_cycle_anchor = timezone_now().replace(microsecond=0)
if billing_schedule == CustomerPlan.ANNUAL:
period_end = add_months(billing_cycle_anchor, 12)
elif billing_schedule == CustomerPlan.MONTHLY:
period_end = add_months(billing_cycle_anchor, 1)
else: # nocoverage
raise InvalidBillingSchedule(billing_schedule)
price_per_license = get_price_per_license(tier, billing_schedule, discount)
next_invoice_date = period_end
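# With automanaged licenses the first invoice goes out after one month even on
# annual plans.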
if automanage_licenses:
next_invoice_date = add_months(billing_cycle_anchor, 1)
if free_trial:
period_end = billing_cycle_anchor + timedelta(
days=assert_is_not_none(settings.FREE_TRIAL_DAYS)
)
next_invoice_date = period_end
return billing_cycle_anchor, next_invoice_date, period_end, price_per_license
def decimal_to_float(obj: object) -> object:
if isinstance(obj, Decimal):
return float(obj)
raise TypeError # nocoverage
def is_free_trial_offer_enabled() -> bool:
return settings.FREE_TRIAL_DAYS not in (None, 0)
def ensure_realm_does_not_have_active_plan(realm: Realm) -> None:
if get_current_plan_by_realm(realm) is not None:
# Unlikely race condition from two people upgrading (clicking "Make payment")
# at exactly the same time. Doesn't fully resolve the race condition, but having
# a check here reduces the likelihood.
billing_logger.warning(
"Upgrade of %s failed because of existing active plan.",
realm.string_id,
)
raise UpgradeWithExistingPlanError()
@transaction.atomic
def do_change_remote_server_plan_type(remote_server: RemoteZulipServer, plan_type: int) -> None:
old_value = remote_server.plan_type
remote_server.plan_type = plan_type
remote_server.save(update_fields=["plan_type"])
RemoteZulipServerAuditLog.objects.create(
event_type=RealmAuditLog.REMOTE_SERVER_PLAN_TYPE_CHANGED,
server=remote_server,
event_time=timezone_now(),
extra_data={"old_value": old_value, "new_value": plan_type},
)
@transaction.atomic
def do_deactivate_remote_server(remote_server: RemoteZulipServer) -> None:
if remote_server.deactivated:
billing_logger.warning(
f"Cannot deactivate remote server with ID {remote_server.id}, "
"server has already been deactivated."
)
return
remote_server.deactivated = True
remote_server.save(update_fields=["deactivated"])
RemoteZulipServerAuditLog.objects.create(
event_type=RealmAuditLog.REMOTE_SERVER_DEACTIVATED,
server=remote_server,
event_time=timezone_now(),
)
# Only used for cloud signups
@catch_stripe_errors
def process_initial_upgrade(
user: UserProfile,
licenses: int,
automanage_licenses: bool,
billing_schedule: int,
charge_automatically: bool,
free_trial: bool,
) -> None:
realm = user.realm
customer = update_or_create_stripe_customer(user)
assert customer.stripe_customer_id is not None # for mypy
assert customer.realm is not None
ensure_realm_does_not_have_active_plan(customer.realm)
(
billing_cycle_anchor,
next_invoice_date,
period_end,
price_per_license,
) = compute_plan_parameters(
CustomerPlan.STANDARD,
automanage_licenses,
billing_schedule,
customer.default_discount,
free_trial,
)
# TODO: The correctness of this relies on user creation, deactivation, etc being
# in a transaction.atomic() with the relevant RealmAuditLog entries
with transaction.atomic():
# billed_licenses can be greater than licenses if users are added between the start of
# this function (process_initial_upgrade) and now
billed_licenses = max(get_latest_seat_count(realm), licenses)
plan_params = {
"automanage_licenses": automanage_licenses,
"charge_automatically": charge_automatically,
"price_per_license": price_per_license,
"discount": customer.default_discount,
"billing_cycle_anchor": billing_cycle_anchor,
"billing_schedule": billing_schedule,
"tier": CustomerPlan.STANDARD,
}
if free_trial:
plan_params["status"] = CustomerPlan.FREE_TRIAL
plan = CustomerPlan.objects.create(
customer=customer, next_invoice_date=next_invoice_date, **plan_params
)
ledger_entry = LicenseLedger.objects.create(
plan=plan,
is_renewal=True,
event_time=billing_cycle_anchor,
licenses=billed_licenses,
licenses_at_next_renewal=billed_licenses,
)
plan.invoiced_through = ledger_entry
plan.save(update_fields=["invoiced_through"])
RealmAuditLog.objects.create(
realm=realm,
acting_user=user,
event_time=billing_cycle_anchor,
event_type=RealmAuditLog.CUSTOMER_PLAN_CREATED,
extra_data=orjson.dumps(plan_params, default=decimal_to_float).decode(),
)
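# Paid (non-trial) upgrades are billed immediately: one InvoiceItem for the
# licenses, then an Invoice that is either charged to the saved card or emailed
# with DEFAULT_INVOICE_DAYS_UNTIL_DUE days to pay.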
if not free_trial:
stripe.InvoiceItem.create(
currency="usd",
customer=customer.stripe_customer_id,
description="Zulip Standard",
discountable=False,
period={
"start": datetime_to_timestamp(billing_cycle_anchor),
"end": datetime_to_timestamp(period_end),
},
quantity=billed_licenses,
unit_amount=price_per_license,
)
if charge_automatically:
collection_method = "charge_automatically"
days_until_due = None
else:
collection_method = "send_invoice"
days_until_due = DEFAULT_INVOICE_DAYS_UNTIL_DUE
stripe_invoice = stripe.Invoice.create(
auto_advance=True,
collection_method=collection_method,
customer=customer.stripe_customer_id,
days_until_due=days_until_due,
statement_descriptor="Zulip Standard",
)
stripe.Invoice.finalize_invoice(stripe_invoice)
from zerver.lib.actions import do_change_realm_plan_type
do_change_realm_plan_type(realm, Realm.PLAN_TYPE_STANDARD, acting_user=user)
def update_license_ledger_for_manual_plan(
plan: CustomerPlan,
event_time: datetime,
licenses: Optional[int] = None,
licenses_at_next_renewal: Optional[int] = None,
) -> None:
if licenses is not None:
assert plan.customer.realm is not None
assert get_latest_seat_count(plan.customer.realm) <= licenses
assert licenses > plan.licenses()
LicenseLedger.objects.create(
plan=plan, event_time=event_time, licenses=licenses, licenses_at_next_renewal=licenses
)
elif licenses_at_next_renewal is not None:
assert plan.customer.realm is not None
assert get_latest_seat_count(plan.customer.realm) <= licenses_at_next_renewal
LicenseLedger.objects.create(
plan=plan,
event_time=event_time,
licenses=plan.licenses(),
licenses_at_next_renewal=licenses_at_next_renewal,
)
else:
raise AssertionError("Pass licenses or licenses_at_next_renewal")
def update_license_ledger_for_automanaged_plan(
realm: Realm, plan: CustomerPlan, event_time: datetime
) -> None:
new_plan, last_ledger_entry = make_end_of_cycle_updates_if_needed(plan, event_time)
if last_ledger_entry is None:
return
if new_plan is not None:
plan = new_plan
licenses_at_next_renewal = get_latest_seat_count(realm)
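    # Automanaged licenses never decrease mid-cycle: bill for the larger of the
    # current seat count and the count already on the most recent ledger entry.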
licenses = max(licenses_at_next_renewal, last_ledger_entry.licenses)
LicenseLedger.objects.create(
plan=plan,
event_time=event_time,
licenses=licenses,
licenses_at_next_renewal=licenses_at_next_renewal,
)
def update_license_ledger_if_needed(realm: Realm, event_time: datetime) -> None:
plan = get_current_plan_by_realm(realm)
if plan is None:
return
if not plan.automanage_licenses:
return
update_license_ledger_for_automanaged_plan(realm, plan, event_time)
def get_plan_renewal_or_end_date(plan: CustomerPlan, event_time: datetime) -> datetime:
billing_period_end = start_of_next_billing_cycle(plan, event_time)
if plan.end_date is not None and plan.end_date < billing_period_end:
return plan.end_date
return billing_period_end
def invoice_plan(plan: CustomerPlan, event_time: datetime) -> None:
if plan.invoicing_status == CustomerPlan.STARTED:
raise NotImplementedError("Plan with invoicing_status==STARTED needs manual resolution.")
if not plan.customer.stripe_customer_id:
assert plan.customer.realm is not None
raise BillingError(
f"Realm {plan.customer.realm.string_id} has a paid plan without a Stripe customer."
)
make_end_of_cycle_updates_if_needed(plan, event_time)
if plan.invoicing_status == CustomerPlan.INITIAL_INVOICE_TO_BE_SENT:
invoiced_through_id = -1
licenses_base = None
else:
assert plan.invoiced_through is not None
licenses_base = plan.invoiced_through.licenses
invoiced_through_id = plan.invoiced_through.id
invoice_item_created = False
for ledger_entry in LicenseLedger.objects.filter(
plan=plan, id__gt=invoiced_through_id, event_time__lte=event_time
).order_by("id"):
price_args: Dict[str, int] = {}
if ledger_entry.is_renewal:
if plan.fixed_price is not None:
price_args = {"amount": plan.fixed_price}
else:
assert plan.price_per_license is not None # needed for mypy
price_args = {
"unit_amount": plan.price_per_license,
"quantity": ledger_entry.licenses,
}
description = f"{plan.name} - renewal"
elif licenses_base is not None and ledger_entry.licenses != licenses_base:
assert plan.price_per_license
last_ledger_entry_renewal = (
LicenseLedger.objects.filter(
plan=plan, is_renewal=True, event_time__lte=ledger_entry.event_time
)
.order_by("-id")
.first()
)
assert last_ledger_entry_renewal is not None
last_renewal = last_ledger_entry_renewal.event_time
billing_period_end = start_of_next_billing_cycle(plan, ledger_entry.event_time)
plan_renewal_or_end_date = get_plan_renewal_or_end_date(plan, ledger_entry.event_time)
proration_fraction = (plan_renewal_or_end_date - ledger_entry.event_time) / (
billing_period_end - last_renewal
)
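            # e.g. with no early end_date, a license added exactly halfway through
            # the billing period gets proration_fraction == 0.5 and is billed at
            # half of price_per_license (rounded to the nearest cent below).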
price_args = {
"unit_amount": int(plan.price_per_license * proration_fraction + 0.5),
"quantity": ledger_entry.licenses - licenses_base,
}
description = "Additional license ({} - {})".format(
ledger_entry.event_time.strftime("%b %-d, %Y"),
plan_renewal_or_end_date.strftime("%b %-d, %Y"),
)
if price_args:
plan.invoiced_through = ledger_entry
plan.invoicing_status = CustomerPlan.STARTED
plan.save(update_fields=["invoicing_status", "invoiced_through"])
stripe.InvoiceItem.create(
currency="usd",
customer=plan.customer.stripe_customer_id,
description=description,
discountable=False,
period={
"start": datetime_to_timestamp(ledger_entry.event_time),
"end": datetime_to_timestamp(
get_plan_renewal_or_end_date(plan, ledger_entry.event_time)
),
},
idempotency_key=get_idempotency_key(ledger_entry),
**price_args,
)
invoice_item_created = True
plan.invoiced_through = ledger_entry
plan.invoicing_status = CustomerPlan.DONE
plan.save(update_fields=["invoicing_status", "invoiced_through"])
licenses_base = ledger_entry.licenses
if invoice_item_created:
if plan.charge_automatically:
collection_method = "charge_automatically"
days_until_due = None
else:
collection_method = "send_invoice"
days_until_due = DEFAULT_INVOICE_DAYS_UNTIL_DUE
stripe_invoice = stripe.Invoice.create(
auto_advance=True,
collection_method=collection_method,
customer=plan.customer.stripe_customer_id,
days_until_due=days_until_due,
statement_descriptor=plan.name,
)
stripe.Invoice.finalize_invoice(stripe_invoice)
plan.next_invoice_date = next_invoice_date(plan)
plan.save(update_fields=["next_invoice_date"])
def invoice_plans_as_needed(event_time: datetime = timezone_now()) -> None:
for plan in CustomerPlan.objects.filter(next_invoice_date__lte=event_time):
invoice_plan(plan, event_time)
def is_realm_on_free_trial(realm: Realm) -> bool:
plan = get_current_plan_by_realm(realm)
return plan is not None and plan.is_free_trial()
def attach_discount_to_realm(
realm: Realm, discount: Decimal, *, acting_user: Optional[UserProfile]
) -> None:
customer = get_customer_by_realm(realm)
old_discount: Optional[Decimal] = None
if customer is not None:
old_discount = customer.default_discount
customer.default_discount = discount
customer.save(update_fields=["default_discount"])
else:
Customer.objects.create(realm=realm, default_discount=discount)
plan = get_current_plan_by_realm(realm)
if plan is not None:
plan.price_per_license = get_price_per_license(plan.tier, plan.billing_schedule, discount)
plan.discount = discount
plan.save(update_fields=["price_per_license", "discount"])
RealmAuditLog.objects.create(
realm=realm,
acting_user=acting_user,
event_type=RealmAuditLog.REALM_DISCOUNT_CHANGED,
event_time=timezone_now(),
extra_data={"old_discount": old_discount, "new_discount": discount},
)
def update_sponsorship_status(
realm: Realm, sponsorship_pending: bool, *, acting_user: Optional[UserProfile]
) -> None:
customer, _ = Customer.objects.get_or_create(realm=realm)
customer.sponsorship_pending = sponsorship_pending
customer.save(update_fields=["sponsorship_pending"])
RealmAuditLog.objects.create(
realm=realm,
acting_user=acting_user,
event_type=RealmAuditLog.REALM_SPONSORSHIP_PENDING_STATUS_CHANGED,
event_time=timezone_now(),
extra_data={
"sponsorship_pending": sponsorship_pending,
},
)
def approve_sponsorship(realm: Realm, *, acting_user: Optional[UserProfile]) -> None:
from zerver.lib.actions import do_change_realm_plan_type, internal_send_private_message
do_change_realm_plan_type(realm, Realm.PLAN_TYPE_STANDARD_FREE, acting_user=acting_user)
customer = get_customer_by_realm(realm)
if customer is not None and customer.sponsorship_pending:
customer.sponsorship_pending = False
customer.save(update_fields=["sponsorship_pending"])
RealmAuditLog.objects.create(
realm=realm,
acting_user=acting_user,
event_type=RealmAuditLog.REALM_SPONSORSHIP_APPROVED,
event_time=timezone_now(),
)
notification_bot = get_system_bot(settings.NOTIFICATION_BOT, realm.id)
for user in realm.get_human_billing_admin_and_realm_owner_users():
with override_language(user.default_language):
            # Using variables to make life easier for translators if these details change.
plan_name = "Zulip Cloud Standard"
emoji = ":tada:"
message = _(
f"Your organization's request for sponsored hosting has been approved! {emoji}.\n"
f"You have been upgraded to {plan_name}, free of charge."
)
internal_send_private_message(notification_bot, user, message)
def is_sponsored_realm(realm: Realm) -> bool:
return realm.plan_type == Realm.PLAN_TYPE_STANDARD_FREE
def get_discount_for_realm(realm: Realm) -> Optional[Decimal]:
customer = get_customer_by_realm(realm)
if customer is not None:
return customer.default_discount
return None
def do_change_plan_status(plan: CustomerPlan, status: int) -> None:
plan.status = status
plan.save(update_fields=["status"])
billing_logger.info(
"Change plan status: Customer.id: %s, CustomerPlan.id: %s, status: %s",
plan.customer.id,
plan.id,
status,
)
def process_downgrade(plan: CustomerPlan) -> None:
from zerver.lib.actions import do_change_realm_plan_type
assert plan.customer.realm is not None
do_change_realm_plan_type(plan.customer.realm, Realm.PLAN_TYPE_LIMITED, acting_user=None)
plan.status = CustomerPlan.ENDED
plan.save(update_fields=["status"])
def estimate_annual_recurring_revenue_by_realm() -> Dict[str, int]: # nocoverage
annual_revenue = {}
for plan in CustomerPlan.objects.filter(status=CustomerPlan.ACTIVE).select_related(
"customer__realm"
):
# TODO: figure out what to do for plans that don't automatically
# renew, but which probably will renew
renewal_cents = renewal_amount(plan, timezone_now())
if plan.billing_schedule == CustomerPlan.MONTHLY:
renewal_cents *= 12
# TODO: Decimal stuff
annual_revenue[plan.customer.realm.string_id] = int(renewal_cents / 100)
return annual_revenue
def get_realms_to_default_discount_dict() -> Dict[str, Decimal]:
realms_to_default_discount: Dict[str, Any] = {}
customers = Customer.objects.exclude(default_discount=None).exclude(default_discount=0)
for customer in customers:
realms_to_default_discount[customer.realm.string_id] = assert_is_not_none(
customer.default_discount
)
return realms_to_default_discount
# During realm deactivation we instantly downgrade the plan to Limited.
# Extra users added in the final month are not charged. Also used
# for the cancellation of Free Trial.
def downgrade_now_without_creating_additional_invoices(realm: Realm) -> None:
plan = get_current_plan_by_realm(realm)
if plan is None:
return
process_downgrade(plan)
plan.invoiced_through = LicenseLedger.objects.filter(plan=plan).order_by("id").last()
plan.next_invoice_date = next_invoice_date(plan)
plan.save(update_fields=["invoiced_through", "next_invoice_date"])
def downgrade_at_the_end_of_billing_cycle(realm: Realm) -> None:
plan = get_current_plan_by_realm(realm)
assert plan is not None
do_change_plan_status(plan, CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE)
def get_all_invoices_for_customer(customer: Customer) -> Generator[stripe.Invoice, None, None]:
if customer.stripe_customer_id is None:
return
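    # Page through the customer's invoices 100 at a time, using Stripe's
    # cursor-based pagination (starting_after=<last invoice of the previous page>).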
invoices = stripe.Invoice.list(customer=customer.stripe_customer_id, limit=100)
while len(invoices):
for invoice in invoices:
yield invoice
last_invoice = invoice
invoices = stripe.Invoice.list(
customer=customer.stripe_customer_id, starting_after=last_invoice, limit=100
)
def void_all_open_invoices(realm: Realm) -> int:
customer = get_customer_by_realm(realm)
if customer is None:
return 0
invoices = get_all_invoices_for_customer(customer)
voided_invoices_count = 0
for invoice in invoices:
if invoice.status == "open":
stripe.Invoice.void_invoice(invoice.id)
voided_invoices_count += 1
return voided_invoices_count
def customer_has_last_n_invoices_open(customer: Customer, n: int) -> bool:
if customer.stripe_customer_id is None: # nocoverage
return False
open_invoice_count = 0
for invoice in stripe.Invoice.list(customer=customer.stripe_customer_id, limit=n):
if invoice.status == "open":
open_invoice_count += 1
return open_invoice_count == n
def downgrade_small_realms_behind_on_payments_as_needed() -> None:
customers = Customer.objects.all().exclude(stripe_customer_id=None)
for customer in customers:
realm = customer.realm
        # For larger realms, we generally want to talk to the customer
        # before downgrading or cancelling invoices; so this logic only applies
        # to realms with fewer than 5 users.
if get_latest_seat_count(realm) >= 5:
continue
if get_current_plan_by_customer(customer) is not None:
            # Only customers whose last 2 invoices are open should be downgraded.
if not customer_has_last_n_invoices_open(customer, 2):
continue
            # We've decided to downgrade this customer and void all their open
            # invoices; the code below does exactly that.
downgrade_now_without_creating_additional_invoices(realm)
void_all_open_invoices(realm)
context: Dict[str, Union[str, Realm]] = {
"upgrade_url": f"{realm.uri}{reverse('initial_upgrade')}",
"realm": realm,
}
send_email_to_billing_admins_and_realm_owners(
"zerver/emails/realm_auto_downgraded",
realm,
from_name=FromAddress.security_email_from_name(language=realm.default_language),
from_address=FromAddress.tokenized_no_reply_address(),
language=realm.default_language,
context=context,
)
else:
if customer_has_last_n_invoices_open(customer, 1):
void_all_open_invoices(realm)
def switch_realm_from_standard_to_plus_plan(realm: Realm) -> None:
standard_plan = get_current_plan_by_realm(realm)
if (
not standard_plan
or standard_plan.status != CustomerPlan.ACTIVE
or standard_plan.tier != CustomerPlan.STANDARD
):
raise BillingError("Organization does not have an active Standard plan")
if not standard_plan.customer.stripe_customer_id:
raise BillingError("Organization missing Stripe customer.")
plan_switch_time = timezone_now()
standard_plan.status = CustomerPlan.SWITCH_NOW_FROM_STANDARD_TO_PLUS
standard_plan.next_invoice_date = plan_switch_time
standard_plan.save(update_fields=["status", "next_invoice_date"])
standard_plan_next_renewal_date = start_of_next_billing_cycle(standard_plan, plan_switch_time)
standard_plan_last_renewal_ledger = (
LicenseLedger.objects.filter(is_renewal=True, plan=standard_plan).order_by("id").last()
)
standard_plan_last_renewal_amount = (
standard_plan_last_renewal_ledger.licenses * standard_plan.price_per_license
)
standard_plan_last_renewal_date = standard_plan_last_renewal_ledger.event_time
unused_proration_fraction = 1 - (plan_switch_time - standard_plan_last_renewal_date) / (
standard_plan_next_renewal_date - standard_plan_last_renewal_date
)
amount_to_credit_back_to_realm = math.ceil(
standard_plan_last_renewal_amount * unused_proration_fraction
)
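    # e.g. switching exactly halfway through the billing cycle credits back about
    # half of the last Standard renewal amount, rounded up to the next cent.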
stripe.Customer.create_balance_transaction(
standard_plan.customer.stripe_customer_id,
amount=-1 * amount_to_credit_back_to_realm,
currency="usd",
description="Credit from early termination of Standard plan",
)
invoice_plan(standard_plan, plan_switch_time)
plus_plan = get_current_plan_by_realm(realm)
assert plus_plan is not None # for mypy
invoice_plan(plus_plan, plan_switch_time)
def update_billing_method_of_current_plan(
realm: Realm, charge_automatically: bool, *, acting_user: Optional[UserProfile]
) -> None:
plan = get_current_plan_by_realm(realm)
if plan is not None:
plan.charge_automatically = charge_automatically
plan.save(update_fields=["charge_automatically"])
RealmAuditLog.objects.create(
realm=realm,
acting_user=acting_user,
event_type=RealmAuditLog.REALM_BILLING_METHOD_CHANGED,
event_time=timezone_now(),
extra_data={
"charge_automatically": charge_automatically,
},
)
|
|
# -*- coding: utf-8 -*-
#
# Random/_UserFriendlyRNG.py : A user-friendly random number generator
#
# Written in 2008 by Dwayne C. Litzenberger <[email protected]>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
from Crypto.Util.python_compat import *
import os
import threading
import struct
import time
from math import floor
from Crypto.Random import OSRNG
from Crypto.Random.Fortuna import FortunaAccumulator
class _EntropySource(object):
def __init__(self, accumulator, src_num):
self._fortuna = accumulator
self._src_num = src_num
self._pool_num = 0
def feed(self, data):
self._fortuna.add_random_event(self._src_num, self._pool_num, data)
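        # Rotate through Fortuna's 32 entropy pools, one pool per event.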
self._pool_num = (self._pool_num + 1) & 31
class _EntropyCollector(object):
def __init__(self, accumulator):
self._osrng = OSRNG.new()
self._osrng_es = _EntropySource(accumulator, 255)
self._time_es = _EntropySource(accumulator, 254)
self._clock_es = _EntropySource(accumulator, 253)
def reinit(self):
# Add 256 bits to each of the 32 pools, twice. (For a total of 16384
# bits collected from the operating system.)
for i in range(2):
block = self._osrng.read(32*32)
for p in range(32):
self._osrng_es.feed(block[p*32:(p+1)*32])
block = None
self._osrng.flush()
def collect(self):
# Collect 64 bits of entropy from the operating system and feed it to Fortuna.
self._osrng_es.feed(self._osrng.read(8))
# Add the fractional part of time.time()
t = time.time()
self._time_es.feed(struct.pack("@I", int(2**30 * (t - floor(t)))))
# Add the fractional part of time.clock()
t = time.clock()
self._clock_es.feed(struct.pack("@I", int(2**30 * (t - floor(t)))))
class _UserFriendlyRNG(object):
def __init__(self):
self.closed = False
self._fa = FortunaAccumulator.FortunaAccumulator()
self._ec = _EntropyCollector(self._fa)
self.reinit()
def reinit(self):
"""Initialize the random number generator and seed it with entropy from
the operating system.
"""
self._pid = os.getpid()
self._ec.reinit()
def close(self):
self.closed = True
self._osrng = None
self._fa = None
def flush(self):
pass
def read(self, N):
"""Return N bytes from the RNG."""
if self.closed:
raise ValueError("I/O operation on closed file")
if not isinstance(N, (long, int)):
raise TypeError("an integer is required")
if N < 0:
raise ValueError("cannot read to end of infinite stream")
# Collect some entropy and feed it to Fortuna
self._ec.collect()
# Ask Fortuna to generate some bytes
retval = self._fa.random_data(N)
# Check that we haven't forked in the meantime. (If we have, we don't
# want to use the data, because it might have been duplicated in the
        # parent process.)
self._check_pid()
# Return the random data.
return retval
def _check_pid(self):
# Lame fork detection to remind developers to invoke Random.atfork()
# after every call to os.fork(). Note that this check is not reliable,
# since process IDs can be reused on most operating systems.
#
# You need to do Random.atfork() in the child process after every call
# to os.fork() to avoid reusing PRNG state. If you want to avoid
# leaking PRNG state to child processes (for example, if you are using
# os.setuid()) then you should also invoke Random.atfork() in the
# *parent* process.
if os.getpid() != self._pid:
raise AssertionError("PID check failed. RNG must be re-initialized after fork(). Hint: Try Random.atfork()")
class _LockingUserFriendlyRNG(_UserFriendlyRNG):
def __init__(self):
self._lock = threading.Lock()
_UserFriendlyRNG.__init__(self)
def close(self):
self._lock.acquire()
try:
return _UserFriendlyRNG.close(self)
finally:
self._lock.release()
def reinit(self):
self._lock.acquire()
try:
return _UserFriendlyRNG.reinit(self)
finally:
self._lock.release()
def read(self, bytes):
self._lock.acquire()
try:
return _UserFriendlyRNG.read(self, bytes)
finally:
self._lock.release()
class RNGFile(object):
def __init__(self, singleton):
self.closed = False
self._singleton = singleton
# PEP 343: Support for the "with" statement
    def __enter__(self):
        """PEP 343 support"""
        return self
    def __exit__(self, exc_type, exc_value, traceback):
"""PEP 343 support"""
self.close()
def close(self):
# Don't actually close the singleton, just close this RNGFile instance.
self.closed = True
self._singleton = None
def read(self, bytes):
if self.closed:
raise ValueError("I/O operation on closed file")
return self._singleton.read(bytes)
def flush(self):
if self.closed:
raise ValueError("I/O operation on closed file")
_singleton_lock = threading.Lock()
_singleton = None
def _get_singleton():
global _singleton
_singleton_lock.acquire()
try:
if _singleton is None:
_singleton = _LockingUserFriendlyRNG()
return _singleton
finally:
_singleton_lock.release()
def new():
return RNGFile(_get_singleton())
def reinit():
_get_singleton().reinit()
def get_random_bytes(n):
"""Return the specified number of cryptographically-strong random bytes."""
return _get_singleton().read(n)
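# Typical usage sketch: the public wrappers in Crypto.Random delegate to the
# new()/reinit()/get_random_bytes() functions defined above:
#   from Crypto import Random
#   rng = Random.new()        # RNGFile backed by the shared locking singleton
#   key = rng.read(32)        # 32 cryptographically strong bytes
#   Random.atfork()           # call after os.fork() to reseed in the child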
# vim:set ts=4 sw=4 sts=4 expandtab:
|
|
from PyQt4.QtGui import (QDockWidget, QMenu, QAction, QMessageBox,
QItemSelectionRange, QItemSelection,
QStandardItemModel, QStandardItem,
QItemSelectionModel)
from PyQt4.QtCore import Qt, pyqtSignal
from spyderlib.utils.qthelpers import get_icon
import neo
from spykeutils.plugin.data_provider_neo import NeoDataProvider
import spykeutils.tools
from neo_navigation_ui import Ui_neoNavigationDock
from .. import api
class NeoNavigationDock(QDockWidget, Ui_neoNavigationDock):
""" Implements a navigation dock for Neo hierarchies. Tightly coupled
with :class:`main_window_neo.MainWindowNeo`, the main reason for this
class to exist is to keep the dock out of the general ui file.
"""
object_removed = pyqtSignal() # Signal to remove an object
def __init__(self, parent):
QDockWidget.__init__(self, parent)
self.parent = parent
self.setupUi(self)
self.block_model = QStandardItemModel()
self.segment_model = QStandardItemModel()
self.channelgroup_model = QStandardItemModel()
self.channel_model = QStandardItemModel()
self.unit_model = QStandardItemModel()
self.neoBlockList.setModel(self.block_model)
self.neoSegmentList.setModel(self.segment_model)
self.neoChannelGroupList.setModel(self.channelgroup_model)
self.neoChannelList.setModel(self.channel_model)
self.neoUnitList.setModel(self.unit_model)
self.neoBlockList.doubleClicked.connect(
lambda x: self._edit_item_annotations(x, self.block_model))
self.neoSegmentList.doubleClicked.connect(
lambda x: self._edit_item_annotations(x, self.segment_model))
self.neoChannelGroupList.doubleClicked.connect(
lambda x: self._edit_item_annotations(x, self.channelgroup_model))
self.neoChannelList.doubleClicked.connect(
lambda x: self._edit_item_annotations(x, self.channel_model))
self.neoUnitList.doubleClicked.connect(
lambda x: self._edit_item_annotations(x, self.unit_model))
self.neoBlockList.selectionModel().selectionChanged.connect(
self.selected_blocks_changed)
self.neoChannelGroupList.selectionModel().selectionChanged.connect(
self.selected_channel_groups_changed)
self.neoChannelList.selectionModel().selectionChanged.connect(
self.selected_channels_changed)
self.neoUnitList.selectionModel().selectionChanged.connect(
self.selected_units_changed)
self.neoSegmentList.selectionModel().selectionChanged.connect(
self.selected_segments_changed)
def clear(self):
""" Clear all lists
"""
self.neoBlockList.clearSelection()
self.block_model.clear()
def get_letter_id(self, id_, small=False):
""" Return a name consisting of letters given an integer
"""
if id_ < 0:
return ''
name = ''
id_ += 1
if small:
start = ord('a')
else:
start = ord('A')
while id_ > 26:
id_ -= 1
name += chr(start + (id_ % 26))
id_ /= 26
name += chr(start + id_ - 1)
return name[::-1]
def ensure_not_filtered(self, objects, all_objects, filters):
""" Deactivates all filters that prevent the the given sequence
of objects to be displayed. The passed filter tuple list is
modified to only include valid filters.
:param sequence objects: The objects that need to be visible.
:param sequence all_objects: The whole object list to be filtered,
including objects that are allowed to be hidden.
:param sequence filters: A sequence of (Filter, name) tuples.
"""
objset = set(objects)
if not objset.issubset(
set(self.parent.filter_list(all_objects, filters))):
i = 1
while i <= len(filters):
test_filters = filters[:i]
if objset.issubset(set(self.parent.filter_list(
all_objects, test_filters))):
i += 1
else:
test_filters[-1][0].active = False
filters.pop(i - 1)
for o in objects:
i = 0
while i < len(filters):
if self.parent.is_filtered(o, [filters[i]]):
filters[i][0].active = False
filters.pop(i)
else:
i += 1
def filter_ordered(self, objects, filters):
""" Filter a sequence of objects with a sequence of filters. Apply
the filters in the order given by the sequence. Return the filtered
list.
"""
for f in filters:
if f[0].combined:
objects = self.parent.filter_list(objects, [f])
else:
objects = [o for o in objects
if not self.parent.is_filtered(o, [f])]
return objects
def populate_neo_block_list(self):
""" Fill the block list with appropriate entries.
Qt.UserRole: The :class:`neo.Block` object
"""
self.block_model.clear()
filters = self.parent.get_active_filters('Block')
blocks = self.filter_ordered(
self.parent.block_names.keys(), filters)
for b in blocks:
item = QStandardItem(self.parent.block_names[b])
item.setData(b, Qt.UserRole)
self.block_model.appendRow(item)
self.neoBlockList.setCurrentIndex(self.block_model.index(0, 0))
self.set_blocks_label()
if not blocks:
self.selected_blocks_changed()
def populate_neo_segment_list(self):
""" Fill the segment list with appropriate entries.
Qt.UserRole: The :class:`neo.Segment` object
"""
self.segment_model.clear()
segments = []
for b in self.blocks():
segments.extend(b.segments)
filters = self.parent.get_active_filters('Segment')
segments = self.filter_ordered(segments, filters)
for i, s in enumerate(segments):
if s.name:
name = s.name + ' (%s-%i)' % \
(self.parent.block_ids[s.block], i)
else:
name = '%s-%i' % (self.parent.block_ids[s.block], i)
new_item = QStandardItem(name)
new_item.setData(s, Qt.UserRole)
self.segment_model.appendRow(new_item)
self.neoSegmentList.setCurrentIndex(self.segment_model.index(0, 0))
if api.config.autoselect_segments:
self.neoSegmentList.selectAll()
self.selected_segments_changed()
def populate_neo_channel_group_list(self):
""" Fill the channel group list with appropriate entries.
Qt.UserRole: The :class:`neo.RecordingChannelGroup` object
"""
self.neoChannelGroupList.clearSelection()
self.channelgroup_model.clear()
self.parent.channel_group_names.clear()
rcgs = []
for b in self.blocks():
rcgs.extend(b.recordingchannelgroups)
filters = self.parent.get_active_filters(
'Recording Channel Group')
rcgs = self.filter_ordered(rcgs, filters)
for i, rcg in enumerate(rcgs):
self.parent.channel_group_names[rcg] = '%s-%s' % (
self.parent.block_ids[rcg.block],
self.get_letter_id(i, True))
if rcg.name:
name = rcg.name + ' (%s)' % \
self.parent.channel_group_names[rcg]
else:
name = self.parent.channel_group_names[rcg]
new_item = QStandardItem(name)
new_item.setData(rcg, Qt.UserRole)
self.channelgroup_model.appendRow(new_item)
self.neoChannelGroupList.setCurrentIndex(
self.channelgroup_model.index(0, 0))
if api.config.autoselect_channel_groups:
self.neoChannelGroupList.selectAll()
elif not rcgs:
self.selected_channel_groups_changed()
def populate_neo_channel_list(self):
""" Fill the channel list with appropriate entries. Data slots:
Qt.UserRole: The :class:`neo.RecordingChannel`
Qt.UserRole+1: The channel index
"""
self.channel_model.clear()
channels = set()
rcs = []
rc_group_name = {}
for rcg in self.recording_channel_groups():
for rc in rcg.recordingchannels:
if not api.config.duplicate_channels and rc in channels:
continue
channels.add(rc)
rcs.append(rc)
rc_group_name[rc] = self.parent.channel_group_names[rcg]
filters = self.parent.get_active_filters(
'Recording Channel')
rcs = self.filter_ordered(rcs, filters)
for rc in rcs:
identifier = '%s.%d' % \
(rc_group_name[rc],
rc.index)
if rc.name:
name = rc.name + ' (%s)' % identifier
else:
name = identifier
new_item = QStandardItem(name)
new_item.setData(rc, Qt.UserRole)
new_item.setData(rc.index, Qt.UserRole + 1)
self.channel_model.appendRow(new_item)
if api.config.autoselect_channels:
self.neoChannelList.selectAll()
self.selected_channels_changed()
def populate_neo_unit_list(self):
""" Fill the unit list with appropriate entries.
Qt.UserRole: The :class:`neo.Unit` object
"""
self.unit_model.clear()
units = []
for rcg in self.recording_channel_groups():
units.extend(rcg.units)
filters = self.parent.get_active_filters('Unit')
units = self.filter_ordered(units, filters)
for i, u in enumerate(units):
if self.parent.is_filtered(u, filters):
continue
            if u.name:
                name = u.name + ' (%s-%d)' % \
                    (self.parent.channel_group_names[u.recordingchannelgroup], i)
            else:
                name = '%s-%d' % (
                    self.parent.channel_group_names[u.recordingchannelgroup], i)
new_item = QStandardItem(name)
new_item.setData(u, Qt.UserRole)
self.unit_model.appendRow(new_item)
if api.config.autoselect_units:
self.neoUnitList.selectAll()
self.selected_units_changed()
def set_blocks_label(self):
self.blocksLabel.setText(
'Blocks (%d/%d):' % (len(self.neoBlockList.selectedIndexes()),
self.block_model.rowCount()))
def set_channel_groups_label(self):
self.channelGroupsLabel.setText(
'Channel Groups (%d/%d):' % (
len(self.neoChannelGroupList.selectedIndexes()),
self.channelgroup_model.rowCount()))
def selected_blocks_changed(self):
self.set_blocks_label()
self.populate_neo_channel_group_list()
self.populate_neo_segment_list()
def selected_channel_groups_changed(self):
self.set_channel_groups_label()
self.populate_neo_channel_list()
self.populate_neo_unit_list()
def selected_channels_changed(self):
self.channelsLabel.setText(
'Channels (%d/%d):' % (
len(self.neoChannelList.selectedIndexes()),
self.channel_model.rowCount()))
def selected_units_changed(self):
self.unitsLabel.setText(
'Units (%d/%d):' % (
len(self.neoUnitList.selectedIndexes()),
self.unit_model.rowCount()))
def selected_segments_changed(self):
self.segmentsLabel.setText(
'Segments (%d/%d):' % (
len(self.neoSegmentList.selectedIndexes()),
self.segment_model.rowCount()))
def _edit_item_annotations(self, index, model):
api.annotation_editor(model.data(index, Qt.UserRole))
def remove_selected(self, list_widget):
""" Remove all selected objects from the given list widget.
"""
items = list_widget.selectedIndexes()
if len(items) < 1:
return
model = list_widget.model()
question = ('Do you really want to remove %d %s' %
(len(items),
type(model.data(items[0], Qt.UserRole)).__name__))
if len(items) > 1:
question += 's'
question += '?'
if QMessageBox.question(
self, 'Please confirm', question,
QMessageBox.Yes | QMessageBox.No) == QMessageBox.No:
return
for i in list_widget.selectedIndexes():
data = model.data(i, Qt.UserRole)
if isinstance(data, neo.Block):
self.parent.block_names.pop(data)
else:
spykeutils.tools.remove_from_hierarchy(data)
list_widget.selectionModel().select(i, QItemSelectionModel.Deselect)
self.object_removed.emit()
def _context_actions(self, list_widget):
idx = list_widget.currentIndex()
if not idx:
return []
data = list_widget.model().data(idx, Qt.UserRole)
edit_action = QAction(get_icon('edit.png'),
'Edit annotations...', self)
edit_action.triggered.connect(
lambda x: self._edit_item_annotations(idx, list_widget.model()))
delete_name = 'Delete %s' % type(data).__name__
if len(list_widget.selectedIndexes()) > 1:
delete_name += 's'
delete_action = QAction(get_icon('editdelete.png'),
delete_name, self)
delete_action.triggered.connect(
lambda x:
self.remove_selected(list_widget))
return [edit_action, delete_action]
def on_neoBlockList_customContextMenuRequested(self, pos):
if not self.neoBlockList.selectedIndexes():
return
context_menu = QMenu(self)
context_menu.addActions(self._context_actions(self.neoBlockList))
context_menu.popup(self.neoBlockList.mapToGlobal(pos))
def on_neoSegmentList_customContextMenuRequested(self, pos):
if not self.neoSegmentList.selectedIndexes():
return
context_menu = QMenu(self)
context_menu.addActions(self._context_actions(self.neoSegmentList))
context_menu.popup(self.neoSegmentList.mapToGlobal(pos))
def on_neoChannelGroupList_customContextMenuRequested(self, pos):
if not self.neoChannelGroupList.selectedIndexes():
return
context_menu = QMenu(self)
context_menu.addActions(self._context_actions(self.neoChannelGroupList))
context_menu.popup(self.neoChannelGroupList.mapToGlobal(pos))
def on_neoChannelList_customContextMenuRequested(self, pos):
if not self.neoChannelList.selectedIndexes():
return
context_menu = QMenu(self)
context_menu.addActions(self._context_actions(self.neoChannelList))
context_menu.popup(self.neoChannelList.mapToGlobal(pos))
def on_neoUnitList_customContextMenuRequested(self, pos):
if not self.neoUnitList.selectedIndexes():
return
context_menu = QMenu(self)
context_menu.addActions(self._context_actions(self.neoUnitList))
context_menu.popup(self.neoUnitList.mapToGlobal(pos))
def blocks(self):
""" Return selected :class:`neo.Block` objects.
"""
return [self.block_model.data(i, Qt.UserRole) for i in
self.neoBlockList.selectedIndexes()]
def segments(self):
""" Return selected :class:`neo.Segment` objects.
"""
return [self.segment_model.data(i, Qt.UserRole) for i in
self.neoSegmentList.selectedIndexes()]
def recording_channel_groups(self):
""" Return selected :class:`neo.RecordingChannelGroup` objects.
"""
return [self.channelgroup_model.data(i, Qt.UserRole) for i in
self.neoChannelGroupList.selectedIndexes()]
def recording_channels(self):
""" Return selected :class:`neo.RecordingChannel` objects.
"""
return [self.channel_model.data(i, Qt.UserRole) for i in
self.neoChannelList.selectedIndexes()]
def units(self):
""" Return selected :class:`neo.Unit` objects.
"""
return [self.unit_model.data(i, Qt.UserRole) for i in
self.neoUnitList.selectedIndexes()]
def set_selection(self, data):
""" Set the selected data.
"""
block_list = []
for b in data['blocks']:
cl = None
rp = None
if len(b) > 2:
cl = NeoDataProvider.find_io_class(b[2])
if len(b) > 3:
rp = b[3]
loaded = NeoDataProvider.get_block(
b[1], b[0], force_io=cl, read_params=rp)
if loaded is None:
raise IOError('One of the files contained in the '
'selection could not be loaded!')
block_list.append(loaded)
block_set = set([(b[0], b[1]) for b in data['blocks']])
# Select blocks
self.ensure_not_filtered(block_list, self.parent.block_names.keys(),
self.parent.get_active_filters('Block'))
self.populate_neo_block_list()
selection = QItemSelection()
for i in self.block_model.findItems(
'*', Qt.MatchWrap | Qt.MatchWildcard):
block = i.data(Qt.UserRole)
t = (NeoDataProvider.block_indices[block],
self.parent.block_files[block])
if t in block_set:
selection.append(QItemSelectionRange(
self.block_model.indexFromItem(i)))
self.neoBlockList.selectionModel().select(
selection, QItemSelectionModel.ClearAndSelect)
# Select segments
seg_list = [block_list[idx[1]].segments[idx[0]]
for idx in data['segments']]
all_segs = []
for b in self.blocks():
all_segs.extend(b.segments)
self.ensure_not_filtered(seg_list, all_segs,
self.parent.get_active_filters('Segment'))
self.populate_neo_segment_list()
selection = QItemSelection()
for i in self.segment_model.findItems(
'*', Qt.MatchWrap | Qt.MatchWildcard):
segment = i.data(Qt.UserRole)
if not segment.block in block_list:
continue
seg_idx = segment.block.segments.index(segment)
block_idx = block_list.index(segment.block)
if [seg_idx, block_idx] in data['segments']:
selection.append(QItemSelectionRange(
self.segment_model.indexFromItem(i)))
self.neoSegmentList.selectionModel().select(
selection, QItemSelectionModel.ClearAndSelect)
# Select recording channel groups
rcg_list = [block_list[rcg[1]].recordingchannelgroups[rcg[0]]
for rcg in data['channel_groups']]
all_rcgs = []
for b in self.blocks():
all_rcgs.extend(b.recordingchannelgroups)
self.ensure_not_filtered(
rcg_list, all_rcgs,
self.parent.get_active_filters('Recording Channel Group'))
self.populate_neo_channel_group_list()
selection = QItemSelection()
for i in self.channelgroup_model.findItems(
'*', Qt.MatchWrap | Qt.MatchWildcard):
rcg = i.data(Qt.UserRole)
if not rcg.block in block_list:
continue
rcg_idx = rcg.block.recordingchannelgroups.index(rcg)
block_idx = block_list.index(rcg.block)
if [rcg_idx, block_idx] in data['channel_groups']:
selection.append(QItemSelectionRange(
self.channelgroup_model.indexFromItem(i)))
self.neoChannelGroupList.selectionModel().select(
selection, QItemSelectionModel.ClearAndSelect)
# Select channels
rc_list = [rcg_list[rc[1]].recordingchannels[rc[0]]
for rc in data['channels']]
all_rcs = []
for rcg in self.recording_channel_groups():
for rc in rcg.recordingchannels:
if not api.config.duplicate_channels and rc in all_rcs:
continue
all_rcs.append(rc)
self.ensure_not_filtered(
rc_list, all_rcs,
self.parent.get_active_filters('Recording Channel'))
self.populate_neo_channel_list()
selection = QItemSelection()
rcg_set = set(rcg_list)
for i in self.channel_model.findItems(
'*', Qt.MatchWrap | Qt.MatchWildcard):
channel = i.data(Qt.UserRole)
if not set(channel.recordingchannelgroups).intersection(rcg_set):
continue
for rcg in channel.recordingchannelgroups:
if [rcg.recordingchannels.index(channel),
rcg_list.index(rcg)] in data['channels']:
selection.append(QItemSelectionRange(
self.channel_model.indexFromItem(i)))
break
self.neoChannelList.selectionModel().select(
selection, QItemSelectionModel.ClearAndSelect)
# Select units
unit_list = [rcg_list[u[1]].units[u[0]]
for u in data['units']]
all_units = []
for rcg in self.recording_channel_groups():
all_units.extend(rcg.units)
self.ensure_not_filtered(
unit_list, all_units,
self.parent.get_active_filters('Unit'))
self.populate_neo_unit_list()
selection = QItemSelection()
for i in self.unit_model.findItems(
'*', Qt.MatchWrap | Qt.MatchWildcard):
unit = i.data(Qt.UserRole)
if unit.recordingchannelgroup not in rcg_list:
continue
rcg_idx = rcg_list.index(unit.recordingchannelgroup)
unit_idx = unit.recordingchannelgroup.units.index(unit)
if [unit_idx, rcg_idx] in data['units']:
selection.append(QItemSelectionRange(
self.unit_model.indexFromItem(i)))
self.neoUnitList.selectionModel().select(
selection, QItemSelectionModel.ClearAndSelect)
self.parent.refresh_filters()
|
|
# -*- coding: utf-8 -*-
"""
Video object
"""
import logging
import numpy as np
import cv2
from .utils import _mean_squared_error
logger = logging.getLogger(__name__)
class Video:
"""
OpenCV Video.
"""
def __init__(self, filepath, grayscale=False):
# OpenCV VideoCapture object
self.filepath = filepath
self._capture = cv2.VideoCapture(filepath)
self.grayscale = grayscale
self.bgmodel = None
def __iter__(self):
for i in self.frames:
yield self.read_frame(number=i)
def __getitem__(self, key):
if isinstance(key, slice):
return self._frame_generator(key)
elif isinstance(key, int):
return self.read_frame(number=key)
else:
raise TypeError
def __len__(self):
return self.nframes
def __str__(self):
return "Video: size={s}, nframes={n}, fps={fps}".format(
s=self.size,
n=self.nframes,
fps=self.fps
)
def __del__(self):
self._capture.release()
def _frame_generator(self, slice):
"""Auxiliary generator to return specific frames."""
for i in self.frames[slice]:
yield self.read_frame(number=i)
@property
def fourcc(self):
"""4-character code of codec."""
fourcc = int(self._capture.get(cv2.CAP_PROP_FOURCC))
return "".join([chr((fourcc >> 8 * i) & 0xFF) for i in range(4)])
@property
def nframes(self):
"""Returns the total number of frames."""
return int(self._capture.get(cv2.CAP_PROP_FRAME_COUNT))
@property
def size(self):
"""Returns the size of the video frames: (width, height)."""
width = int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
return (width, height)
@property
def fps(self):
"""Frames per second."""
return int(self._capture.get(cv2.CAP_PROP_FPS))
@property
def frame_number(self):
"""Number of the frame that will be read next."""
return int(self._capture.get(cv2.CAP_PROP_POS_FRAMES))
@frame_number.setter
def frame_number(self, value):
self._capture.set(cv2.CAP_PROP_POS_FRAMES, value)
@property
def frames(self):
"""Returns an iterator with all frames."""
return range(self.nframes)
def generate_background_model(self, step=None, end=None, mse_min=50):
"""Generates a background model using the median.
Only sufficiently different frames are considered, using the
mean squared error method.
Parameters:
step: Step to iterate through the video. Default is video FPS rate.
end: Last frame to consider. Default is 2/3 of video length.
            mse_min: The minimum error at which the frame is selected. The
lower the error, the more *similar* the two images are.
"""
step = step or self.fps
end = end or int(self.nframes * (2 / 3))
# Select the good frames to compute the background model
logger.info(
"Selecting frames (step={}, end={}, mse_min={})".format(
step, end, mse_min)
)
first_frame = self.read_frame(number=0, grayscale=True)
selected_frames = [first_frame.image]
for i in range(1, end, step):
frame = self.read_frame(number=i, grayscale=True)
mse = _mean_squared_error(frame.image, selected_frames[-1])
if mse < mse_min:
continue
else:
selected_frames.append(frame.image)
logger.info(
"Generating the background model using {} frames".format(
len(selected_frames))
)
bgmodel = np.median(
np.dstack(selected_frames), axis=2).astype(np.uint8)
return bgmodel
def read_frame(self, number=None, grayscale=False):
"""Reads the current frame and returns it.
You can also ask for a specific frame.
Returns a Frame object.
:param int number: Number of the frame desired. If None, reads
the current one.
:param bool grayscale: Convert the frame read to grayscale.
"""
assert(self._capture.isOpened())
if number is not None:
self.frame_number = number
else:
number = self.frame_number
logger.debug('Reading frame %d' % number)
reading_success, image = self._capture.read()
if reading_success is True:
if grayscale or self.grayscale:
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
return Frame(number, image)
else:
raise Exception("Failed to read frame.")
def show_frame(self, number=None, window=None, resize=False):
"""Shows frame `number` in an OpenCV window.
Returns the frame read.
"""
if number is not None:
self.frame_number = number
else:
number = self.frame_number
if window is None:
window = 'Frame {}'.format(number)
frame = self.read_frame(number)
if resize:
image = cv2.resize(frame.image,
dsize=(0, 0),
fx=0.5,
fy=0.5,
interpolation=cv2.INTER_AREA)
else:
image = frame.image
cv2.imshow(window, image)
cv2.waitKey(0)
cv2.destroyAllWindows()
return frame
def play(self, begin=None, end=None, step=1, window=None, wait_time=None):
if begin is None:
begin = 0
if end is None:
end = self.nframes
if window is None:
window = 'Playing Video'
if wait_time is None:
wait_time = int(1000 / self.fps)
for i in self.frames[begin:end:step]:
frame = self.read_frame(i)
cv2.putText(frame.image,
"Frame " + str(i),
(10, 30),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(255, 0, 0),
1,
cv2.LINE_AA)
cv2.imshow(window, frame.image)
key = cv2.waitKey(wait_time) & 0xff
if key == 27:
break
cv2.destroyAllWindows()
class Frame:
def __init__(self, number, image=None):
self._number = number
self._image = image
def __repr__(self):
return('Frame({})'.format(self.number))
@property
def number(self):
return self._number
@property
def image(self):
return self._image
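# Minimal usage sketch (assumes an "example.mp4" file readable by OpenCV):
#   video = Video("example.mp4", grayscale=True)
#   background = video.generate_background_model(step=video.fps, mse_min=50)
#   frame = video.read_frame(number=100)
#   cv2.imwrite("background.png", background)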
|
|
import numpy as np
from typing import Optional, Union
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.utils.annotations import override
from ray.rllib.utils.exploration.gaussian_noise import GaussianNoise
from ray.rllib.utils.framework import try_import_tf, try_import_torch, \
get_variable, TensorType
from ray.rllib.utils.schedules import Schedule
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
class OrnsteinUhlenbeckNoise(GaussianNoise):
"""An exploration that adds Ornstein-Uhlenbeck noise to continuous actions.
If explore=True, returns sampled actions plus a noise term X,
which changes according to this formula:
    Xt+1 = Xt - theta*Xt + sigma*N[0,stddev], where theta, sigma and stddev are
constants. Also, some completely random period is possible at the
beginning.
If explore=False, returns the deterministic action.
"""
def __init__(self,
action_space,
*,
framework: str,
ou_theta: float = 0.15,
ou_sigma: float = 0.2,
ou_base_scale: float = 0.1,
random_timesteps: int = 1000,
initial_scale: float = 1.0,
final_scale: float = 0.02,
scale_timesteps: int = 10000,
scale_schedule: Optional[Schedule] = None,
**kwargs):
"""Initializes an Ornstein-Uhlenbeck Exploration object.
Args:
action_space (Space): The gym action space used by the environment.
ou_theta (float): The theta parameter of the Ornstein-Uhlenbeck
process.
ou_sigma (float): The sigma parameter of the Ornstein-Uhlenbeck
process.
ou_base_scale (float): A fixed scaling factor, by which all OU-
noise is multiplied. NOTE: This is on top of the parent
GaussianNoise's scaling.
random_timesteps (int): The number of timesteps for which to act
completely randomly. Only after this number of timesteps, the
`self.scale` annealing process will start (see below).
initial_scale (float): The initial scaling weight to multiply
the noise with.
final_scale (float): The final scaling weight to multiply
the noise with.
scale_timesteps (int): The timesteps over which to linearly anneal
the scaling factor (after(!) having used random actions for
                `random_timesteps` steps).
scale_schedule (Optional[Schedule]): An optional Schedule object
to use (instead of constructing one from the given parameters).
framework (Optional[str]): One of None, "tf", "torch".
"""
super().__init__(
action_space,
framework=framework,
random_timesteps=random_timesteps,
initial_scale=initial_scale,
final_scale=final_scale,
scale_timesteps=scale_timesteps,
scale_schedule=scale_schedule,
stddev=1.0, # Force `self.stddev` to 1.0.
**kwargs)
self.ou_theta = ou_theta
self.ou_sigma = ou_sigma
self.ou_base_scale = ou_base_scale
        # The current OU-state value (gets updated each time an exploration
        # action is computed).
self.ou_state = get_variable(
np.array(self.action_space.low.size * [.0], dtype=np.float32),
framework=self.framework,
tf_name="ou_state",
torch_tensor=True,
device=self.device)
@override(GaussianNoise)
def _get_tf_exploration_action_op(self, action_dist: ActionDistribution,
explore: Union[bool, TensorType],
timestep: Union[int, TensorType]):
ts = timestep if timestep is not None else self.last_timestep
scale = self.scale_schedule(ts)
# The deterministic actions (if explore=False).
deterministic_actions = action_dist.deterministic_sample()
# Apply base-scaled and time-annealed scaled OU-noise to
# deterministic actions.
gaussian_sample = tf.random.normal(
shape=[self.action_space.low.size], stddev=self.stddev)
ou_new = self.ou_theta * -self.ou_state + \
self.ou_sigma * gaussian_sample
if self.framework in ["tf2", "tfe"]:
self.ou_state.assign_add(ou_new)
ou_state_new = self.ou_state
else:
ou_state_new = tf1.assign_add(self.ou_state, ou_new)
high_m_low = self.action_space.high - self.action_space.low
high_m_low = tf.where(
tf.math.is_inf(high_m_low), tf.ones_like(high_m_low), high_m_low)
noise = scale * self.ou_base_scale * ou_state_new * high_m_low
stochastic_actions = tf.clip_by_value(
deterministic_actions + noise,
self.action_space.low * tf.ones_like(deterministic_actions),
self.action_space.high * tf.ones_like(deterministic_actions))
# Stochastic actions could either be: random OR action + noise.
random_actions, _ = \
self.random_exploration.get_tf_exploration_action_op(
action_dist, explore)
exploration_actions = tf.cond(
pred=tf.convert_to_tensor(ts < self.random_timesteps),
true_fn=lambda: random_actions,
false_fn=lambda: stochastic_actions)
        # Choose by `explore` (main exploration switch).
action = tf.cond(
pred=tf.constant(explore, dtype=tf.bool)
if isinstance(explore, bool) else explore,
true_fn=lambda: exploration_actions,
false_fn=lambda: deterministic_actions)
# Logp=always zero.
batch_size = tf.shape(deterministic_actions)[0]
logp = tf.zeros(shape=(batch_size, ), dtype=tf.float32)
# Increment `last_timestep` by 1 (or set to `timestep`).
if self.framework in ["tf2", "tfe"]:
if timestep is None:
self.last_timestep.assign_add(1)
else:
self.last_timestep.assign(timestep)
return action, logp
else:
assign_op = (tf1.assign_add(self.last_timestep, 1)
if timestep is None else tf1.assign(
self.last_timestep, timestep))
with tf1.control_dependencies([assign_op, ou_state_new]):
return action, logp
@override(GaussianNoise)
def _get_torch_exploration_action(self, action_dist: ActionDistribution,
explore: bool,
timestep: Union[int, TensorType]):
# Set last timestep or (if not given) increase by one.
self.last_timestep = timestep if timestep is not None else \
self.last_timestep + 1
# Apply exploration.
if explore:
# Random exploration phase.
if self.last_timestep < self.random_timesteps:
action, _ = \
self.random_exploration.get_torch_exploration_action(
action_dist, explore=True)
# Apply base-scaled and time-annealed scaled OU-noise to
# deterministic actions.
else:
det_actions = action_dist.deterministic_sample()
scale = self.scale_schedule(self.last_timestep)
gaussian_sample = scale * torch.normal(
mean=torch.zeros(self.ou_state.size()), std=1.0) \
.to(self.device)
ou_new = self.ou_theta * -self.ou_state + \
self.ou_sigma * gaussian_sample
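                # Discrete OU update: X_{t+1} = X_t - theta*X_t + sigma*(scaled Gaussian sample).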
self.ou_state += ou_new
high_m_low = torch.from_numpy(
self.action_space.high - self.action_space.low). \
to(self.device)
high_m_low = torch.where(
torch.isinf(high_m_low),
torch.ones_like(high_m_low).to(self.device), high_m_low)
noise = scale * self.ou_base_scale * self.ou_state * high_m_low
action = torch.min(
torch.max(
det_actions + noise,
torch.tensor(
self.action_space.low,
dtype=torch.float32,
device=self.device)),
torch.tensor(
self.action_space.high,
dtype=torch.float32,
device=self.device))
# No exploration -> Return deterministic actions.
else:
action = action_dist.deterministic_sample()
# Logp=always zero.
logp = torch.zeros(
(action.size()[0], ), dtype=torch.float32, device=self.device)
return action, logp
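# Usage sketch: with algorithms such as DDPG or TD3, this exploration is usually
# selected through the trainer's `exploration_config`, using the constructor
# parameters documented above, e.g.
#   exploration_config = {
#       "type": "OrnsteinUhlenbeckNoise",
#       "random_timesteps": 1000,
#       "ou_base_scale": 0.1,
#       "ou_theta": 0.15,
#       "ou_sigma": 0.2,
#       "initial_scale": 1.0,
#       "final_scale": 0.02,
#       "scale_timesteps": 10000,
#   }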
|
|
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
from __future__ import print_function
import argparse
import logging
import os
# colorama, BSD 3-Clause license, cross-platform terminal colours, pip install colorama
import colorama
# validate, , validate things, internal
from .lib import validate
# Target, , represents an installed target, internal
from .lib import target
# access, , get components (and check versions), internal
from .lib import access
from .lib import access_common
# fsutils, , misc filesystem utils, internal
from .lib import fsutils
def addOptions(parser):
parser.add_argument('--all', '-a', dest='show_all', default=False, action='store_true',
help='Show all dependencies (including repeats, and test-only dependencies)'
)
def execCommand(args, following_args):
c = validate.currentDirectoryModule()
if not c:
return 1
if not args.target:
logging.error('No target has been set, use "yotta target" to set one.')
return 1
target, errors = c.satisfyTarget(args.target)
if errors:
for error in errors:
logging.error(error)
return 1
dependencies = c.getDependenciesRecursive(
target = target,
available_components = [(c.getName(), c)],
test = True
)
putln(
ComponentDepsFormatter(
target = target,
available_components = dependencies,
plain = args.plain,
list_all = args.show_all
).format(
c, [c.getName()]
)
)
def islast(generator):
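    # Yields (item, is_last) pairs, e.g. islast(iter('abc')) produces
    # ('a', False), ('b', False), ('c', True).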
next_x = None
first = True
for x in generator:
if not first:
yield (next_x, False)
next_x = x
first = False
if not first:
yield (next_x, True)
def putln(x):
if u'unicode' in str(type(x)):
# python 2.7
print(x.encode('utf-8'))
else:
print(x)
def relpathIfSubdir(path):
relpath = os.path.relpath(path)
if relpath.startswith('..'):
return path
else:
return relpath
class ComponentDepsFormatter(object):
def __init__(self, target=None, available_components=None, list_all=False, plain=False):
# don't even try to do Unicode on windows. Even if we can encode it
# correctly, the default terminal fonts don't support Unicode
# characters :(
self.use_unicode = not ((os.name == 'nt') or plain)
self.use_colours = not plain
self.target = target
self.list_all = list_all
self.available = available_components
if plain:
self.L_Char = u' '
self.T_Char = u' '
self.Dash_Char = u' '
self.Pipe_Char = u' '
elif self.use_unicode:
self.L_Char = u'\u2517'
self.T_Char = u'\u2523'
self.Dash_Char = u'\u2500'
self.Pipe_Char = u'\u2503'
else:
self.L_Char = u'\\'
self.T_Char = u'|'
self.Dash_Char = u'_'
self.Pipe_Char = u'|'
super(ComponentDepsFormatter, self).__init__()
def format(
self,
component,
processed,
indent=u'',
tee=u'',
installed_at=u'',
test_dep=False
):
r = u''
if self.use_colours:
DIM = colorama.Style.DIM
BRIGHT = colorama.Style.BRIGHT
GREEN = colorama.Fore.GREEN
RED = colorama.Fore.RED
RESET = colorama.Style.RESET_ALL
else:
DIM = BRIGHT = GREEN = RED = RESET = u''
mods_path = component.modulesPath()
deps = component.getDependencies(
available_components = self.available,
target = self.target,
test = True,
warnings = False
)
specs = dict([(x.name, x) for x in component.getDependencySpecs(target=self.target)])
def isTestOnly(name):
return specs[name].is_test_dependency
def shouldDisplay(x):
if self.list_all:
# list everything everywhere (apart from test dependencies of test
# dependencies, which should be considered irrelevant)
if component.isTestDependency() and isTestOnly(x[0]):
return False
else:
return True
if (not isTestOnly(x[0]) or not len(indent)):
            # this is a non-test dependency, or a top-level test dependency
if not x[1]:
# if it's missing, display it
return True
if x[1].path == os.path.join(mods_path, x[0]):
# if it's installed in this module, display it
return True
if x[0] in deps_here:
# if it's first depended on by this module, then display it
return True
# everything else shouldn't be displayed here
return False
line = indent[:-2] + tee + component.getName() + u' ' + DIM + str(component.getVersion()) + RESET
if test_dep:
line += u' ' + DIM + u'(test dependency)' + RESET
if len(installed_at):
line += u' ' + DIM + installed_at + RESET
if component.installedLinked():
line += GREEN + BRIGHT + u' -> ' + RESET + GREEN + fsutils.realpath(component.path) + RESET
r += line + '\n'
deps_here = [x for x in list(deps.keys()) if (x not in processed)]
print_deps = [x for x in list(deps.items()) if shouldDisplay(x)]
processed += [x[0] for x in print_deps]
for (name, dep), last in islast(print_deps):
if last:
next_indent = indent + u' '
tee = self.L_Char + self.Dash_Char + u' '
next_tee = self.L_Char + self.Dash_Char + u' '
else:
next_indent = indent + self.Pipe_Char + u' '
tee = self.T_Char + self.Dash_Char + u' '
next_tee = self.T_Char + self.Dash_Char + u' '
test_dep_status = u''
if isTestOnly(name):
test_dep_status = u' (test dependency)'
if not dep:
r += indent + tee + name + u' ' + specs[name].version_req + test_dep_status + BRIGHT + RED + ' missing' + RESET + '\n'
else:
spec = access.remoteComponentFor(name, specs[name].version_req, 'modules').versionSpec()
if not spec:
spec_descr = u''
elif spec.match(dep.getVersion()):
spec_descr = u' ' + str(spec)
else:
spec_descr = u' ' + RESET + BRIGHT + RED + str(spec)
spec_descr += test_dep_status
if name in deps_here:
# dependencies that are first used here may actually be
# installed higher up our dependency tree, if they are,
# illustrate that:
if dep.path == os.path.join(mods_path, name):
r += self.format(
dep,
processed,
next_indent,
next_tee,
test_dep = isTestOnly(name)
)
else:
r += self.format(
dep,
processed,
next_indent,
next_tee,
installed_at = relpathIfSubdir(dep.path),
test_dep = isTestOnly(name)
)
else:
r += indent + tee + DIM + name + spec_descr + RESET + '\n'
return r
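# Invocation sketch (assuming this module backs the `yotta list` subcommand):
#   $ yotta list          # print the dependency tree of the current module
#   $ yotta list --all    # also show repeated and test-only dependencies (see addOptions)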
|
|
#
# PATCHED VERSION of django-filebrowser 'sites' module that replaces the
# 'staff_member_required' decorator with the 'login_required' decorator
# Also, it uses the COG-specific decorator 'filebrowser_check_role'
# to enforce a specific role for the project associated with a specific target folder
# COG imports
from django.contrib.auth.decorators import login_required
from cog.cog_filebrowser import filebrowser_check, project_filter, get_browsable_projects
from cog.models.project import Project, getProjectsForUser
from cog.models.doc import Doc
from django.shortcuts import get_object_or_404
# coding: utf-8
# PYTHON IMPORTS
import os, re
from types import MethodType
# DJANGO IMPORTS
from django.shortcuts import render, HttpResponse
from django.template import RequestContext as Context
from django.http import HttpResponseRedirect, Http404
from django.contrib.admin.views.decorators import staff_member_required
from django.views.decorators.cache import never_cache
from django.utils.translation import ugettext as _
from django import forms
from django.core.urlresolvers import reverse, get_urlconf, get_resolver
from django.dispatch import Signal
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.utils.encoding import smart_unicode
from django.contrib import messages
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.core.files.base import ContentFile
from django.core.files.storage import DefaultStorage, default_storage, FileSystemStorage
from django.core.exceptions import ImproperlyConfigured
# FILEBROWSER IMPORTS
from django.conf.urls import url, include
from filebrowser.functions import get_breadcrumbs, get_filterdate, get_settings_var, handle_file_upload, convert_filename
from filebrowser.templatetags.fb_tags import query_helper
from filebrowser.base import FileListing, FileObject
from filebrowser.decorators import path_exists, file_exists
from filebrowser.storage import FileSystemStorageMixin, StorageMixin
from filebrowser.settings import *
# Add some required methods to FileSystemStorage
if FileSystemStorageMixin not in FileSystemStorage.__bases__:
FileSystemStorage.__bases__ += (FileSystemStorageMixin,)
# PIL import
if STRICT_PIL:
from PIL import Image
else:
try:
from PIL import Image
except ImportError:
import Image
# JSON import
import json
# This cache contains all *instantiated* FileBrowser sites
_sites_cache = {}
def get_site_dict(app_name='filebrowser'):
"""
Return a dict with all *deployed* FileBrowser sites that have
a given app_name.
"""
if not _sites_cache.has_key(app_name):
return {}
# Get names of all deployed filebrowser sites with a given app_name
deployed = get_resolver(get_urlconf()).app_dict[app_name]
# Get the deployed subset from the cache
return dict((k,v) for k, v in _sites_cache[app_name].iteritems() if k in deployed)
def register_site(app_name, site_name, site):
"""
Add a site into the site dict.
"""
if not _sites_cache.has_key(app_name):
_sites_cache[app_name] = {}
_sites_cache[app_name][site_name] = site
def get_default_site(app_name='filebrowser'):
"""
Returns the default site. This function uses Django's url resolution method to
obtain the name of the default site.
"""
# Get the name of the default site:
resolver = get_resolver(get_urlconf())
name = 'filebrowser'
# Django's default name resolution method (see django.core.urlresolvers.reverse())
app_list = resolver.app_dict[app_name]
if not name in app_list:
name = app_list[0]
return get_site_dict()[name]
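# Illustrative sketch (not called anywhere): how the registry helpers above
# fit together.  The site name 'example' is hypothetical.
def _example_site_lookup():
    """Instantiate a site and look it up again through the registry helpers."""
    example_site = FileBrowserSite(name='example')  # __init__ calls register_site()
    assert _sites_cache['filebrowser']['example'] is example_site
    # get_site_dict()/get_default_site() only report sites whose urls are
    # actually deployed in the URLconf, so a site that was merely
    # instantiated (but not wired into urlpatterns) may not appear here:
    return get_site_dict('filebrowser').get('example')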
class FileBrowserSite(object):
def __init__(self, name=None, app_name='filebrowser', storage=default_storage):
self.name = name
self.app_name = app_name
self.storage = storage
self._actions = {}
self._global_actions = self._actions.copy()
# Register this site in the global site cache
register_site(self.app_name, self.name, self)
# Per-site settings:
self.directory = DIRECTORY
def _directory_get(self):
return self._directory
def _directory_set(self, val):
self._directory = val
directory = property(_directory_get, _directory_set)
def filebrowser_view(self, view):
#return staff_member_required(never_cache(view))
return login_required(never_cache(view))
def get_urls(self):
from django.conf.urls import url, include
urlpatterns = [
# filebrowser urls (views)
url(r'^browse/$', path_exists(self, self.filebrowser_view(self.browse)), name="fb_browse"),
url(r'^createdir/', path_exists(self, self.filebrowser_view(self.createdir)), name="fb_createdir"),
url(r'^upload/', path_exists(self, self.filebrowser_view(self.upload)), name="fb_upload"),
url(r'^delete_confirm/$', file_exists(self, path_exists(self, self.filebrowser_view(self.delete_confirm))), name="fb_delete_confirm"),
url(r'^delete/$', file_exists(self, path_exists(self, self.filebrowser_view(self.delete))), name="fb_delete"),
url(r'^detail/$', file_exists(self, path_exists(self, self.filebrowser_view(self.detail))), name="fb_detail"),
url(r'^version/$', file_exists(self, path_exists(self, self.filebrowser_view(self.version))), name="fb_version"),
# non-views
#url(r'^upload_file/$', staff_member_required(csrf_exempt(self._upload_file)), name="fb_do_upload"),
url(r'^upload_file/$', login_required(csrf_exempt(self._upload_file)), name="fb_do_upload"),
]
return urlpatterns
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
# Check/create short description
if not hasattr(action, 'short_description'):
action.short_description = action.__name__.replace("_", " ").capitalize()
# Check/create applies-to filter
if not hasattr(action, 'applies_to'):
action.applies_to = lambda x: True
self._actions[name] = action
self._global_actions[name] = action
def disable_action(self, name):
"""
Disable a globally-registered action. Raises KeyError for invalid names.
"""
del self._actions[name]
def get_action(self, name):
"""
Explicitly get a registered global action whether it's enabled or
not. Raises KeyError for invalid names.
"""
return self._global_actions[name]
def applicable_actions(self, fileobject):
"""
Return a list of tuples (name, action) of actions applicable to a given fileobject.
"""
res = []
for name, action in self.actions:
if action.applies_to(fileobject):
res.append((name, action))
return res
@property
def actions(self):
"""
Get all the enabled actions as a list of (name, func). The list
is sorted alphabetically by action names
"""
res = self._actions.items()
res.sort(key=lambda name_func: name_func[0])
return res
@property
def urls(self):
return self.get_urls(), self.app_name, self.name
def browse(self, request):
"""
Browse Files/Directories.
"""
filter_re = []
for exp in EXCLUDE:
filter_re.append(re.compile(exp))
for k,v in VERSIONS.iteritems():
exp = (r'_%s(%s)') % (k, '|'.join(EXTENSION_LIST))
filter_re.append(re.compile(exp))
# COG: retrieve all active projects for authenticated user
projects = get_browsable_projects(request)
def filter_browse(item):
filtered = item.filename.startswith('.')
# COG: additional filtering by project
filtered = filtered or not project_filter(item, request.user, projects)
for re_prefix in filter_re:
if re_prefix.search(item.filename):
filtered = True
if filtered:
return False
return True
query = request.GET.copy()
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
filelisting = FileListing(path,
filter_func=filter_browse,
sorting_by=query.get('o', DEFAULT_SORTING_BY),
sorting_order=query.get('ot', DEFAULT_SORTING_ORDER),
site=self)
files = []
if SEARCH_TRAVERSE and query.get("q"):
listing = filelisting.files_walk_filtered()
else:
listing = filelisting.files_listing_filtered()
# If we do a search, precompile the search pattern now
do_search = query.get("q")
if do_search:
re_q = re.compile(query.get("q").lower(), re.M)
filter_type = query.get('filter_type')
filter_date = query.get('filter_date')
for fileobject in listing:
# date/type filter
append = False
if (not filter_type or fileobject.filetype == filter_type) and (not filter_date or get_filterdate(filter_date, fileobject.date or 0)):
append = True
# search
if do_search and not re_q.search(fileobject.filename.lower()):
append = False
# append
if append:
files.append(fileobject)
filelisting.results_total = len(listing)
filelisting.results_current = len(files)
p = Paginator(files, LIST_PER_PAGE)
page_nr = request.GET.get('p', '1')
try:
page = p.page(page_nr)
except (EmptyPage, InvalidPage):
page = p.page(p.num_pages)
return render(request,
'filebrowser/index.html',
{'p': p,
'page': page,
'filelisting': filelisting,
'query': query,
'title': _(u'FileBrowser'),
'settings_var': get_settings_var(directory=self.directory),
'breadcrumbs': get_breadcrumbs(query, query.get('dir', '')),
'breadcrumbs_title': "",
'filebrowser_site': self })
#context_instance=Context(request, current_app=self.name))
# mkdir signals
filebrowser_pre_createdir = Signal(providing_args=["path", "name"])
filebrowser_post_createdir = Signal(providing_args=["path", "name"])
@filebrowser_check()
def createdir(self, request):
"""
Create Directory.
"""
from filebrowser.forms import CreateDirForm
query = request.GET
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
if request.method == 'POST':
form = CreateDirForm(path, request.POST, filebrowser_site=self)
if form.is_valid():
server_path = os.path.join(path, form.cleaned_data['name'])
try:
self.filebrowser_pre_createdir.send(sender=request, path=server_path, name=form.cleaned_data['name'])
self.storage.makedirs(server_path)
# os.mkdir(server_path)
# os.chmod(server_path, 0775) # ??? PERMISSIONS
self.filebrowser_post_createdir.send(sender=request, path=server_path, name=form.cleaned_data['name'])
messages.add_message(request, messages.SUCCESS, _('The Folder %s was successfully created.') % form.cleaned_data['name'])
redirect_url = reverse("filebrowser:fb_browse", current_app=self.name) + query_helper(query, "ot=desc,o=date", "ot,o,filter_type,filter_date,q,p")
return HttpResponseRedirect(redirect_url)
except OSError, (errno, strerror):
if errno == 13:
form.errors['name'] = forms.util.ErrorList([_('Permission denied.')])
else:
form.errors['name'] = forms.util.ErrorList([_('Error creating folder.')])
else:
form = CreateDirForm(path, filebrowser_site=self)
return render(request,
'filebrowser/createdir.html',
{'form': form,
'query': query,
'title': _(u'New Folder'),
'settings_var': get_settings_var(directory=self.directory),
'breadcrumbs': get_breadcrumbs(query, query.get('dir', '')),
'breadcrumbs_title': _(u'New Folder'),
'filebrowser_site': self } ) #context_instance=Context(request, current_app=self.name))
@filebrowser_check()
def upload(self, request):
"""
Multiple File Upload.
"""
query = request.GET
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
return render(request,
'filebrowser/upload.html',
{ 'query': query,
'title': _(u'Select files to upload'),
'settings_var': get_settings_var(directory=self.directory),
'breadcrumbs': get_breadcrumbs(query, query.get('dir', '')),
'breadcrumbs_title': _(u'Upload'),
'filebrowser_site': self }) #context_instance=Context(request, current_app=self.name))
@filebrowser_check()
def delete_confirm(self, request):
"""
Delete existing File/Directory.
"""
query = request.GET
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
fileobject = FileObject(os.path.join(path, query.get('filename', '')), site=self)
if fileobject.filetype == "Folder":
filelisting = FileListing(os.path.join(path, fileobject.filename),
sorting_by=query.get('o', 'filename'),
sorting_order=query.get('ot', DEFAULT_SORTING_ORDER),
site=self)
filelisting = filelisting.files_walk_total()
if len(filelisting) > 100:
additional_files = len(filelisting) - 100
filelisting = filelisting[:100]
else:
additional_files = None
else:
filelisting = None
additional_files = None
return render(request, 'filebrowser/delete_confirm.html', {
'fileobject': fileobject,
'filelisting': filelisting,
'additional_files': additional_files,
'query': query,
'title': _(u'Confirm delete'),
'settings_var': get_settings_var(directory=self.directory),
'breadcrumbs': get_breadcrumbs(query, query.get('dir', '')),
'breadcrumbs_title': _(u'Confirm delete'),
'filebrowser_site': self
}) #, context_instance=Context(request, current_app=self.name))
# delete signals
filebrowser_pre_delete = Signal(providing_args=["path", "name"])
filebrowser_post_delete = Signal(providing_args=["path", "name"])
@filebrowser_check()
def delete(self, request):
"""
Delete existing File/Directory.
"""
query = request.GET
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
fileobject = FileObject(os.path.join(path, query.get('filename', '')), site=self)
if request.GET:
try:
# COG: must delete Doc objects
docs = Doc.objects.filter(file=fileobject.path)
for doc in docs:
print 'Deleting doc=%s' % doc
doc.delete()
self.filebrowser_pre_delete.send(sender=request, path=fileobject.path, name=fileobject.filename)
fileobject.delete_versions()
fileobject.delete()
self.filebrowser_post_delete.send(sender=request, path=fileobject.path, name=fileobject.filename)
messages.add_message(request, messages.SUCCESS, _('Successfully deleted %s') % fileobject.filename)
except OSError, (errno, strerror):
# TODO: define error-message
pass
redirect_url = reverse("filebrowser:fb_browse", current_app=self.name) + query_helper(query, "", "filename,filetype")
return HttpResponseRedirect(redirect_url)
# rename signals
filebrowser_pre_rename = Signal(providing_args=["path", "name", "new_name"])
filebrowser_post_rename = Signal(providing_args=["path", "name", "new_name"])
filebrowser_actions_pre_apply = Signal(providing_args=['action_name', 'fileobjects',])
filebrowser_actions_post_apply = Signal(providing_args=['action_name', 'fileobjects', 'result'])
# this view must be public as there is no 'dir' information
def detail(self, request):
"""
Show detail page for a file.
Rename existing File/Directory (deletes existing Image Versions/Thumbnails).
"""
from filebrowser.forms import ChangeForm
query = request.GET
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
fileobject = FileObject(os.path.join(path, query.get('filename', '')), site=self)
if request.method == 'POST':
form = ChangeForm(request.POST, path=path, fileobject=fileobject, filebrowser_site=self)
if form.is_valid():
new_name = form.cleaned_data['name']
action_name = form.cleaned_data['custom_action']
try:
action_response = None
if action_name:
action = self.get_action(action_name)
# Pre-action signal
self.filebrowser_actions_pre_apply.send(sender=request, action_name=action_name, fileobject=[fileobject])
# Call the action on the file object
action_response = action(request=request, fileobjects=[fileobject])
# Post-action signal
self.filebrowser_actions_post_apply.send(sender=request, action_name=action_name, fileobject=[fileobject], result=action_response)
if new_name != fileobject.filename:
self.filebrowser_pre_rename.send(sender=request, path=fileobject.path, name=fileobject.filename, new_name=new_name)
fileobject.delete_versions()
self.storage.move(fileobject.path, os.path.join(fileobject.head, new_name))
self.filebrowser_post_rename.send(sender=request, path=fileobject.path, name=fileobject.filename, new_name=new_name)
messages.add_message(request, messages.SUCCESS, _('Renaming was successful.'))
if isinstance(action_response, HttpResponse):
return action_response
if "_continue" in request.POST:
redirect_url = reverse("filebrowser:fb_detail", current_app=self.name) + query_helper(query, "filename="+new_name, "filename")
else:
redirect_url = reverse("filebrowser:fb_browse", current_app=self.name) + query_helper(query, "", "filename")
return HttpResponseRedirect(redirect_url)
except OSError, (errno, strerror):
form.errors['name'] = forms.util.ErrorList([_('Error.')])
else:
form = ChangeForm(initial={"name": fileobject.filename}, path=path, fileobject=fileobject, filebrowser_site=self)
return render(request, 'filebrowser/detail.html', {
'form': form,
'fileobject': fileobject,
'query': query,
'title': u'%s' % fileobject.filename,
'settings_var': get_settings_var(directory=self.directory),
'breadcrumbs': get_breadcrumbs(query, query.get('dir', '')),
'breadcrumbs_title': u'%s' % fileobject.filename,
'filebrowser_site': self
}) # context_instance=Context(request, current_app=self.name))
def version(self, request):
"""
Version detail.
"""
query = request.GET
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
fileobject = FileObject(os.path.join(path, query.get('filename', '')), site=self)
return render(request, 'filebrowser/version.html', {
'fileobject': fileobject,
'query': query,
'settings_var': get_settings_var(directory=self.directory),
'filebrowser_site': self
}) # context_instance=Context(request, current_app=self.name))
# upload signals
filebrowser_pre_upload = Signal(providing_args=["path", "file"])
filebrowser_post_upload = Signal(providing_args=["path", "file"])
def _upload_file(self, request):
"""
Upload file to the server.
"""
if request.method == "POST":
if request.is_ajax(): # Advanced (AJAX) submission
folder = request.GET.get('folder')
filedata = ContentFile(request.raw_post_data)
try:
filedata.name = convert_filename(request.GET['qqfile'])
except KeyError:
return HttpResponseBadRequest('Invalid request! No filename given.')
else: # Basic (iframe) submission
# TODO: This needs some attention, do we use this at all?
folder = request.POST.get('folder')
if len(request.FILES) == 1:
filedata = request.FILES.values()[0]
else:
raise Http404('Invalid request! Multiple files included.')
# filedata.name = convert_filename(upload.name)
filedata.name = convert_filename(request.POST.get('file_name'))
fb_uploadurl_re = re.compile(r'^.*(%s)' % reverse("filebrowser:fb_upload", current_app=self.name))
folder = fb_uploadurl_re.sub('', folder)
path = os.path.join(self.directory, folder)
file_name = os.path.join(path, filedata.name)
file_already_exists = self.storage.exists(file_name)
# Check for name collision with a directory
if file_already_exists and self.storage.isdir(file_name):
ret_json = {'success': False, 'filename': filedata.name}
return HttpResponse(json.dumps(ret_json))
self.filebrowser_pre_upload.send(sender=request, path=request.POST.get('folder'), file=filedata)
uploadedfile = handle_file_upload(path, filedata, site=self)
if file_already_exists:
old_file = smart_unicode(file_name)
new_file = smart_unicode(uploadedfile)
self.storage.move(new_file, old_file, allow_overwrite=True)
self.filebrowser_post_upload.send(sender=request, path=request.POST.get('folder'), file=FileObject(smart_unicode(file_name), site=self))
# let Ajax Upload know whether we saved it or not
ret_json = {'success': True, 'filename': filedata.name}
return HttpResponse(json.dumps(ret_json))
storage = DefaultStorage()
storage.location = MEDIA_ROOT
storage.base_url = MEDIA_URL
# Default FileBrowser site
site = FileBrowserSite(name='filebrowser', storage=storage)
# Default actions
from actions import *
site.add_action(flip_horizontal)
site.add_action(flip_vertical)
site.add_action(rotate_90_clockwise)
site.add_action(rotate_90_counterclockwise)
site.add_action(rotate_180)
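# Illustrative sketch (hypothetical action name, registration left commented
# out): a custom action follows the same add_action() contract as the image
# actions above, and detail() invokes it as action(request=request, fileobjects=[...]).
def announce_selection(request, fileobjects):
    """Illustrative action: flash the selected file names back to the user."""
    names = ", ".join([fileobject.filename for fileobject in fileobjects])
    messages.add_message(request, messages.INFO, 'Selected: %s' % names)
    # Returning an HttpResponse would be passed straight back by detail();
    # returning None lets the normal redirect happen.
    return None
announce_selection.short_description = 'Announce selected file names'
announce_selection.applies_to = lambda fileobject: True  # applicable to every file
#site.add_action(announce_selection)  # uncomment to register it with the default site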
|
|
"""
Parser for the datashape grammar.
"""
from __future__ import absolute_import, division, print_function
from . import lexer, error
# TODO: Remove coretypes dependency, make 100% of interaction through
# the type symbol table
from . import coretypes
__all__ = ['parse']
class DataShapeParser(object):
"""A DataShape parser object."""
def __init__(self, ds_str, sym):
# The datashape string being parsed
self.ds_str = ds_str
# Symbol tables for dimensions, dtypes, and type constructors for each
self.sym = sym
# The lexer
self.lex = lexer.lex(ds_str)
# The array of tokens self.lex has already produced
self.tokens = []
# The token currently being examined, and
# the end position, set when self.lex is exhausted
self.pos = -1
self.end_pos = None
# Advance to the first token
self.advance_tok()
def advance_tok(self):
"""Advances self.pos by one, if it is not already at the end."""
if self.pos != self.end_pos:
self.pos = self.pos + 1
try:
# If self.pos has not been backtracked,
# we need to request a new token from the lexer
if self.pos >= len(self.tokens):
self.tokens.append(next(self.lex))
except StopIteration:
# Create an EOF token, whose span starts at the
# end of the last token to use for error messages
if len(self.tokens) > 0:
span = (self.tokens[self.pos-1].span[1],)*2
else:
span = (0, 0)
self.tokens.append(lexer.Token(None, None, span, None))
self.end_pos = self.pos
@property
def tok(self):
return self.tokens[self.pos]
def raise_error(self, errmsg):
raise error.DataShapeSyntaxError(self.tok.span[0], '<nofile>',
self.ds_str, errmsg)
def parse_homogeneous_list(self, parse_item, sep_tok_id, errmsg,
trailing_sep=False):
"""
<item>_list : <item> <SEP> <item>_list
| <item>
Returns a list of <item>s, or None.
"""
saved_pos = self.pos
# Parse zero or more "<item> <SEP>" repetitions
items = []
item = True
while item is not None:
# Parse the <item>
item = parse_item()
if item is not None:
items.append(item)
if self.tok.id == sep_tok_id:
# If a <SEP> is next, there are more items
self.advance_tok()
else:
# Otherwise we've reached the end
return items
else:
if len(items) > 0:
if trailing_sep:
return items
else:
# If we already saw "<item> <SEP>" at least once,
# we can point at the more specific position within
# the list of <item>s where the error occurred
self.raise_error(errmsg)
else:
self.pos = saved_pos
return None
def syntactic_sugar(self, symdict, name, dshapemsg, error_pos=None):
"""
Looks up a symbol in the provided symbol table dictionary for
syntactic sugar, raising a standard error message if the symbol
is missing.
Parameters
----------
symdict : symbol table dictionary
One of self.sym.dtype, self.sym.dim,
self.sym.dtype_constr, or self.sym.dim_constr.
name : str
The name of the symbol to look up.
dshapemsg : str
The datashape construct this lookup is for, e.g.
'{...} dtype constructor'.
error_pos : int, optional
The position in the token stream at which to flag the error.
"""
entry = symdict.get(name)
if entry is not None:
return entry
else:
if error_pos is not None:
self.pos = error_pos
self.raise_error(('Symbol table missing "%s" ' +
'entry for %s') % (name, dshapemsg))
def parse_datashape(self):
"""
datashape : datashape_nooption
| QUESTIONMARK datashape_nooption
Returns a datashape object or None.
"""
if self.tok.id == lexer.QUESTIONMARK:
self.advance_tok()
saved_pos = self.pos
ds = self.parse_datashape_nooption()
if ds is not None:
# Look in the dtype symbol table for the option type constructor
option = self.syntactic_sugar(self.sym.dtype_constr, 'option',
'Option dtype construction',
saved_pos - 1)
return coretypes.DataShape(option(ds))
else:
return self.parse_datashape_nooption()
def parse_datashape_nooption(self):
"""
datashape_nooption : dim ASTERISK datashape
| dtype
Returns a datashape object or None.
"""
saved_pos = self.pos
# Try dim ASTERISK datashape
dim = self.parse_dim()
if dim is not None:
if self.tok.id == lexer.ASTERISK:
# If an asterisk is next, we're good
self.advance_tok()
saved_pos = self.pos
dshape = self.parse_datashape()
if dshape is None:
self.pos = saved_pos
self.raise_error('Expected a dim or a dtype')
return coretypes.DataShape(dim, *dshape.parameters)
# Try dtype
dtype = self.parse_dtype()
if dtype:
return coretypes.DataShape(dtype)
else:
return None
def parse_dim(self):
"""
dim : typevar
| ellipsis_typevar
| type
| type_constr
| INTEGER
| ELLIPSIS
typevar : NAME_UPPER
ellipsis_typevar : NAME_UPPER ELLIPSIS
type : NAME_LOWER
type_constr : NAME_LOWER LBRACKET type_arg_list RBRACKET
Returns the dim object, or None.
TODO: Support type constructors
"""
saved_pos = self.pos
tok = self.tok
if tok.id == lexer.NAME_UPPER:
val = tok.val
self.advance_tok()
if self.tok.id == lexer.ELLIPSIS:
self.advance_tok()
# TypeVars ellipses are treated as the "ellipsis" dim
tconstr = self.syntactic_sugar(self.sym.dim_constr, 'ellipsis',
'TypeVar... dim constructor',
saved_pos)
return tconstr(val)
elif self.tok.id == lexer.ASTERISK:
# Using a lookahead check for '*' after the TypeVar, so that
# the error message would be about a dtype problem instead
# of a dim problem when 'typevar' isn't in the symbol table
#
# TypeVars are treated as the "typevar" dim
tconstr = self.syntactic_sugar(self.sym.dim_constr, 'typevar',
'TypeVar dim constructor',
saved_pos)
return tconstr(val)
else:
self.pos = saved_pos
return None
elif tok.id == lexer.NAME_LOWER:
name = tok.val
self.advance_tok()
if self.tok.id == lexer.LBRACKET:
dim_constr = self.sym.dim_constr.get(name)
if dim_constr is None:
self.pos = saved_pos
return None
self.advance_tok()
args = self.parse_type_arg_list()
if self.tok.id == lexer.RBRACKET:
self.advance_tok()
raise NotImplementedError(
'dim type constructors not actually supported yet')
else:
self.raise_error('Expected a closing "]"')
else:
dim = self.sym.dim.get(name)
if dim is not None:
return dim
else:
self.pos = saved_pos
return None
elif tok.id == lexer.INTEGER:
val = tok.val
self.advance_tok()
# If the token after the INTEGER is not ASTERISK,
# it cannot be a dim, so skip it
if self.tok.id != lexer.ASTERISK:
self.pos = saved_pos
return None
# Integers are treated as "fixed" dimensions
tconstr = self.syntactic_sugar(self.sym.dim_constr, 'fixed',
'integer dimensions')
return tconstr(val)
elif tok.id == lexer.ELLIPSIS:
self.advance_tok()
# Ellipses are treated as the "ellipsis" dim
dim = self.syntactic_sugar(self.sym.dim, 'ellipsis',
'... dim',
saved_pos)
return dim
else:
return None
def parse_dtype(self):
"""
dtype : typevar
| type
| type_constr
| struct_type
| funcproto_or_tuple_type
typevar : NAME_UPPER
ellipsis_typevar : NAME_UPPER ELLIPSIS
type : NAME_LOWER
type_constr : NAME_LOWER LBRACKET type_arg_list RBRACKET
struct_type : LBRACE ...
funcproto_or_tuple_type : LPAREN ...
Returns the dtype object, or None.
"""
saved_pos = self.pos
tok = self.tok
if tok.id == lexer.NAME_UPPER:
val = tok.val
self.advance_tok()
# TypeVars are treated as the "typevar" dtype
tconstr = self.syntactic_sugar(self.sym.dtype_constr, 'typevar',
'TypeVar dtype constructor',
saved_pos)
return tconstr(val)
elif tok.id == lexer.NAME_LOWER:
name = tok.val
self.advance_tok()
if self.tok.id == lexer.LBRACKET:
dtype_constr = self.sym.dtype_constr.get(name)
if dtype_constr is None:
self.pos = saved_pos
return None
self.advance_tok()
args, kwargs = self.parse_type_arg_list()
if self.tok.id == lexer.RBRACKET:
if len(args) == 0 and len(kwargs) == 0:
self.raise_error('Expected at least one type ' +
'constructor argument')
self.advance_tok()
return dtype_constr(*args, **kwargs)
else:
self.raise_error('Invalid type constructor argument')
else:
dtype = self.sym.dtype.get(name)
if dtype is not None:
return dtype
else:
self.pos = saved_pos
return None
elif tok.id == lexer.LBRACE:
return self.parse_struct_type()
elif tok.id == lexer.LPAREN:
return self.parse_funcproto_or_tuple_type()
else:
return None
def parse_type_arg_list(self):
"""
type_arg_list : type_arg COMMA type_arg_list
| type_kwarg_list
| type_arg
type_kwarg_list : type_kwarg COMMA type_kwarg_list
| type_kwarg
Returns a tuple (args, kwargs), or (None, None).
"""
# Parse zero or more "type_arg COMMA" repetitions
args = []
arg = True
while arg is not None:
# Parse the type_arg
arg = self.parse_type_arg()
if arg is not None:
if self.tok.id == lexer.COMMA:
# If a comma is next, there are more args
self.advance_tok()
args.append(arg)
else:
# Otherwise we've reached the end, and there
# were no keyword args
args.append(arg)
return (args, {})
else:
break
kwargs = self.parse_homogeneous_list(self.parse_type_kwarg, lexer.COMMA,
'Expected another keyword argument, ' +
'positional arguments cannot follow ' +
'keyword arguments')
return (args, dict(kwargs) if kwargs else {})
def parse_type_arg(self):
"""
type_arg : datashape
| INTEGER
| STRING
| list_type_arg
list_type_arg : LBRACKET RBRACKET
| LBRACKET datashape_list RBRACKET
| LBRACKET integer_list RBRACKET
| LBRACKET string_list RBRACKET
Returns a type_arg value, or None.
"""
ds = self.parse_datashape()
if ds is not None:
return ds
if self.tok.id in [lexer.INTEGER, lexer.STRING]:
val = self.tok.val
self.advance_tok()
return val
elif self.tok.id == lexer.LBRACKET:
self.advance_tok()
val = self.parse_datashape_list()
if val is None:
val = self.parse_integer_list()
if val is None:
val = self.parse_string_list()
if self.tok.id == lexer.RBRACKET:
self.advance_tok()
return [] if val is None else val
else:
if val is None:
self.raise_error('Expected a type constructor argument ' +
'or a closing "]"')
else:
self.raise_error('Expected a "," or a closing "]"')
else:
return None
def parse_type_kwarg(self):
"""
type_kwarg : NAME_LOWER EQUAL type_arg
Returns a (name, type_arg) tuple, or None.
"""
if self.tok.id != lexer.NAME_LOWER:
return None
saved_pos = self.pos
name = self.tok.val
self.advance_tok()
if self.tok.id != lexer.EQUAL:
self.pos = saved_pos
return None
self.advance_tok()
arg = self.parse_type_arg()
if arg is not None:
return (name, arg)
else:
# After "NAME_LOWER EQUAL", a type_arg is required.
self.raise_error('Expected a type constructor argument')
def parse_datashape_list(self):
"""
datashape_list : datashape COMMA datashape_list
| datashape
Returns a list of datashape type objects, or None.
"""
return self.parse_homogeneous_list(self.parse_datashape, lexer.COMMA,
'Expected another datashape, ' +
'type constructor parameter ' +
'lists must have uniform type')
def parse_integer(self):
"""
integer : INTEGER
"""
if self.tok.id == lexer.INTEGER:
val = self.tok.val
self.advance_tok()
return val
else:
return None
def parse_integer_list(self):
"""
integer_list : INTEGER COMMA integer_list
| INTEGER
Returns a list of integers, or None.
"""
return self.parse_homogeneous_list(self.parse_integer, lexer.COMMA,
'Expected another integer, ' +
'type constructor parameter ' +
'lists must have uniform type')
def parse_string(self):
"""
string : STRING
"""
if self.tok.id == lexer.STRING:
val = self.tok.val
self.advance_tok()
return val
else:
return None
def parse_string_list(self):
"""
string_list : STRING COMMA string_list
| STRING
Returns a list of strings, or None.
"""
return self.parse_homogeneous_list(self.parse_string, lexer.COMMA,
'Expected another string, ' +
'type constructor parameter ' +
'lists must have uniform type')
def parse_struct_type(self):
"""
struct_type : LBRACE struct_field_list RBRACE
| LBRACE struct_field_list COMMA RBRACE
Returns a struct type, or None.
"""
if self.tok.id != lexer.LBRACE:
return None
saved_pos = self.pos
self.advance_tok()
fields = self.parse_homogeneous_list(self.parse_struct_field, lexer.COMMA,
'Invalid field in struct',
trailing_sep=True) or []
if self.tok.id != lexer.RBRACE:
self.raise_error('Invalid field in struct')
self.advance_tok()
# Split apart the names and types into separate lists,
# compatible with type constructor parameters
names = [f[0] for f in fields]
types = [f[1] for f in fields]
# Structs are treated as the "struct" dtype
tconstr = self.syntactic_sugar(self.sym.dtype_constr, 'struct',
'{...} dtype constructor', saved_pos)
return tconstr(names, types)
def parse_struct_field(self):
"""
struct_field : struct_field_name COLON datashape
struct_field_name : NAME_LOWER
| NAME_UPPER
| NAME_OTHER
| STRING
Returns a tuple (name, datashape object) or None
"""
if self.tok.id not in [lexer.NAME_LOWER, lexer.NAME_UPPER,
lexer.NAME_OTHER, lexer.STRING]:
return None
name = self.tok.val
self.advance_tok()
if self.tok.id != lexer.COLON:
self.raise_error('Expected a ":" separating the field ' +
'name from its datashape')
self.advance_tok()
ds = self.parse_datashape()
if ds is None:
self.raise_error('Expected the datashape of the field')
return (name, ds)
def parse_funcproto_or_tuple_type(self):
"""
funcproto_or_tuple_type : tuple_type RARROW datashape
| tuple_type
tuple_type : LPAREN tuple_item_list RPAREN
| LPAREN tuple_item_list COMMA RPAREN
| LPAREN RPAREN
tuple_item_list : datashape COMMA tuple_item_list
| datashape
Returns a tuple type object, a function prototype, or None.
"""
if self.tok.id != lexer.LPAREN:
return None
saved_pos = self.pos
self.advance_tok()
dshapes = self.parse_homogeneous_list(self.parse_datashape, lexer.COMMA,
'Invalid datashape in tuple',
trailing_sep=True)
if dshapes is None and self.tok.id == lexer.RPAREN:
self.raise_error('At least one datashape is required in ' +
'a tuple datashape')
if self.tok.id != lexer.RPAREN:
self.raise_error('Invalid datashape in tuple')
self.advance_tok()
if self.tok.id != lexer.RARROW:
# Tuples are treated as the "tuple" dtype
tconstr = self.syntactic_sugar(self.sym.dtype_constr, 'tuple',
'(...) dtype constructor', saved_pos)
return tconstr(dshapes)
else:
# Get the return datashape after the right arrow
self.advance_tok()
ret_dshape = self.parse_datashape()
if ret_dshape is None:
self.raise_error('Expected function prototype return ' +
'datashape')
# Function Prototypes are treated as the "funcproto" dtype
tconstr = self.syntactic_sugar(self.sym.dtype_constr, 'funcproto',
'(...) -> ... dtype constructor',
saved_pos)
return tconstr(dshapes, ret_dshape)
def parse(ds_str, sym):
"""Parses a single datashape from a string.
Parameters
----------
ds_str : string
The datashape string to parse.
sym : TypeSymbolTable
The symbol tables of dimensions, dtypes, and type constructors for each.
"""
dsp = DataShapeParser(ds_str, sym)
ds = dsp.parse_datashape()
# If no datashape could be found
if ds is None:
dsp.raise_error('Invalid datashape')
# Make sure there's no garbage at the end
if dsp.pos != dsp.end_pos:
dsp.raise_error('Unexpected token in datashape')
return ds
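# Illustrative sketch (not called anywhere): driving parse() with a default
# symbol table.  The import location of TypeSymbolTable is an assumption
# about the rest of this package.
def _example_parse():
    """Parse a simple datashape string using a default symbol table."""
    from .type_symbol_table import TypeSymbolTable  # assumed module name
    sym = TypeSymbolTable()
    # '10 * {x: int32, y: float64}' exercises a fixed dim, a struct dtype and
    # two scalar dtypes provided by the default symbol table.
    return parse('10 * {x: int32, y: float64}', sym)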
|
|
"""
Creates a shapefile with a given root name using data from given
X, Y, Z, and value arrays (curvilinear-type data). The shapes
are quadrilaterals derived from the X and Y arrays which can have
varying Z coordinates; i.e., vertices are the (i,j), (i,j+1),
(i+1,j+1), and (i+1,j) elements of the X, Y, and Z coordinates.
The value associated with each quadrilateral comes from the value
array; i.e., the (i,j) element of the value for the previously
mentioned quadrilateral. Quadrilaterals associated with missing
values are omitted from the shapefile.
"""
from __future__ import print_function
import shapefile
import pyferret
import pyferret.fershp
def ferret_init(efid):
"""
Initialization for the shapefile_writexyzval PyEF
"""
retdict = { "numargs": 7,
"descript": "Writes a shapefile of XY quadrilaterals with Z coordinates from the curvilinear data arrays.",
"restype": pyferret.FLOAT_ARRAY,
"axes": ( pyferret.AXIS_ABSTRACT,
pyferret.AXIS_DOES_NOT_EXIST,
pyferret.AXIS_DOES_NOT_EXIST,
pyferret.AXIS_DOES_NOT_EXIST,
pyferret.AXIS_DOES_NOT_EXIST,
pyferret.AXIS_DOES_NOT_EXIST, ),
"argnames": ( "SHAPEFILE", "GRIDX", "GRIDY", "GRIDZ", "VALUE", "VALNAME", "MAPPRJ"),
"argdescripts": ( "Shapefile name (any extension given is ignored)",
"X values (longitudes) for the quad. grid; must be 2D on X and Y axes",
"Y values (latitudes) for the quad. grid; must be 2D on X and Y axes",
"Z values (levels) for the quad. grid; must be 2D on X and Y axes",
"Shape values; must be 2D on X and Y axes",
"Name for the shape value",
"Common name or WKT description of map projection; " \
"if blank, WGS 84 is used", ),
"argtypes": ( pyferret.STRING_ONEVAL,
pyferret.FLOAT_ARRAY,
pyferret.FLOAT_ARRAY,
pyferret.FLOAT_ARRAY,
pyferret.FLOAT_ARRAY,
pyferret.STRING_ONEVAL,
pyferret.STRING_ONEVAL, ),
"influences": ( (False, False, False, False, False, False),
(False, False, False, False, False, False),
(False, False, False, False, False, False),
(False, False, False, False, False, False),
(False, False, False, False, False, False),
(False, False, False, False, False, False),
(False, False, False, False, False, False), ),
}
return retdict
def ferret_result_limits(efid):
"""
Abstract axis limits for the shapefile_writexyzval PyEF
"""
return ( (1, 1), None, None, None, None, None, )
def ferret_compute(efid, result, resbdf, inputs, inpbdfs):
"""
Create the shapefile named in inputs[0] using the grid X coordinates
given in inputs[1], grid Y coordinates given in inputs[2], grid Z
coordinates given in inputs[3], and shape values given in inputs[4].
The X, Y, and Z coordinates are used for the quadrilateral vertices
and must have an additional value along each dimension. The value
[i, j] is used for the quadrilateral with diagonal corners [i, j] and
[i+1, j+1]. Quadrilaterals associated with missing values are omitted
from the shapefile. The field name for the value in the shapefile is
given in inputs[5]. Either a common name or a WKT description of the
map projection for the coordinates should be given in inputs[6]. If
blank, WGS 84 is used. If successful, fills result (which might as
well be a 1x1x1x1 array) with zeros. If a problem occurs, an error
will be raised.
"""
shapefile_name = inputs[0]
grid_xs = inputs[1]
grid_ys = inputs[2]
grid_zs = inputs[3]
grid_vals = inputs[4]
missing_val = inpbdfs[4]
field_name = inputs[5].strip()
if not field_name:
field_name = "VALUE"
map_projection = inputs[6]
# Verify the shapes are as expected
if (grid_vals.shape[2] != 1) or (grid_vals.shape[3] != 1) or \
(grid_vals.shape[4] != 1) or (grid_vals.shape[5] != 1):
raise ValueError("The Z, T, E, and F axes of VALUE must be undefined or singleton axes")
exp_shape = ( grid_vals.shape[0] + 1, grid_vals.shape[1] + 1, 1, 1, 1, 1 )
if (grid_xs.shape != exp_shape) or \
(grid_ys.shape != exp_shape) or \
(grid_zs.shape != exp_shape):
raise ValueError('GRIDX, GRIDY, and GRIDZ must have one more value ' \
'along both X and Y axes compared to VALUE')
# Create polygons with a single field value
sfwriter = shapefile.Writer(shapefile.POLYGONZ)
sfwriter.field(field_name, "N", 20, 7)
# Add the shapes with their values
shape_written = False
for j in range(grid_vals.shape[1]):
for i in range(grid_vals.shape[0]):
if grid_vals[i, j, 0, 0, 0, 0] != missing_val:
shape_written = True
pyferret.fershp.addquadxyvalues(sfwriter,
(grid_xs[i, j, 0, 0, 0, 0], grid_ys[i, j, 0, 0, 0, 0]),
(grid_xs[i, j+1, 0, 0, 0, 0], grid_ys[i, j+1, 0, 0, 0, 0]),
(grid_xs[i+1, j+1, 0, 0, 0, 0], grid_ys[i+1, j+1, 0, 0, 0, 0]),
(grid_xs[i+1, j, 0, 0, 0, 0], grid_ys[i+1, j, 0, 0, 0, 0]),
( grid_zs[i, j, 0, 0, 0, 0],
grid_zs[i, j+1, 0, 0, 0, 0],
grid_zs[i+1, j+1, 0, 0, 0, 0],
grid_zs[i+1, j, 0, 0, 0, 0] ),
[ float(grid_vals[i, j, 0, 0, 0, 0]) ])
if not shape_written:
raise ValueError("All values are missing values")
sfwriter.save(shapefile_name)
# Create the .prj file from the map projection common name or the WKT description
pyferret.fershp.createprjfile(map_projection, shapefile_name)
result[:, :, :, :, :, :] = 0
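# Illustrative sketch (never executed here; the shapefile name and values are
# made up) of the argument shapes ferret_compute expects: a 1x1 value grid
# requires 2x2 coordinate grids, i.e. one extra vertex along both X and Y.
def _example_compute():
    """Write a single unit quadrilateral with value 1.0 to 'example_quad'."""
    import numpy
    grid_xs = numpy.zeros((2, 2, 1, 1, 1, 1), dtype=numpy.float64)
    grid_ys = numpy.zeros((2, 2, 1, 1, 1, 1), dtype=numpy.float64)
    grid_zs = numpy.zeros((2, 2, 1, 1, 1, 1), dtype=numpy.float64)
    grid_xs[1, :, 0, 0, 0, 0] = 1.0   # unit square in longitude ...
    grid_ys[:, 1, 0, 0, 0, 0] = 1.0   # ... and latitude
    vals = numpy.ones((1, 1, 1, 1, 1, 1), dtype=numpy.float64)
    resbdf = numpy.array([-1.0E34], dtype=numpy.float64)
    inpbdfs = numpy.array([-1.0E34] * 7, dtype=numpy.float64)
    result = numpy.ones((1, 1, 1, 1, 1, 1), dtype=numpy.float64)
    ferret_compute(0, result, resbdf,
                   ("example_quad", grid_xs, grid_ys, grid_zs, vals, "VALUE", ""),
                   inpbdfs)
    return result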
#
# The following is only for testing this module from the command line
#
if __name__ == "__main__":
import numpy
import numpy.random
import os
shapefilename = "tripolarwz"
fieldname = "AREA"
wgs84_descript = 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]]'
# real world longitudes and latitudes of tripolar coordinates X=80W:60E:10 + 100E:120W:10,Y=45N:85N:5
geolon_c = numpy.array([
[ -92.1, -87.7, -82.4, -75.7, -66.7, -53.8, -34.9, -10.0, 14.9, 33.8,
46.7, 55.7, 62.4, 67.7, 72.1, 87.9, 92.3, 97.6, 104.3, 113.3,
126.2, 145.1, 170.0, 194.9, 213.8, 226.7, 235.7, 242.4, 247.7, 252.1, 267.9, ],
[ -86.0, -78.5, -70.2, -60.9, -50.2, -38.1, -24.5, -10.0, 4.5, 18.1,
30.2, 40.9, 50.2, 58.5, 66.0, 94.0, 101.5, 109.8, 119.1, 129.8,
141.9, 155.5, 170.0, 184.5, 198.1, 210.2, 220.9, 230.2, 238.5, 246.0, 274.0, ],
[ -82.3, -73.1, -63.6, -53.7, -43.3, -32.5, -21.4, -10.0, 1.4, 12.5,
23.3, 33.7, 43.6, 53.1, 62.3, 97.7, 106.9, 116.4, 126.3, 136.7,
147.5, 158.6, 170.0, 181.4, 192.5, 203.3, 213.7, 223.6, 233.1, 242.3, 277.7, ],
[ -80.5, -70.6, -60.7, -50.7, -40.6, -30.5, -20.3, -10.0, 0.3, 10.5,
20.6, 30.7, 40.7, 50.6, 60.5, 99.5, 109.4, 119.3, 129.3, 139.4,
149.5, 159.7, 170.0, 180.3, 190.5, 200.6, 210.7, 220.7, 230.6, 240.5, 279.5, ],
[ -80.0, -70.0, -60.0, -50.0, -40.0, -30.0, -20.0, -10.0, 0.0, 10.0,
20.0, 30.0, 40.0, 50.0, 60.0, 100.0, 110.0, 120.0, 130.0, 140.0,
150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 210.0, 220.0, 230.0, 240.0, 280.0, ],
[ -80.0, -70.0, -60.0, -50.0, -40.0, -30.0, -20.0, -10.0, 0.0, 10.0,
20.0, 30.0, 40.0, 50.0, 60.0, 100.0, 110.0, 120.0, 130.0, 140.0,
150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 210.0, 220.0, 230.0, 240.0, 280.0, ],
[ -80.0, -70.0, -60.0, -50.0, -40.0, -30.0, -20.0, -10.0, 0.0, 10.0,
20.0, 30.0, 40.0, 50.0, 60.0, 100.0, 110.0, 120.0, 130.0, 140.0,
150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 210.0, 220.0, 230.0, 240.0, 280.0, ],
[ -80.0, -70.0, -60.0, -50.0, -40.0, -30.0, -20.0, -10.0, 0.0, 10.0,
20.0, 30.0, 40.0, 50.0, 60.0, 100.0, 110.0, 120.0, 130.0, 140.0,
150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 210.0, 220.0, 230.0, 240.0, 280.0, ],
[ -80.0, -70.0, -60.0, -50.0, -40.0, -30.0, -20.0, -10.0, 0.0, 10.0,
20.0, 30.0, 40.0, 50.0, 60.0, 100.0, 110.0, 120.0, 130.0, 140.0,
150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 210.0, 220.0, 230.0, 240.0, 280.0, ],
], dtype=numpy.float64)
geolon_c = geolon_c.T[:, :, numpy.newaxis, numpy.newaxis, numpy.newaxis, numpy.newaxis]
geolat_c = numpy.array([
[ 71.85, 74.69, 77.25, 79.54, 81.58, 83.30, 84.53, 85.00, 84.53, 83.30,
81.58, 79.54, 77.25, 74.69, 71.85, 71.85, 74.69, 77.25, 79.54, 81.58,
83.30, 84.53, 85.00, 84.53, 83.30, 81.58, 79.54, 77.25, 74.69, 71.85, 71.85, ],
[ 70.51, 72.81, 74.83, 76.56, 77.99, 79.08, 79.76, 80.00, 79.76, 79.08,
77.99, 76.56, 74.83, 72.81, 70.51, 70.51, 72.81, 74.83, 76.56, 77.99,
79.08, 79.76, 80.00, 79.76, 79.08, 77.99, 76.56, 74.83, 72.81, 70.51, 70.51, ],
[ 68.71, 70.29, 71.67, 72.83, 73.76, 74.44, 74.86, 75.00, 74.86, 74.44,
73.76, 72.83, 71.67, 70.29, 68.71, 68.71, 70.29, 71.67, 72.83, 73.76,
74.44, 74.86, 75.00, 74.86, 74.44, 73.76, 72.83, 71.67, 70.29, 68.71, 68.71, ],
[ 66.80, 67.60, 68.30, 68.90, 69.37, 69.72, 69.93, 70.00, 69.93, 69.72,
69.37, 68.90, 68.30, 67.60, 66.80, 66.80, 67.60, 68.30, 68.90, 69.37,
69.72, 69.93, 70.00, 69.93, 69.72, 69.37, 68.90, 68.30, 67.60, 66.80, 66.80, ],
[ 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00,
65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00,
65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, 65.00, ],
[ 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00,
60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00,
60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, 60.00, ],
[ 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00,
55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00,
55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, 55.00, ],
[ 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00,
50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00,
50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, 50.00, ],
[ 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00,
45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00,
45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, 45.00, ],
], dtype=numpy.float64)
geolat_c = geolat_c.T[:, :, numpy.newaxis, numpy.newaxis, numpy.newaxis, numpy.newaxis]
# Just create random elevations in [0.0,100.0)
levels = 100.0 * numpy.random.rand(*(geolon_c.shape))
levels = numpy.array(levels, dtype=numpy.float64, order='F')
# Make the value an approximate sphere surface area (in square degrees) of the quadrilateral
vals = geolon_c[:-1, :-1] * geolat_c[:-1, 1:]
vals -= geolon_c[:-1, 1:] * geolat_c[:-1, :-1]
vals += geolon_c[:-1, 1:] * geolat_c[ 1:, 1:]
vals -= geolon_c[ 1:, 1:] * geolat_c[:-1, 1:]
vals += geolon_c[ 1:, 1:] * geolat_c[ 1:, :-1]
vals -= geolon_c[ 1:, :-1] * geolat_c[ 1:, 1:]
vals += geolon_c[ 1:, :-1] * geolat_c[:-1, :-1]
vals -= geolon_c[:-1, :-1] * geolat_c[ 1:, :-1]
vals = 0.5 * numpy.fabs(vals)
vals *= numpy.cos( 0.25 * numpy.deg2rad(geolat_c[:-1, :-1] + \
geolat_c[:-1, 1:] + \
geolat_c[ 1:, 1:] + \
geolat_c[ 1:, :-1]) )
# make sure these calls do not generate errors
info = ferret_init(0)
del info
limits = ferret_result_limits(0)
del limits
# check that ferret_compute does not cause any errors
resbdf = numpy.array([-99999.0], dtype=numpy.float64)
inpbdfs = numpy.array([-88888.0, -77777.0, -66666.0, -55555.0, -44444.0, -33333.0, -22222.0], dtype=numpy.float64)
result = numpy.ones((1,1,1,1,1,1), dtype=numpy.float64)
ferret_compute(0, result, resbdf, (shapefilename, geolon_c, geolat_c, levels, vals, fieldname, ""), inpbdfs)
if result[0,0,0,0,0,0] != 0.0:
raise ValueError("ferret_compute result array value: expected 0.0, found %f" % result[0,0,0,0,0,0])
# create the expected arrays returned from shapefile.Reader
# as well as from shapefile_readxyz and shapefile_readxyzval
exppoints = []
expzs = []
expvals = []
expcurvals = [ [], [], [], [] ]
for j in range(vals.shape[1]):
for i in range(vals.shape[0]):
# add the values expected to be returned from shapefile.Reader
exppoints.append( numpy.array([ [ geolon_c[i, j, 0, 0, 0, 0],
geolat_c[i, j, 0, 0, 0, 0] ],
[ geolon_c[i+1, j, 0, 0, 0, 0],
geolat_c[i+1, j, 0, 0, 0, 0] ],
[ geolon_c[i+1, j+1, 0, 0, 0, 0],
geolat_c[i+1, j+1, 0, 0, 0, 0] ],
[ geolon_c[i, j+1, 0, 0, 0, 0],
geolat_c[i, j+1, 0, 0, 0, 0] ],
[ geolon_c[i, j, 0, 0, 0, 0],
geolat_c[i, j, 0, 0, 0, 0] ] ]) )
expzs.append( numpy.array([ levels[i, j, 0, 0, 0, 0],
levels[i+1, j, 0, 0, 0, 0],
levels[i+1, j+1, 0, 0, 0, 0],
levels[i, j+1, 0, 0, 0, 0],
levels[i, j, 0, 0, 0, 0] ]) )
expvals.append(vals[i, j, 0, 0, 0, 0])
# add the expected values returned from shapefile_readxyz and shapefile_readxyzval
expcurvals[0].extend(exppoints[-1][:,0])
expcurvals[0].append(resbdf[0])
expcurvals[1].extend(exppoints[-1][:,1])
expcurvals[1].append(resbdf[0])
expcurvals[2].extend(expzs[-1])
expcurvals[2].append(resbdf[0])
expcurvals[3].append(expvals[-1])
# shapefile_readxyz and shapefile_readxyzval return numpy.float64 arrays
expcurvals[0] = numpy.array(expcurvals[0], dtype=numpy.float64)
expcurvals[1] = numpy.array(expcurvals[1], dtype=numpy.float64)
expcurvals[2] = numpy.array(expcurvals[2], dtype=numpy.float64)
expcurvals[3] = numpy.array(expcurvals[3], dtype=numpy.float64)
# check the values read using shapefile.Reader
sfreader = shapefile.Reader(shapefilename)
shapes = sfreader.shapes()
records = sfreader.records()
explen = vals.shape[0] * vals.shape[1]
if len(shapes) != explen:
raise ValueError("Expected %d shapes; found %d" % (explen, len(shapes)))
if len(records) != explen:
raise ValueError("Expected %d records; found %d" % (explen, len(records)))
# this does not assume any particular order in which the shapes were written
for (shape, record) in zip(shapes, records):
for k in range(len(exppoints)):
if numpy.allclose(shape.points, exppoints[k], rtol=1.0E-4):
break
else:
raise ValueError("Unexpected X,Y vertices %s" % str(shape.points))
if not numpy.allclose(shape.z, expzs[k], rtol=1.0E-4, atol=1.0E-5):
raise ValueError("Expected Zs %s; found %s for shape.points %s" % \
(str(expzs[k]), str(shape.z), str(shape.points)))
if not numpy.allclose(record, expvals[k], rtol=1.0E-4):
raise ValueError("Expected value %s; found %s for shape.points %s" % \
(str(expvals[k]), str(record), str(shape.points)))
junk = exppoints.pop(k)
junk = expzs.pop(k)
junk = expvals.pop(k)
prjfile = open("%s.prj" % shapefilename, "r")
datalines = prjfile.readlines()
prjfile.close()
if len(datalines) != 1:
raise ValueError("Number of lines in the .prj file: expected: 1, found %d" % len(datalines))
descript = datalines[0].strip()
if descript != wgs84_descript:
raise ValueError("Description in the .prj file:\n" \
" expect: %s\n" \
" found: %s" % (wgs84_descript, descript))
print("shapefile_writexyzval: SUCCESS")
# Check the result for calling ferret_compute of shapefile_readxyz
# in this directory. This assumes the ordering of the shapes does
# not change, which appears to be the case but is not required.
import shapefile_readxyz
maxpts = len(expcurvals[0])
result = -11111.0 * numpy.ones((maxpts, 3, 1, 1, 1, 1), dtype=numpy.float64, order='F')
shapefile_readxyz.ferret_compute(0, result, resbdf, (shapefilename, maxpts), inpbdfs[:2])
if not numpy.allclose(result[:,0,0,0,0,0], expcurvals[0], rtol=1.0E-4):
raise ValueError("Xs from shapefile_readxyz:\n expected\n%s\n found\n%s" % \
(str(expcurvals[0]), str(result[:,0,0,0,0,0])))
if not numpy.allclose(result[:,1,0,0,0,0], expcurvals[1], rtol=1.0E-4):
raise ValueError("Ys from shapefile_readxyz:\n expected\n%s\n found\n%s" % \
(str(expcurvals[1]), str(result[:,1,0,0,0,0])))
if not numpy.allclose(result[:,2,0,0,0,0], expcurvals[2], rtol=1.0E-4, atol=1.0E-5):
raise ValueError("Zs from shapefile_readxyz:\n expected\n%s\n found\n%s" % \
(str(expcurvals[2]), str(result[:,2,0,0,0,0])))
print("shapefile_readxyz: SUCCESS")
# Check the result for calling ferret_compute of shapefile_readxyzval
# in this directory. This assumes the ordering of the shapes does
# not change, which appears to be the case but is not required.
import shapefile_readxyzval
result = -11111.0 * numpy.ones((maxpts, 4, 1, 1, 1, 1), dtype=numpy.float64, order='F')
shapefile_readxyzval.ferret_compute(0, result, resbdf, (shapefilename, fieldname, maxpts), inpbdfs[:3])
if not numpy.allclose(result[:,0,0,0,0,0], expcurvals[0], rtol=1.0E-4):
raise ValueError("Xs from shapefile_readxyzval:\n expected\n%s\n found\n%s" % \
(str(expcurvals[0]), str(result[:,0,0,0,0,0])))
if not numpy.allclose(result[:,1,0,0,0,0], expcurvals[1], rtol=1.0E-4):
raise ValueError("Ys from shapefile_readxyzval:\n expected\n%s\n found\n%s" % \
(str(expcurvals[1]), str(result[:,1,0,0,0,0])))
if not numpy.allclose(result[:,2,0,0,0,0], expcurvals[2], rtol=1.0E-4, atol=1.0E-5):
raise ValueError("Zs from shapefile_readxyzval:\n expected\n%s\n found\n%s" % \
(str(expcurvals[2]), str(result[:,2,0,0,0,0])))
numvals = len(expcurvals[3])
if not numpy.allclose(result[:numvals,3,0,0,0,0], expcurvals[3], rtol=1.0E-4):
raise ValueError("Values from shapefile_readxyzval:\n expected\n%s\n found\n%s" % \
(str(expcurvals[3]), str(result[:numvals,3,0,0,0,0])))
if not numpy.allclose(result[numvals:,3,0,0,0,0], resbdf, rtol=1.0E-4):
raise ValueError("Extra values from shapefile_readxyzval: expected all %s\n found\n%s" % \
(str(float(resbdf[0])), str(result[numvals:,3,0,0,0,0])))
print("shapefile_readxyzval: SUCCESS")
os.remove("%s.dbf" % shapefilename)
os.remove("%s.shp" % shapefilename)
os.remove("%s.shx" % shapefilename)
os.remove("%s.prj" % shapefilename)
|
|
#!/usr/bin/python
#given a fastq location, create a .sh script to run mapping through generation of sorted bam
'''
The MIT License (MIT)
Copyright (c) 2013 Charles Lin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
#===================================================================
#========================MODULES AND DEPENDENCIES===================
#===================================================================
import string
import random
import utils
#===================================================================
#==========================GLOBAL PARAMETERS========================
#===================================================================
#command arguments
bowtieString = 'bowtie2'
samtoolsString = 'samtools'
tempParentFolder = '/grail/BOWTIE_TEMP/'
fastqcString = '/usr/local/FastQC/fastqc'
fastqDelimiter = '::'
#tempParentFolder = '/mnt/d0-0/share/bradnerlab/projects/anna/BOWTIE_TEMP/'
#===================================================================
#=============================FUNCTIONS=============================
#===================================================================
def stripExtension(fileName):
'''
tries to strip the extension of a filename
can strip .tar.gz, .tar, .txt, .fastq, .fasta, .gz, .zip
'''
extensionList = ['.tar.gz','.tar', '.txt','.fastq','.fasta','.gz','.zip']
for extension in extensionList:
fileName = fileName.replace(extension,'')
return fileName
#print stripExtension('CGATGT-s_8_1_sequence.txt')
def makeFileNameDict(fastqFile,genome,tempString,tempParentFolder,finalFolder,linkFolder,uniqueID='',pairedEnd = False):
'''
creates a dictionary w/ all filenames
'''
fileNameDict = {}
if pairedEnd:
fastqFileList = fastqFile.split(fastqDelimiter)
fileNameDict['fastqFile_1'] = fastqFileList[0]
fileNameDict['fastqFile_2'] = fastqFileList[1]
else:
fileNameDict['fastqFile'] = fastqFile
if uniqueID == '':
fastqName = fastqFile.split('/')[-1]
fastqName = stripExtension(fastqName)
else:
fastqName = uniqueID
fileNameDict['fastqName'] = fastqName
#make the temp Folder
tempFolder = tempParentFolder + 'bwt2_' + fastqName + tempString + '/'
fileNameDict['tempFolder'] = tempFolder
if pairedEnd:
tempFastqFile1 = tempFolder + fastqName + '_1.rawFastq'
fileNameDict['tempFastqFile_1'] = tempFastqFile1
tempFastqFile2 = tempFolder + fastqName + '_2.rawFastq'
fileNameDict['tempFastqFile_2'] = tempFastqFile2
else:
tempFastqFile = tempFolder + fastqName + '.rawFastq'
fileNameDict['tempFastqFile'] = tempFastqFile
tempSamFile = tempFolder + fastqName + '.sam'
fileNameDict['tempSamFile'] = tempSamFile
tempBamFile = tempFolder + fastqName + '.bam'
fileNameDict['tempBamFile'] = tempBamFile
tempSortedBamFile = tempFolder + fastqName + '.%s.bwt2.sorted' % (genome)
fileNameDict['tempSortedBamFile'] = tempSortedBamFile
sortedSamFile = fastqName + '.%s.bwt2.sorted.sam' % (genome)
fileNameDict['sortedSamFile'] = sortedSamFile
groupHeader = tempFolder + fastqName + '.%s.bwt2' % (genome)
fileNameDict['groupHeader'] = groupHeader
fileNameDict['finalFolder'] = finalFolder
fileNameDict['linkFolder'] = linkFolder
return fileNameDict
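# illustrative sketch (not called anywhere; paths, genome and temp string are
# made up) of the bookkeeping produced by makeFileNameDict for a single-end run
def exampleFileNameDict():
    '''shows the keys produced by makeFileNameDict for a single-end fastq'''
    fileNameDict = makeFileNameDict('/data/fastq/sample1.fastq.gz','hg19','_abc123',
                                    '/grail/BOWTIE_TEMP/','/grail/bam/','/grail/links/')
    # e.g. fileNameDict['fastqName']         == 'sample1'
    #      fileNameDict['tempFolder']        == '/grail/BOWTIE_TEMP/bwt2_sample1_abc123/'
    #      fileNameDict['tempSortedBamFile'] == '/grail/BOWTIE_TEMP/bwt2_sample1_abc123/sample1.hg19.bwt2.sorted'
    return fileNameDict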
def extractFastqCmd(fileNameDict,pairedEnd = False):
'''
creates a command to extract/copy the fastq to a temp location
'''
if pairedEnd:
fastqList = []
fastqFile1 = fileNameDict['fastqFile_1']
tempFastqFile1 = fileNameDict['tempFastqFile_1']
fastqList.append([fastqFile1,tempFastqFile1])
fastqFile2 = fileNameDict['fastqFile_2']
tempFastqFile2 = fileNameDict['tempFastqFile_2']
fastqList.append([fastqFile2,tempFastqFile2])
else:
fastqFile = fileNameDict['fastqFile']
tempFastqFile = fileNameDict['tempFastqFile']
fastqList = [[fastqFile,tempFastqFile]]
cmdList = []
for [fastqFile,tempFastqFile] in fastqList:
#there are 3 possibilities, a gzipped, tarballed, or naked fastq
if string.lower(fastqFile).count('tar.gz') == 1:
cmd = "tar --strip-components 5 --to-stdout -xzvf %s > %s" % (fastqFile,tempFastqFile)
elif string.lower(fastqFile).count('tar') == 1:
cmd = "tar -xzvf %s > %s" % (fastqFile,tempFastqFile)
elif string.lower(fastqFile.split('.')[-1]) == 'gz':
cmd = 'cp %s %s.gz\n' % (fastqFile,tempFastqFile)
cmd+= 'gunzip %s.gz' % (tempFastqFile)
else:
cmd = 'cp %s %s' % (fastqFile,tempFastqFile)
cmdList.append(cmd)
fullCmd = string.join(cmdList,'\n')
return fullCmd
def runFastQC(fastqcString,fileNameDict,pairedEnd = False):
'''
cmd to run fastqc
'''
if pairedEnd:
fastqName = fileNameDict['fastqName']
tempFastqFile1 = fileNameDict['tempFastqFile_1']
tempFastqFile2 = fileNameDict['tempFastqFile_2']
finalFolder = fileNameDict['finalFolder']
if finalFolder[-1] != '/':
finalFolder+='/'
finalFolder1 = finalFolder + '%s_1_fastqc' % (fastqName)
finalFolder2 = finalFolder + '%s_2_fastqc' % (fastqName)
cmd = 'mkdir %s\n' % (finalFolder1)
cmd += 'mkdir %s\n' % (finalFolder2)
cmd += '%s -o %s %s\n' % (fastqcString,finalFolder1,tempFastqFile1)
cmd += '%s -o %s %s' % (fastqcString,finalFolder2,tempFastqFile2)
else:
fastqName = fileNameDict['fastqName']
tempFastqFile = fileNameDict['tempFastqFile']
finalFolder = fileNameDict['finalFolder']
if finalFolder[-1] != '/':
finalFolder+='/'
finalFolder += '%s_fastqc' % (fastqName)
cmd = 'mkdir %s\n' % (finalFolder)
cmd += '%s -o %s %s' % (fastqcString,finalFolder,tempFastqFile)
return cmd
def bowtieCmd(bowtieString,paramString,bowtieIndex,fileNameDict,pairedEnd=False):
'''
creates the bowtie command call
'''
#calling bowtie
if pairedEnd:
tempFastqFile1 = fileNameDict['tempFastqFile_1']
tempFastqFile2 = fileNameDict['tempFastqFile_2']
tempSamFile = fileNameDict['tempSamFile']
cmd = "%s %s -x %s -1 %s -2 %s -S %s" % (bowtieString,paramString,bowtieIndex,tempFastqFile1,tempFastqFile2,tempSamFile)
else:
tempFastqFile = fileNameDict['tempFastqFile']
tempSamFile = fileNameDict['tempSamFile']
cmd = "%s %s -x %s -U %s -S %s" % (bowtieString,paramString,bowtieIndex,tempFastqFile,tempSamFile)
return cmd
def removeTempFastqCmd(fileNameDict,pairedEnd = False):
'''
removes the temp fastq
'''
if pairedEnd:
tempFastqFile1 = fileNameDict['tempFastqFile_1']
tempFastqFile2 = fileNameDict['tempFastqFile_2']
cmd = '/bin/rm -f %s\n' % (tempFastqFile1)
cmd += '/bin/rm -f %s' % (tempFastqFile2)
else:
tempFastqFile = fileNameDict['tempFastqFile']
cmd = '/bin/rm -f %s' % (tempFastqFile)
return cmd
#generate a bam file
def generateTempBamCmd(samtoolsString,fileNameDict):
'''
uses samtools to convert the sam to a bam
'''
tempSamFile = fileNameDict['tempSamFile']
tempBamFile = fileNameDict['tempBamFile']
cmd = "%s view -bS '%s' > '%s'" % (samtoolsString,tempSamFile,tempBamFile)
return cmd
#change into temp directory
def changeTempDir(fileNameDict):
'''
changes into the temp directory
'''
tempFolder = fileNameDict['tempFolder']
cmd = "cd %s" % (tempFolder)
return cmd
#sort
def sortBamCmd(samtoolsString,fileNameDict):
'''
    uses samtools to sort the bam
'''
tempBamFile = fileNameDict['tempBamFile']
tempSortedBamFile = fileNameDict['tempSortedBamFile']
cmd = "%s sort '%s' '%s'" % (samtoolsString,tempBamFile,tempSortedBamFile)
return cmd
#index
def indexBamCmd(samtoolsString,fileNameDict):
'''
uses samtools to index the bam
'''
tempSortedBamFile=fileNameDict['tempSortedBamFile']
cmd = "%s index '%s.bam'" % (samtoolsString,tempSortedBamFile)
return cmd
def rmSamCmd(fileNameDict):
'''
remove the sam
'''
tempSamFile = fileNameDict['tempSamFile']
cmd = "/bin/rm -f '%s'" % (tempSamFile)
return cmd
def mvSamCmd(fileNameDict):
'''
rename and move the sam
'''
tempSamFile = fileNameDict['tempSamFile']
finalFolder = fileNameDict['finalFolder']
sortedSamFile=fileNameDict['sortedSamFile']
cmd = "mv %s %s%s" % (tempSamFile,finalFolder,sortedSamFile)
return cmd
def mvBamCmd(fileNameDict):
'''
moves and renames the bam w/o the temp string
'''
groupHeader = fileNameDict['groupHeader']
finalFolder = fileNameDict['finalFolder']
cmd = "mv %s* %s" % (groupHeader,finalFolder)
return cmd
def linkBamCmd(fileNameDict):
'''
    creates hard links to the final bam files in the link folder
'''
groupHeader = fileNameDict['groupHeader']
finalFolder = fileNameDict['finalFolder']
linkFolder = fileNameDict['linkFolder']
groupName = groupHeader.split('/')[-1]
cmd = "ln %s%s* %s" % (finalFolder,groupName,linkFolder)
return cmd
def rmTempFiles(fileNameDict):
'''
removes everything left in the temp folder
'''
groupHeader = fileNameDict['groupHeader']
cmd = "/bin/rm -f '%s*'" % (groupHeader)
return cmd
#===================================================================
#=============================MAIN==================================
#===================================================================
def main():
'''
main run function
'''
from optparse import OptionParser
usage = "usage: %prog [options] -f [FASTQFILE] -g [GENOME] -u [UNIQUEID] -o [OUTPUTFOLDER]"
parser = OptionParser(usage = usage)
#required flags
parser.add_option("-f","--fastq", dest="fastq",nargs = 1, default=None,
help = "Enter the full path of a fastq file to be mapped")
parser.add_option("-g","--genome",dest="genome",nargs =1, default = None,
help = "specify a genome, options are hg19,hg18, mm9 or geckov2 right now")
parser.add_option("-u","--unique",dest="unique",nargs =1, default = None,
help = "specify a uniqueID")
parser.add_option("-o","--output",dest="output",nargs =1, default = None,
help = "Specify an output folder")
#optional arguments
parser.add_option("--param",dest="paramString",nargs =1, default = '',
help = "A string of bowtie parameters")
parser.add_option("--link-folder",dest="linkFolder",nargs =1, default = None,
help = "Specify a folder to symlink the bam")
parser.add_option("-p","--paired",dest="paired",action='store_true',default = False,
help = "Flag for paired end data")
parser.add_option("-S","--sam",dest="sam",action='store_true',default = False,
help = "Flag to save sam")
parser.add_option("-q","--qc",dest="qc",action='store_true',default = False,
help = "Flag to run fastqc")
(options,args) = parser.parse_args()
if not options.fastq or not options.genome or not options.unique or not options.output:
parser.print_help()
exit()
    #retrieve the arguments
fastqFile = options.fastq
genome = string.lower(options.genome)
uniqueID = options.unique
outputFolder = options.output
#make the output folder
outputFolder = utils.formatFolder(outputFolder,True)
#retrieve optional arguments
paramString = options.paramString
if options.linkFolder:
linkFolder = options.linkFolder
else:
linkFolder =''
pairedEnd = options.paired
#get the bowtie index
bowtieDict = {
'mm9':'/raider/index/mm9/Bowtie2Index/genome',
'hg19':'/raider/index/hg19/Bowtie2Index/genome',
'hg18':'/grail/genomes/Homo_sapiens/human_gp_mar_06_no_random/bowtie/hg18',
'geckov2':'/grail/genomes/gecko/GeCKOv2/Sequence/Bowtie2Index/gecko',
'ribo':'/raider/temp/rDNA/hg19_45S_index/genome',
'hg19_ribo':'/grail/genomes/Homo_sapiens/UCSC/hg19/Sequence/Bowtie2Index_ribo/genome',
}
bowtieIndex = bowtieDict[string.lower(genome)]
#get the temp string
tempString = '_%s' % str(random.randint(1,10000))
fileNameDict = makeFileNameDict(fastqFile,genome,tempString,tempParentFolder,outputFolder,linkFolder,uniqueID,pairedEnd)
#open the bashfile to write to
bashFileName = "%s%s_bwt2.sh" % (outputFolder,uniqueID)
bashFile = open(bashFileName,'w')
#shebang
bashFile.write('#!/usr/bin/bash\n')
#make temp directory
cmd = 'mkdir %s' % (fileNameDict['tempFolder'])
bashFile.write(cmd+'\n')
#extract fastq
cmd = extractFastqCmd(fileNameDict,pairedEnd)
bashFile.write(cmd+'\n')
#call fastqc
if options.qc:
cmd =runFastQC(fastqcString,fileNameDict,pairedEnd)
bashFile.write(cmd+'\n')
#call bowtie
cmd = bowtieCmd(bowtieString,paramString,bowtieIndex,fileNameDict,pairedEnd)
bashFile.write(cmd+'\n')
#remove temp fastq
cmd = removeTempFastqCmd(fileNameDict,pairedEnd)
bashFile.write(cmd+'\n')
#generate a bam
cmd = generateTempBamCmd(samtoolsString,fileNameDict)
bashFile.write(cmd+'\n')
#change into the temp directory
cmd = changeTempDir(fileNameDict)
bashFile.write(cmd+'\n')
#sort the bam
cmd = sortBamCmd(samtoolsString,fileNameDict)
bashFile.write(cmd+'\n')
#index
cmd = indexBamCmd(samtoolsString,fileNameDict)
bashFile.write(cmd+'\n')
#remove sam
if not options.sam:
cmd = rmSamCmd(fileNameDict)
bashFile.write(cmd+'\n')
#or move the sam
if options.sam:
cmd = mvSamCmd(fileNameDict)
bashFile.write(cmd+'\n')
#mv bams
cmd = mvBamCmd(fileNameDict)
bashFile.write(cmd+'\n')
#link bams
if options.linkFolder:
cmd = linkBamCmd(fileNameDict)
bashFile.write(cmd+'\n')
#cleanup
cmd = rmTempFiles(fileNameDict)
bashFile.write(cmd+'\n')
bashFile.close()
print "Wrote mapping command to %s" % (bashFileName)
if __name__ == "__main__":
main()
|
|
import subprocess
from mozpackager.settings import BUILD_DIR, BUILD_LOG_DIR, MEDIA_PATH
import json
class Mock:
root = None
mock = '/usr/bin/mock'
_build_log_text = None
_error_log_text = None
def __init__(self, build_package):
"""
        Perhaps pull these dynamically at some point.
        I've not seen this as necessary; cross compilation
        has worked just fine when building on x86_64.
"""
self.arch = 'x86_64'
self.root = 'mozilla-6-x86_64'
self.build_package = build_package
self.mozpackage = build_package.mozilla_package
self.build_source = build_package.build_source
self.required_install_packages = [
'zeroinstall-injector',
'ruby-devel',
'python-devel',
'rubygems',
'python-setuptools',
'rubygem-fpm',
]
def build_mock(self, root=None, arch=None):
"""
Builds a mock based environment
example usage:
/usr/bin/mock --root=mozilla-6-x86_64 --arch=x86_64 --init
"""
scrub_mock = [
self.mock,
'--root=%s' % self.root,
'--arch=%s' % self.arch,
'--scrub=all']
output, errors = self._run_command(scrub_mock)
init_mock = [
self.mock,
'--root=%s' % self.root,
'--arch=%s' % self.arch,
'--init']
output, errors = self._run_command(init_mock)
print output, errors
"""
status = self._parse_build_status(errors)
Do something with status.
Not sure if it's even useful at this point
"""
def install_build_file(self):
build_file_content = self.build_package.generate_build_file_content()
fh = open('/tmp/build_package.sh', 'w')
fh.write(build_file_content)
fh.close()
output, errors = self._copyin('/tmp/build_package.sh', '/')
chmod_build_file = [
self.mock,
'--root=%s' % self.root,
'--arch=%s' % self.arch,
'--shell',
'chmod 755 /build_package.sh',
]
output, errors = self._run_command(chmod_build_file)
print output, errors
def _copyin(self, path, destination='/tmp/'):
cmd = [
self.mock,
'--root=%s' % self.root,
'--arch=%s' % self.arch,
'--copyin',
path,
destination
]
output, errors = self._run_command(cmd)
return output, errors
@property
def build_log(self):
"""
        Cat the /tmp/log file inside the mock root.
        Store the result in the class variable _build_log_text; this content
        won't change, so we'll use self._build_log_text as a cache.
"""
if not self._build_log_text:
self._build_log_text = self._cat('/tmp/log')
return self._build_log_text
@property
def error_log(self):
"""
        Cat the /tmp/errors file inside the mock root.
        Store the result in the class variable _error_log_text; this content
        won't change, so we'll use self._error_log_text as a cache.
"""
if not self._error_log_text:
self._error_log_text = self._cat('/tmp/errors')
return self._error_log_text
@property
def build_path(self):
build_log = self.build_log
path = None
for line in build_log.split("\n"):
try:
obj = json.loads(line)
path = obj['path']
            except (ValueError, KeyError, TypeError):
                # skip lines that aren't JSON objects with a 'path' key
                pass
return path
@property
def build_message(self):
build_log = self.build_log
message = None
for line in build_log.split("\n"):
try:
obj = json.loads(line)
message = obj['message']
            except (ValueError, KeyError, TypeError):
                # skip lines that aren't JSON objects with a 'message' key
                pass
return message
def _cat(self, path):
cmd = [
self.mock,
'--root=%s' % self.root,
'--arch=%s' % self.arch,
'--shell',
'cat %s' % path,
]
output, errors = self._run_command(cmd)
return output
def copyout_built_package(self, path, destination):
self._copyout(path, destination)
def patch_arr_pm(self):
"""
Here we're copying in a new version of file.rb
Pretty hacky, but works in the interim until
the upstream version gets patched to function
on ruby < 1.9
"""
self._copyin('build_scripts/file.rb',
'/usr/lib/ruby/gems/1.8/gems/arr-pm-0.0.7/lib/arr-pm/file.rb')
self._copyin('build_scripts/rpm.rb',
'/usr/lib/ruby/gems/1.8/gems/fpm-0.4.24/lib/fpm/package/rpm.rb')
def copyin_source_file(self):
try:
upload_file = "%s/%s" % (MEDIA_PATH, self.build_source.build_source_file.source_file)
except:
upload_file = None
if upload_file and upload_file != '':
self._copyin(upload_file, '/')
def _copyout(self, path, destination='/tmp/'):
cmd = [
self.mock,
'--root=%s' % self.root,
'--arch=%s' % self.arch,
'--copyout',
path,
destination
]
output, errors = self._run_command(cmd)
return output, errors
def copyout_log(self):
try:
print "Copying out log"
self._copyout('/tmp/log', '%s/%s' % (BUILD_LOG_DIR, self.mozpackage.install_package_name))
output_log = open('%s/%s' % (BUILD_LOG_DIR, self.mozpackage.install_package_name), 'r').read()
except Exception, e:
output_log = ''
print "Exception on copying out /tmp/log"
print "%s" % (e)
return output_log
def copyout_error(self):
try:
print "Copying out error"
self._copyout('/tmp/errors', '%s/%s_error' % (BUILD_LOG_DIR, self.mozpackage.install_package_name))
output_log = open('%s/%s_error' % (BUILD_LOG_DIR, self.mozpackage.install_package_name), 'r').read()
except Exception, e:
output_log = ''
print "Exception on copying out /tmp/error"
print "%s" % (e)
return output_log
    def install_packages(self, additional_packages=None):
        # default to None rather than a shared mutable list so dependencies
        # don't accumulate across repeated calls
        if additional_packages is None:
            additional_packages = []
        for dep in self.build_source.mozillabuildsourcesystemdependency_set.all():
            additional_packages.append(dep.name)
        self._install_packages(self.required_install_packages + additional_packages)
def compile_package(self):
"""
        Here we run the build script that install_build_file() copied into the root.
"""
build_package = [
self.mock,
'--root=%s' % self.root,
'--arch=%s' % self.arch,
'--shell',
'/build_package.sh',
]
output, errors = self._run_command(build_package)
def _install_packages(self, package_list):
"""
        package_list is passed in as a parameter (rather than read from self) mainly to make testing easier
"""
installed_count = 0
for package in package_list:
install = [
self.mock,
'-q',
'--root=%s' % self.root,
'--arch=%s' % self.arch,
'--install',
'%s' % package
]
"""
Lots of useless debugging
@TODO: Remove
"""
print "Installing Package %s" % package
output, errors = self._run_command(install)
print output, errors
installed_count += 1
"""
Lots of useless debugging
@TODO: Remove
"""
print output
print errors
def _run_command(self, command):
p = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
output, errors = p.communicate()
return output, errors
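# Hedged usage sketch (not part of the original module); `build_package` is
# assumed to be the mozpackager build object the web app normally passes to
# Mock(). Nothing here runs at import time; it only shows the call order the
# methods above are designed around.
def example_mock_build(build_package):
    """Sketch of a full mock build: init the root, install deps, build, copy out logs."""
    mock = Mock(build_package)
    mock.build_mock()            # scrub and re-init the mock chroot
    mock.install_packages()      # required packages plus the source's system deps
    mock.patch_arr_pm()          # work around arr-pm/fpm on ruby < 1.9
    mock.copyin_source_file()    # optional uploaded source file, if any
    mock.install_build_file()    # write and copy in /build_package.sh
    mock.compile_package()       # run /build_package.sh inside the chroot
    build_log = mock.copyout_log()
    error_log = mock.copyout_error()
    return mock.build_path, build_log, error_log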
|
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-08-26 14:45
import logging
import math
import os
from abc import ABC, abstractmethod
from typing import Optional, List, Any, Dict
import numpy as np
import tensorflow as tf
import hanlp.utils
from hanlp_common.io import save_json, load_json
from hanlp.callbacks.fine_csv_logger import FineCSVLogger
from hanlp.common.component import Component
from hanlp.common.transform_tf import Transform
from hanlp.common.vocab_tf import VocabTF
from hanlp.metrics.chunking.iobes_tf import IOBES_F1_TF
from hanlp.optimizers.adamw import AdamWeightDecay
from hanlp.utils import io_util
from hanlp.utils.io_util import get_resource, tempdir_human
from hanlp.utils.log_util import init_logger, logger
from hanlp.utils.string_util import format_scores
from hanlp.utils.tf_util import format_metrics, size_of_dataset, summary_of_model, get_callback_by_class, NumpyEncoder
from hanlp.utils.time_util import Timer, now_datetime
from hanlp_common.reflection import str_to_type, classpath_of
from hanlp_common.structure import SerializableDict
from hanlp_common.util import merge_dict
class KerasComponent(Component, ABC):
def __init__(self, transform: Transform) -> None:
super().__init__()
self.meta = {
'class_path': classpath_of(self),
'hanlp_version': hanlp.version.__version__,
}
self.model: Optional[tf.keras.Model] = None
self.config = SerializableDict()
self.transform = transform
# share config with transform for convenience, so we don't need to pass args around
if self.transform.config:
for k, v in self.transform.config.items():
self.config[k] = v
self.transform.config = self.config
def evaluate(self, input_path: str, save_dir=None, output=False, batch_size=128, logger: logging.Logger = None,
callbacks: List[tf.keras.callbacks.Callback] = None, warm_up=True, verbose=True, **kwargs):
input_path = get_resource(input_path)
file_prefix, ext = os.path.splitext(input_path)
name = os.path.basename(file_prefix)
if not name:
name = 'evaluate'
if save_dir and not logger:
logger = init_logger(name=name, root_dir=save_dir, level=logging.INFO if verbose else logging.WARN,
mode='w')
tst_data = self.transform.file_to_dataset(input_path, batch_size=batch_size)
samples = self.num_samples_in(tst_data)
num_batches = math.ceil(samples / batch_size)
if warm_up:
for x, y in tst_data:
self.model.predict_on_batch(x)
break
if output:
assert save_dir, 'Must pass save_dir in order to output'
if isinstance(output, bool):
output = os.path.join(save_dir, name) + '.predict' + ext
elif isinstance(output, str):
output = output
else:
raise RuntimeError('output ({}) must be of type bool or str'.format(repr(output)))
timer = Timer()
eval_outputs = self.evaluate_dataset(tst_data, callbacks, output, num_batches, **kwargs)
loss, score, output = eval_outputs[0], eval_outputs[1], eval_outputs[2]
delta_time = timer.stop()
speed = samples / delta_time.delta_seconds
if logger:
f1: IOBES_F1_TF = None
for metric in self.model.metrics:
if isinstance(metric, IOBES_F1_TF):
f1 = metric
break
extra_report = ''
if f1:
overall, by_type, extra_report = f1.state.result(full=True, verbose=False)
extra_report = ' \n' + extra_report
logger.info('Evaluation results for {} - '
'loss: {:.4f} - {} - speed: {:.2f} sample/sec{}'
.format(name + ext, loss,
format_scores(score) if isinstance(score, dict) else format_metrics(self.model.metrics),
speed, extra_report))
if output:
logger.info('Saving output to {}'.format(output))
with open(output, 'w', encoding='utf-8') as out:
self.evaluate_output(tst_data, out, num_batches, self.model.metrics)
return loss, score, speed
def num_samples_in(self, dataset):
return size_of_dataset(dataset)
def evaluate_dataset(self, tst_data, callbacks, output, num_batches, **kwargs):
loss, score = self.model.evaluate(tst_data, callbacks=callbacks, steps=num_batches)
return loss, score, output
def evaluate_output(self, tst_data, out, num_batches, metrics: List[tf.keras.metrics.Metric]):
# out.write('x\ty_true\ty_pred\n')
for metric in metrics:
metric.reset_states()
for idx, batch in enumerate(tst_data):
outputs = self.model.predict_on_batch(batch[0])
for metric in metrics:
metric(batch[1], outputs, outputs._keras_mask if hasattr(outputs, '_keras_mask') else None)
self.evaluate_output_to_file(batch, outputs, out)
print('\r{}/{} {}'.format(idx + 1, num_batches, format_metrics(metrics)), end='')
print()
def evaluate_output_to_file(self, batch, outputs, out):
for x, y_gold, y_pred in zip(self.transform.X_to_inputs(batch[0]),
self.transform.Y_to_outputs(batch[1], gold=True),
self.transform.Y_to_outputs(outputs, gold=False)):
out.write(self.transform.input_truth_output_to_str(x, y_gold, y_pred))
def _capture_config(self, config: Dict,
exclude=(
'trn_data', 'dev_data', 'save_dir', 'kwargs', 'self', 'logger', 'verbose',
'dev_batch_size', '__class__')):
"""
Save arguments to config
Parameters
----------
config
`locals()`
exclude
"""
if 'kwargs' in config:
config.update(config['kwargs'])
config = dict(
(key, tf.keras.utils.serialize_keras_object(value)) if hasattr(value, 'get_config') else (key, value) for
key, value in config.items())
for key in exclude:
config.pop(key, None)
self.config.update(config)
def save_meta(self, save_dir, filename='meta.json', **kwargs):
        self.meta['create_time'] = now_datetime()
self.meta.update(kwargs)
save_json(self.meta, os.path.join(save_dir, filename))
def load_meta(self, save_dir, filename='meta.json'):
save_dir = get_resource(save_dir)
metapath = os.path.join(save_dir, filename)
if os.path.isfile(metapath):
self.meta.update(load_json(metapath))
def save_config(self, save_dir, filename='config.json'):
self.config.save_json(os.path.join(save_dir, filename))
def load_config(self, save_dir, filename='config.json'):
save_dir = get_resource(save_dir)
self.config.load_json(os.path.join(save_dir, filename))
def save_weights(self, save_dir, filename='model.h5'):
self.model.save_weights(os.path.join(save_dir, filename))
def load_weights(self, save_dir, filename='model.h5', **kwargs):
        assert self.model.built or self.model.weights, 'You must build the model (e.g. call self.model.build()) ' \
                                                       'in build_model() in order to load it'
save_dir = get_resource(save_dir)
self.model.load_weights(os.path.join(save_dir, filename))
def save_vocabs(self, save_dir, filename='vocabs.json'):
vocabs = SerializableDict()
for key, value in vars(self.transform).items():
if isinstance(value, VocabTF):
vocabs[key] = value.to_dict()
vocabs.save_json(os.path.join(save_dir, filename))
def load_vocabs(self, save_dir, filename='vocabs.json'):
save_dir = get_resource(save_dir)
vocabs = SerializableDict()
vocabs.load_json(os.path.join(save_dir, filename))
for key, value in vocabs.items():
vocab = VocabTF()
vocab.copy_from(value)
setattr(self.transform, key, vocab)
def load_transform(self, save_dir) -> Transform:
"""
        Try to load the transform only. This method might fail because it avoids building the model.
        If it does fail, fall back to `load`, which might be too heavy but is the best we can do.
        :param save_dir: The path to load from.
"""
save_dir = get_resource(save_dir)
self.load_config(save_dir)
self.load_vocabs(save_dir)
self.transform.build_config()
self.transform.lock_vocabs()
return self.transform
def save(self, save_dir: str, **kwargs):
self.save_config(save_dir)
self.save_vocabs(save_dir)
self.save_weights(save_dir)
def load(self, save_dir: str, logger=hanlp.utils.log_util.logger, **kwargs):
self.meta['load_path'] = save_dir
save_dir = get_resource(save_dir)
self.load_config(save_dir)
self.load_vocabs(save_dir)
self.build(**merge_dict(self.config, training=False, logger=logger, **kwargs, overwrite=True, inplace=True))
self.load_weights(save_dir, **kwargs)
self.load_meta(save_dir)
@property
def input_shape(self) -> List:
return self.transform.output_shapes[0]
def build(self, logger, **kwargs):
self.transform.build_config()
self.model = self.build_model(**merge_dict(self.config, training=kwargs.get('training', None),
loss=kwargs.get('loss', None)))
self.transform.lock_vocabs()
optimizer = self.build_optimizer(**self.config)
loss = self.build_loss(
**self.config if 'loss' in self.config else dict(list(self.config.items()) + [('loss', None)]))
        # allow the caller to override which metrics get built (defaults to accuracy)
metrics = self.build_metrics(**merge_dict(self.config, metrics=kwargs.get('metrics', 'accuracy'),
logger=logger, overwrite=True))
if not isinstance(metrics, list):
if isinstance(metrics, tf.keras.metrics.Metric):
metrics = [metrics]
if not self.model.built:
sample_inputs = self.sample_data
if sample_inputs is not None:
self.model(sample_inputs)
else:
if len(self.transform.output_shapes[0]) == 1 and self.transform.output_shapes[0][0] is None:
x_shape = self.transform.output_shapes[0]
else:
x_shape = list(self.transform.output_shapes[0])
for i, shape in enumerate(x_shape):
x_shape[i] = [None] + shape # batch + X.shape
self.model.build(input_shape=x_shape)
self.compile_model(optimizer, loss, metrics)
return self.model, optimizer, loss, metrics
def compile_model(self, optimizer, loss, metrics):
self.model.compile(optimizer=optimizer, loss=loss, metrics=metrics, run_eagerly=self.config.run_eagerly)
def build_optimizer(self, optimizer, **kwargs):
if isinstance(optimizer, (str, dict)):
custom_objects = {'AdamWeightDecay': AdamWeightDecay}
optimizer: tf.keras.optimizers.Optimizer = tf.keras.utils.deserialize_keras_object(optimizer,
module_objects=vars(
tf.keras.optimizers),
custom_objects=custom_objects)
self.config.optimizer = tf.keras.utils.serialize_keras_object(optimizer)
return optimizer
def build_loss(self, loss, **kwargs):
if not loss:
loss = tf.keras.losses.SparseCategoricalCrossentropy(
reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
from_logits=True)
elif isinstance(loss, (str, dict)):
loss = tf.keras.utils.deserialize_keras_object(loss, module_objects=vars(tf.keras.losses))
if isinstance(loss, tf.keras.losses.Loss):
self.config.loss = tf.keras.utils.serialize_keras_object(loss)
return loss
def build_transform(self, **kwargs):
return self.transform
def build_vocab(self, trn_data, logger):
train_examples = self.transform.fit(trn_data, **self.config)
self.transform.summarize_vocabs(logger)
return train_examples
def build_metrics(self, metrics, logger: logging.Logger, **kwargs):
metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
return [metric]
@abstractmethod
def build_model(self, **kwargs) -> tf.keras.Model:
pass
def fit(self, trn_data, dev_data, save_dir, batch_size, epochs, run_eagerly=False, logger=None, verbose=True,
finetune: str = None, **kwargs):
self._capture_config(locals())
self.transform = self.build_transform(**self.config)
if not save_dir:
save_dir = tempdir_human()
if not logger:
logger = init_logger(name='train', root_dir=save_dir, level=logging.INFO if verbose else logging.WARN)
logger.info('Hyperparameter:\n' + self.config.to_json())
num_examples = self.build_vocab(trn_data, logger)
# assert num_examples, 'You forgot to return the number of training examples in your build_vocab'
logger.info('Building...')
train_steps_per_epoch = math.ceil(num_examples / batch_size) if num_examples else None
self.config.train_steps = train_steps_per_epoch * epochs if num_examples else None
model, optimizer, loss, metrics = self.build(**merge_dict(self.config, logger=logger, training=True))
logger.info('Model built:\n' + summary_of_model(self.model))
if finetune:
finetune = get_resource(finetune)
if os.path.isdir(finetune):
finetune = os.path.join(finetune, 'model.h5')
model.load_weights(finetune, by_name=True, skip_mismatch=True)
logger.info(f'Loaded pretrained weights from {finetune} for finetuning')
self.save_config(save_dir)
self.save_vocabs(save_dir)
self.save_meta(save_dir)
trn_data = self.build_train_dataset(trn_data, batch_size, num_examples)
dev_data = self.build_valid_dataset(dev_data, batch_size)
callbacks = self.build_callbacks(save_dir, **merge_dict(self.config, overwrite=True, logger=logger))
# need to know #batches, otherwise progbar crashes
dev_steps = math.ceil(self.num_samples_in(dev_data) / batch_size)
checkpoint = get_callback_by_class(callbacks, tf.keras.callbacks.ModelCheckpoint)
timer = Timer()
try:
history = self.train_loop(**merge_dict(self.config, trn_data=trn_data, dev_data=dev_data, epochs=epochs,
num_examples=num_examples,
train_steps_per_epoch=train_steps_per_epoch, dev_steps=dev_steps,
callbacks=callbacks, logger=logger, model=model, optimizer=optimizer,
loss=loss,
metrics=metrics, overwrite=True))
except KeyboardInterrupt:
print()
if not checkpoint or checkpoint.best in (np.Inf, -np.Inf):
self.save_weights(save_dir)
logger.info('Aborted with model saved')
else:
logger.info(f'Aborted with model saved with best {checkpoint.monitor} = {checkpoint.best:.4f}')
# noinspection PyTypeChecker
history: tf.keras.callbacks.History() = get_callback_by_class(callbacks, tf.keras.callbacks.History)
delta_time = timer.stop()
best_epoch_ago = 0
if history and hasattr(history, 'epoch'):
trained_epoch = len(history.epoch)
logger.info('Trained {} epochs in {}, each epoch takes {}'.
format(trained_epoch, delta_time, delta_time / trained_epoch if trained_epoch else delta_time))
save_json(history.history, io_util.path_join(save_dir, 'history.json'), cls=NumpyEncoder)
monitor_history: List = history.history.get(checkpoint.monitor, None)
if monitor_history:
best_epoch_ago = len(monitor_history) - monitor_history.index(checkpoint.best)
if checkpoint and monitor_history and checkpoint.best != monitor_history[-1]:
logger.info(f'Restored the best model saved with best '
f'{checkpoint.monitor} = {checkpoint.best:.4f} '
f'saved {best_epoch_ago} epochs ago')
self.load_weights(save_dir) # restore best model
return history
def train_loop(self, trn_data, dev_data, epochs, num_examples, train_steps_per_epoch, dev_steps, model, optimizer,
loss, metrics, callbacks,
logger, **kwargs):
history = self.model.fit(trn_data, epochs=epochs, steps_per_epoch=train_steps_per_epoch,
validation_data=dev_data,
callbacks=callbacks,
validation_steps=dev_steps,
) # type:tf.keras.callbacks.History
return history
def build_valid_dataset(self, dev_data, batch_size):
dev_data = self.transform.file_to_dataset(dev_data, batch_size=batch_size, shuffle=False)
return dev_data
def build_train_dataset(self, trn_data, batch_size, num_examples):
trn_data = self.transform.file_to_dataset(trn_data, batch_size=batch_size,
shuffle=True,
repeat=-1 if self.config.train_steps else None)
return trn_data
def build_callbacks(self, save_dir, logger, **kwargs):
metrics = kwargs.get('metrics', 'accuracy')
if isinstance(metrics, (list, tuple)):
metrics = metrics[-1]
monitor = f'val_{metrics}'
checkpoint = tf.keras.callbacks.ModelCheckpoint(
os.path.join(save_dir, 'model.h5'),
# verbose=1,
monitor=monitor, save_best_only=True,
mode='max',
save_weights_only=True)
logger.debug(f'Monitor {checkpoint.monitor} for checkpoint')
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=io_util.makedirs(io_util.path_join(save_dir, 'logs')))
csv_logger = FineCSVLogger(os.path.join(save_dir, 'train.log'), separator=' | ', append=True)
callbacks = [checkpoint, tensorboard_callback, csv_logger]
lr_decay_per_epoch = self.config.get('lr_decay_per_epoch', None)
if lr_decay_per_epoch:
learning_rate = self.model.optimizer.get_config().get('learning_rate', None)
if not learning_rate:
logger.warning('Learning rate decay not supported for optimizer={}'.format(repr(self.model.optimizer)))
else:
logger.debug(f'Created LearningRateScheduler with lr_decay_per_epoch={lr_decay_per_epoch}')
callbacks.append(tf.keras.callbacks.LearningRateScheduler(
lambda epoch: learning_rate / (1 + lr_decay_per_epoch * epoch)))
anneal_factor = self.config.get('anneal_factor', None)
if anneal_factor:
callbacks.append(tf.keras.callbacks.ReduceLROnPlateau(factor=anneal_factor,
patience=self.config.get('anneal_patience', 10)))
early_stopping_patience = self.config.get('early_stopping_patience', None)
if early_stopping_patience:
callbacks.append(tf.keras.callbacks.EarlyStopping(monitor=monitor, mode='max',
verbose=1,
patience=early_stopping_patience))
return callbacks
def on_train_begin(self):
"""
Callback before the training starts
"""
pass
def predict(self, data: Any, batch_size=None, **kwargs):
assert self.model, 'Please call fit or load before predict'
if not data:
return []
data, flat = self.transform.input_to_inputs(data)
if not batch_size:
batch_size = self.config.batch_size
dataset = self.transform.inputs_to_dataset(data, batch_size=batch_size, gold=kwargs.get('gold', False))
results = []
num_samples = 0
data_is_list = isinstance(data, list)
for idx, batch in enumerate(dataset):
samples_in_batch = tf.shape(batch[-1] if isinstance(batch[-1], tf.Tensor) else batch[-1][0])[0]
if data_is_list:
inputs = data[num_samples:num_samples + samples_in_batch]
else:
inputs = None # if data is a generator, it's usually one-time, not able to transform into a list
for output in self.predict_batch(batch, inputs=inputs, **kwargs):
results.append(output)
num_samples += samples_in_batch
self.transform.cleanup()
if flat:
return results[0]
return results
def predict_batch(self, batch, inputs=None, **kwargs):
X = batch[0]
Y = self.model.predict_on_batch(X)
for output in self.transform.Y_to_outputs(Y, X=X, inputs=inputs, batch=batch, **kwargs):
yield output
@property
def sample_data(self):
return None
@staticmethod
def from_meta(meta: dict, **kwargs):
"""
Parameters
----------
meta
kwargs
Returns
-------
KerasComponent
"""
cls = str_to_type(meta['class_path'])
obj: KerasComponent = cls()
assert 'load_path' in meta, f'{meta} doesn\'t contain load_path field'
obj.load(meta['load_path'])
return obj
def export_model_for_serving(self, export_dir=None, version=1, overwrite=False, show_hint=False):
assert self.model, 'You have to fit or load a model before exporting it'
if not export_dir:
assert 'load_path' in self.meta, 'When not specifying save_dir, load_path has to present'
export_dir = get_resource(self.meta['load_path'])
model_path = os.path.join(export_dir, str(version))
if os.path.isdir(model_path) and not overwrite:
logger.info(f'{model_path} exists, skip since overwrite = {overwrite}')
return export_dir
logger.info(f'Exporting to {export_dir} ...')
tf.saved_model.save(self.model, model_path)
logger.info(f'Successfully exported model to {export_dir}')
if show_hint:
logger.info(f'You can serve it through \n'
f'tensorflow_model_server --model_name={os.path.splitext(os.path.basename(self.meta["load_path"]))[0]} '
f'--model_base_path={export_dir} --rest_api_port=8888')
return export_dir
def serve(self, export_dir=None, grpc_port=8500, rest_api_port=0, overwrite=False, dry_run=False):
export_dir = self.export_model_for_serving(export_dir, show_hint=False, overwrite=overwrite)
if not dry_run:
del self.model # free memory
logger.info('The inputs of exported model is shown below.')
os.system(f'saved_model_cli show --all --dir {export_dir}/1')
cmd = f'nohup tensorflow_model_server --model_name={os.path.splitext(os.path.basename(self.meta["load_path"]))[0]} ' \
f'--model_base_path={export_dir} --port={grpc_port} --rest_api_port={rest_api_port} ' \
f'>serve.log 2>&1 &'
logger.info(f'Running ...\n{cmd}')
if not dry_run:
os.system(cmd)
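# Hedged usage sketch (not part of HanLP): a minimal subclass showing the
# contract KerasComponent expects. `MyTransform` in the comments below is a
# placeholder for a concrete Transform implementation, and the architecture in
# build_model() is arbitrary.
class _ExampleTaggerComponent(KerasComponent):

    def build_model(self, embedding_dim=100, hidden_units=128, **kwargs) -> tf.keras.Model:
        # vocabulary sizes would normally come from self.transform after
        # build_vocab() has populated it; 10000 and 10 here are placeholders
        return tf.keras.Sequential([
            tf.keras.layers.Embedding(input_dim=10000, output_dim=embedding_dim, mask_zero=True),
            tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(hidden_units, return_sequences=True)),
            tf.keras.layers.Dense(10),
        ])

# Typical lifecycle (sketch):
#   component = _ExampleTaggerComponent(MyTransform())
#   component.fit(trn_data, dev_data, save_dir, batch_size=32, epochs=3)
#   component.evaluate(tst_data, save_dir=save_dir)
#   component.load(save_dir); component.predict(samples)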
|
|
import sys
import unittest
from dynd import nd, ndt
import numpy as np
class TestCopyFromPy(unittest.TestCase):
def test_bool(self):
a = nd.empty('var * bool')
a[...] = [True, False, 1, 0, 'true', 'false', 'on', 'off']
self.assertEqual(nd.as_py(a), [True, False] * 4)
@unittest.skip('Test disabled since callables were reworked')
class TestCopyFromNumPy(unittest.TestCase):
def test_simple_strided(self):
a = nd.empty('3 * int32')
a[...] = np.int64(1)
self.assertEqual(nd.as_py(a), [1] * 3)
a[...] = np.array(2.0)
self.assertEqual(nd.as_py(a), [2] * 3)
a[...] = np.array([3], dtype=np.int8)
self.assertEqual(nd.as_py(a), [3] * 3)
a[...] = np.array([1, 2, 3])
self.assertEqual(nd.as_py(a), [1, 2, 3])
def test_simple_var(self):
a = nd.empty('var * int32')
a[...] = np.int64(1)
self.assertEqual(nd.as_py(a), [1])
a = nd.empty('var * int32')
a[...] = np.array(2.0)
self.assertEqual(nd.as_py(a), [2])
a = nd.empty('var * int32')
a[...] = np.array([3], dtype=np.int8)
self.assertEqual(nd.as_py(a), [3])
a = nd.empty('var * int32')
a[...] = np.array([1, 2, 3])
self.assertEqual(nd.as_py(a), [1, 2, 3])
a[...] = np.array([4])
self.assertEqual(nd.as_py(a), [4] * 3)
def test_object_arr(self):
a = nd.empty('3 * int')
a[...] = np.array([1, 2, 3.0], dtype=object)
self.assertEqual(nd.as_py(a), [1, 2, 3])
a = nd.empty('3 * string')
a[...] = np.array(['testing', 'one', u'two'], dtype=object)
self.assertEqual(nd.as_py(a), ['testing', 'one', 'two'])
a = nd.empty('3 * string')
a[...] = np.array(['broadcast_string'], dtype=object)
self.assertEqual(nd.as_py(a), ['broadcast_string'] * 3)
a = nd.empty('3 * string')
a[...] = np.array('testing', dtype=object)
self.assertEqual(nd.as_py(a), ['testing'] * 3)
def test_object_in_struct_arr(self):
a = nd.empty('3 * {x: int, y: string}')
a[...] = np.array([(1, 'test'), (2, u'one'), (3.0, 'two')],
dtype=[('x', np.int64), ('y', object)])
self.assertEqual(nd.as_py(a),
[{'x': 1, 'y': 'test'}, {'x': 2, 'y': 'one'}, {'x': 3, 'y': 'two'}])
a = nd.empty('3 * {x: int, y: string}')
a[...] = np.array([('opposite', 4)],
dtype=[('y', object), ('x', np.int64)])
self.assertEqual(nd.as_py(a),
[{'x': 4, 'y': 'opposite'}] * 3)
a = nd.empty('var * {x: int, y: string}')
a[...] = np.array([(1, 'test'), (2, u'one'), (3.0, 'two')],
dtype=[('x', object), ('y', object)])
self.assertEqual(nd.as_py(a),
[{'x': 1, 'y': 'test'}, {'x': 2, 'y': 'one'}, {'x': 3, 'y': 'two'}])
@unittest.skip('Test disabled since callables were reworked')
class TestStructCopy(unittest.TestCase):
def test_single_struct(self):
a = nd.empty('{x:int32, y:string, z:bool}')
a[...] = [3, 'test', False]
self.assertEqual(nd.as_py(a.x), 3)
self.assertEqual(nd.as_py(a.y), 'test')
self.assertEqual(nd.as_py(a.z), False)
a = nd.empty('{x:int32, y:string, z:bool}')
a[...] = {'x':10, 'y':'testing', 'z':True}
self.assertEqual(nd.as_py(a.x), 10)
self.assertEqual(nd.as_py(a.y), 'testing')
self.assertEqual(nd.as_py(a.z), True)
def test_nested_struct(self):
a = nd.empty('{x: 2 * int16, y: {a: string, b: float64}, z: 1 * complex[float32]}')
a[...] = [[1,2], ['test', 3.5], [3j]]
self.assertEqual(nd.as_py(a.x), [1, 2])
self.assertEqual(nd.as_py(a.y.a), 'test')
self.assertEqual(nd.as_py(a.y.b), 3.5)
self.assertEqual(nd.as_py(a.z), [3j])
a = nd.empty('{x: 2 * int16, y: {a: string, b: float64}, z: 1 * complex[float32]}')
a[...] = {'x':[1,2], 'y':{'a':'test', 'b':3.5}, 'z':[3j]}
self.assertEqual(nd.as_py(a.x), [1, 2])
self.assertEqual(nd.as_py(a.y.a), 'test')
self.assertEqual(nd.as_py(a.y.b), 3.5)
self.assertEqual(nd.as_py(a.z), [3j])
def test_single_struct_array(self):
a = nd.empty('3 * {x:int32, y:int32}')
a[...] = [(0,0), (3,5), (12,10)]
self.assertEqual(nd.as_py(a.x), [0, 3, 12])
self.assertEqual(nd.as_py(a.y), [0, 5, 10])
a[...] = [{'x':1,'y':2}, {'x':4,'y':7}, {'x':14,'y':190}]
self.assertEqual(nd.as_py(a.x), [1, 4, 14])
self.assertEqual(nd.as_py(a.y), [2, 7, 190])
a = nd.empty('2 * var * {count:int32, size:fixed_string[1,"A"]}')
a[...] = [[(3, 'X')], [(10, 'L'), (12, 'M')]]
self.assertEqual(nd.as_py(a.count), [[3], [10, 12]])
self.assertEqual(nd.as_py(a.size), [['X'], ['L', 'M']])
a[...] = [[{'count':6, 'size':'M'}],
[{'count':3, 'size':'F'}, {'count':16, 'size':'D'}]]
self.assertEqual(nd.as_py(a.count), [[6], [3, 16]])
self.assertEqual(nd.as_py(a.size), [['M'], ['F', 'D']])
a[...] = {'count':1, 'size':'Z'}
self.assertEqual(nd.as_py(a.count), [[1], [1, 1]])
self.assertEqual(nd.as_py(a.size), [['Z'], ['Z', 'Z']])
a[...] = [[(10, 'A')], [(5, 'B')]]
self.assertEqual(nd.as_py(a.count), [[10], [5, 5]])
self.assertEqual(nd.as_py(a.size), [['A'], ['B', 'B']])
def test_nested_struct_array(self):
a = nd.empty('3 * {x:{a:int16, b:int16}, y:int32}')
a[...] = [((0,1),0), ((2,2),5), ((100,10),10)]
self.assertEqual(nd.as_py(a.x.a), [0, 2, 100])
self.assertEqual(nd.as_py(a.x.b), [1, 2, 10])
self.assertEqual(nd.as_py(a.y), [0, 5, 10])
a[...] = [{'x':{'a':1,'b':2},'y':5},
{'x':{'a':3,'b':6},'y':7},
{'x':{'a':1001,'b':110},'y':110}]
self.assertEqual(nd.as_py(a.x.a), [1, 3, 1001])
self.assertEqual(nd.as_py(a.x.b), [2, 6, 110])
self.assertEqual(nd.as_py(a.y), [5, 7, 110])
a = nd.empty('2 * var * {count:int32, size:{name:fixed_string[1,"A"], id: int8}}')
a[...] = [[(3, ('X', 10))], [(10, ('L', 7)), (12, ('M', 5))]]
self.assertEqual(nd.as_py(a.count), [[3], [10, 12]])
self.assertEqual(nd.as_py(a.size.name), [['X'], ['L', 'M']])
self.assertEqual(nd.as_py(a.size.id), [[10], [7, 5]])
def test_missing_field(self):
a = nd.empty('{x:int32, y:int32, z:int32}')
def assign(x, y):
x[...] = y
self.assertRaises(nd.BroadcastError, assign, a, [0, 1])
self.assertRaises(nd.BroadcastError, assign, a, {'x':0, 'z':1})
def test_extra_field(self):
a = nd.empty('{x:int32, y:int32, z:int32}')
def assign(x, y):
x[...] = y
self.assertRaises(nd.BroadcastError, assign, a, [0, 1, 2, 3])
self.assertRaises(nd.BroadcastError, assign, a, {'x':0,'y':1,'z':2,'w':3})
@unittest.skip('Test disabled since callables were reworked')
class TestIteratorAssign(unittest.TestCase):
def test_simple_var_dim(self):
# Assign to a var dim from a generator
a = nd.empty('var * int32')
a[...] = (x + 2 for x in range(10))
self.assertEqual(len(a), 10)
self.assertEqual(nd.as_py(a), [x + 2 for x in range(10)])
# If we assign from a generator with one element, it broadcasts
a[...] = (x + 3 for x in range(5,6))
self.assertEqual(len(a), 10)
self.assertEqual(nd.as_py(a), [8]*10)
def assign(x, y):
x[...] = y
# If we assign from a generator with too few elements, it errors
self.assertRaises(nd.BroadcastError, assign, a,
(x + 2 for x in range(9)))
# If we assign from a generator with too many elements, it errors
self.assertRaises(nd.BroadcastError, assign, a,
(x + 2 for x in range(11)))
def test_simple_strided_dim(self):
# Assign to a strided dim from a generator
a = nd.empty(10, ndt.int32)
a[...] = (x + 2 for x in range(10))
self.assertEqual(len(a), 10)
self.assertEqual(nd.as_py(a), [x + 2 for x in range(10)])
# If we assign from a generator with one element, it broadcasts
a[...] = (x + 3 for x in range(5,6))
self.assertEqual(len(a), 10)
self.assertEqual(nd.as_py(a), [8]*10)
def assign(x, y):
x[...] = y
# If we assign from a generator with too few elements, it errors
self.assertRaises(nd.BroadcastError, assign, a,
(x + 2 for x in range(9)))
# If we assign from a generator with too many elements, it errors
self.assertRaises(nd.BroadcastError, assign, a,
(x + 2 for x in range(11)))
def test_simple_fixed_dim(self):
# Assign to a strided dim from a generator
a = nd.empty(10, ndt.int32)
a[...] = (x + 2 for x in range(10))
self.assertEqual(len(a), 10)
self.assertEqual(nd.as_py(a), [x + 2 for x in range(10)])
# If we assign from a generator with one element, it broadcasts
a[...] = (x + 3 for x in range(5,6))
self.assertEqual(len(a), 10)
self.assertEqual(nd.as_py(a), [8]*10)
def assign(x, y):
x[...] = y
# If we assign from a generator with too few elements, it errors
self.assertRaises(nd.BroadcastError, assign, a,
(x + 2 for x in range(9)))
# If we assign from a generator with too many elements, it errors
self.assertRaises(nd.BroadcastError, assign, a,
(x + 2 for x in range(11)))
@unittest.skip('Test disabled since callables were reworked')
class TestStringCopy(unittest.TestCase):
def test_string_assign_to_slice(self):
a = nd.array(['a', 'b', 'c', 'd', 'e'], type = ndt.make_fixed_dim(5, 'fixed_string[8]'))
a[:3] = 'test'
self.assertEqual(nd.as_py(a), ['test', 'test', 'test', 'd', 'e'])
if __name__ == '__main__':
unittest.main(verbosity=2)
|
|
'''
List resource API
@author: Youyk
'''
import apibinding.api_actions as api_actions
import account_operations
import deploy_operations
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstacklib.utils.lock as lock
import zstacklib.utils.xmlobject as xmlobject
import time
import os
import sys
import traceback
import threading
#Define the default get-resource method. The default is the List API; the Search API and Get API are also available.
SEARCH_RESOURCE_METHOD = 'search'
LIST_RESOURCE_METHOD = 'list'
GET_RESOURCE_METHOD_BY_GET = 'get'
#GET_RESOURCE_METHOD = SEARCH_RESOURCE_METHOD
GET_RESOURCE_METHOD = LIST_RESOURCE_METHOD
BACKUP_STORAGE = 'BackupStorage'
SFTP_BACKUP_STORAGE = 'SftpBackupStorage'
CEPH_BACKUP_STORAGE = 'CephBackupStorage'
ZONE = 'Zone'
CLUSTER = 'Cluster'
PRIMARY_STORAGE = 'PrimaryStorage'
CEPH_PRIMARY_STORAGE = 'CephPrimaryStorage'
CEPH_PRIMARY_STORAGE_POOL = 'CephPrimaryStoragePool'
L2_NETWORK = 'L2Network'
L2_VLAN_NETWORK = 'L2VlanNetwork'
L2_VXLAN_NETWORK = 'L2VxlanNetwork'
L2_VXLAN_NETWORK_POOL = 'L2VxlanNetworkPool'
VNI_RANGE = 'VniRange'
L3_NETWORK = 'L3Network'
INSTANCE_OFFERING = 'InstanceOffering'
IMAGE = 'Image'
VOLUME = 'Volume'
SHARE_VOLUME = 'ShareVolume'
VM_INSTANCE = 'VmInstance'
IP_RANGE = 'IpRange'
HOST = 'Host'
NETWORK_SERVICE_PROVIDER = 'NetworkServiceProvider'
NETWORK_SERVICE_PROVIDER_L3_REF = 'NetworkServiceProviderL3Ref'
APPLIANCE_VM = 'ApplianceVm'
VIRTUALROUTER_VM = 'VirtualRouterVm'
DISK_OFFERING = 'DiskOffering'
ACCOUNT = 'Account'
USER = 'User'
PRIMARY_STORAGE = 'PrimaryStorage'
SECURITY_GROUP = 'SecurityGroup'
SECURITY_GROUP_RULE = 'SecurityGroupRule'
VM_SECURITY_GROUP = 'VmSecurityGroup'
VM_NIC = 'VmNic'
PORT_FORWARDING = 'PortForwarding'
MANAGEMENT_NODE = 'ManagementNode'
EIP = 'Eip'
VIP = 'Vip'
IP_CAPACITY = 'IpCapacity'
VR_OFFERING = 'VirtualRouterOffering'
SYSTEM_TAG = 'SystemTag'
USER_TAG = 'UserTag'
VOLUME_SNAPSHOT_TREE = 'VolumeSnapshotTree'
VOLUME_SNAPSHOT = 'VolumeSnapshot'
LOAD_BALANCER = 'LoadBalancer'
LOAD_BALANCER_LISTENER = 'LoadBalancerListener'
LOCAL_STORAGE_RESOURCE_REF = 'LocalStorageResourceRef'
IMAGE_STORE_BACKUP_STORAGE = 'ImageStoreBackupStorage'
SCHEDULER = 'Scheduler'
SCHEDULERJOB = 'SchedulerJob'
SCHEDULERTRIGGER = 'SchedulerTrigger'
VCENTER = 'VCenter'
VCENTER_CLUSTER = 'VCenterCluster'
VCENTER_BACKUP_STORAGE = 'VCenterBackupStorage'
VCENTER_PRIMARY_STORAGE = 'VCenterPrimaryStorage'
VCENTER_DVSWITCHES = 'VCenterDVSwitches'
MONITOR_TRIGGER = 'MonitorTrigger'
MONITOR_TRIGGER_ACTION = 'MonitorTriggerAction'
PXE_SERVER = 'PxeServer'
CHASSIS = 'Chassis'
HWINFO = 'HardwareInfo'
BAREMETAL_INS = 'BaremetalInstance'
LONGJOB = 'LongJob'
ALARM = 'Alarm'
EVENT_SUBSCRIPTION = 'EventSubscription'
SNS_APPLICATION_ENDPOINT = 'SNSApplicationEndpoint'
SNS_APPLICATION_PLATFORM ='SNSApplicationPlatform'
SNS_TOPIC = 'SNSTopic'
SNS_TOPIC_SUBSCRIBER = 'SNSTopicSubscriber'
SNS_DING_TALK_ENDPOINT = 'SNSDingTalkEndpoint'
SNS_EMAIL_ENDPOINT = 'SNSEmailEndpoint'
SNS_EMAIL_PLATFORM = 'SNSEmailPlatform'
SNS_HTTP_ENDPOINT = 'SNSHttpEndpoint'
SNS_TEXT_TEMPLATE = 'SNSTextTemplate'
AFFINITY_GROUP = "AffinityGroup"
IAM2_ORGANIZATION = 'IAM2Organization'
IAM2_PROJECT = 'IAM2Project'
IAM2_VIRTUAL_ID_GROUP = 'IAM2VirtualIDGroup'
IAM2_VIRTUAL_ID = 'IAM2VirtualID'
IAM2_PROJECT_TEMPLATE = 'IAM2ProjectTemplate'
IAM2_VIRTUAL_ID_GROUP_ATTRIBUTE = 'IAM2VirtualIDGroupAttribute'
IAM2_VIRTUAL_ID_ATTRIBUTE = 'IAM2VirtualIDAttribute'
IAM2_PROJECT_ATTRIBUTE = 'IAM2ProjectAttribute'
IAM2_ORGANIZATION_ATTRIBUTE = 'IAM2OrganizationAttribute'
ROLE='Role'
DATACENTER = 'DataCenter'
NAS_FILESYSTEM = 'NasFileSystem'
NAS_MOUNTTARGET = 'NasMountTarget'
ALIYUNNAS_ACCESSGROUP = 'AliyunNasAccessGroup'
STACK_TEMPLATE = "StackTemplate"
RESOURCE_STACK = "ResourceStack"
EVENT_FROM_STACK = "EventFromStack"
TICKET = 'Ticket'
TICKET_HISTORY = 'TicketHistory'
QUOTA = 'Quota'
CERTIFICATE = 'certificate'
VOLUME_BACKUP = 'VolumeBackup'
IPSEC_CONNECTION = 'IPsecConnection'
SCSI_LUN = 'ScsiLun'
ISCSI_SERVER = 'iScsiServer'
def find_item_by_uuid(inventories, uuid):
for item in inventories:
if item.uuid == uuid:
#test_util.test_logger("Item found by UUID: %s" % uuid)
return [item]
#test_util.test_logger("Not found item with UUID: %s" % uuid)
return None
def find_item_by_name(inventories, name):
for item in inventories:
if item.name == name:
#test_util.test_logger("Item found by name: %s" % name)
return [item]
#test_util.test_logger("Not found item with name: %s" % name)
return None
#Using List API
def list_resource(resource, session_uuid=None, uuid=None, name=None):
'''
    Return: inventory list retrieved by the List API, optionally filtered by uuid or name.
'''
if resource == BACKUP_STORAGE:
action = api_actions.ListBackupStorageAction()
elif resource == ZONE:
action = api_actions.ListZonesAction()
elif resource == PRIMARY_STORAGE:
action = api_actions.ListPrimaryStorageAction()
elif resource == L2_NETWORK:
action = api_actions.ListL2NetworkAction()
elif resource == L2_VLAN_NETWORK:
action = api_actions.ListL2VlanNetworkAction()
elif resource == CLUSTER:
action = api_actions.ListClusterAction()
elif resource == L3_NETWORK:
action = api_actions.ListL3NetworkAction()
elif resource == INSTANCE_OFFERING:
action = api_actions.ListInstanceOfferingAction()
elif resource == IMAGE:
action = api_actions.ListImageAction()
elif resource == VOLUME:
action = api_actions.ListVolumeAction()
elif resource == VM_INSTANCE:
action = api_actions.ListVmInstanceAction()
elif resource == IP_RANGE:
action = api_actions.ListIpRangeAction()
elif resource == HOST:
action = api_actions.ListHostAction()
elif resource == NETWORK_SERVICE_PROVIDER:
action = api_actions.ListNetworkServiceProviderAction()
elif resource == APPLIANCE_VM:
action = api_actions.ListApplianceVmAction()
elif resource == DISK_OFFERING:
action = api_actions.ListDiskOfferingAction()
elif resource == ACCOUNT:
action = api_actions.ListAccountAction()
elif resource == PRIMARY_STORAGE:
action = api_actions.ListPrimaryStorageAction()
elif resource == SECURITY_GROUP:
action = api_actions.ListSecurityGroupAction()
elif resource == VM_SECURITY_GROUP:
action = api_actions.ListVmNicInSecurityGroupAction()
elif resource == VM_NIC:
action = api_actions.ListVmNicAction()
elif resource == PORT_FORWARDING:
action = api_actions.ListPortForwardingRuleAction()
elif resource == MANAGEMENT_NODE:
action = api_actions.ListManagementNodeAction()
ret = account_operations.execute_action_with_session(action, session_uuid)
if uuid:
return find_item_by_uuid(ret, uuid)
if name:
return find_item_by_name(ret, name)
return ret
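#Hedged usage sketch (not part of the original library): list_resource returns
#the whole inventory list for a resource type and can filter the result by uuid
#or name on the client side. The VM name below is hypothetical; the session
#uuid would come from the account_operations login helpers.
def example_list_vm_by_name(vm_name, session_uuid=None):
    '''
    minimal sketch: list VM instances and keep only the one matching vm_name
    '''
    return list_resource(VM_INSTANCE, session_uuid=session_uuid, name=vm_name)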
#Using Search API
def search_resource(resource, session_uuid, uuid=None, name=None):
'''
    Return: list found by the Search API.
    This API is deprecated.
'''
if resource == BACKUP_STORAGE:
action = api_actions.SearchBackupStorageAction()
elif resource == ZONE:
action = api_actions.SearchZoneAction()
elif resource == PRIMARY_STORAGE:
action = api_actions.SearchPrimaryStorageAction()
elif resource == L2_NETWORK:
action = api_actions.SearchL2NetworkAction()
elif resource == L2_VLAN_NETWORK:
action = api_actions.SearchL2VlanNetworkAction()
elif resource == CLUSTER:
action = api_actions.SearchClusterAction()
elif resource == L3_NETWORK:
action = api_actions.SearchL3NetworkAction()
elif resource == INSTANCE_OFFERING:
action = api_actions.SearchInstanceOfferingAction()
elif resource == IMAGE:
action = api_actions.SearchImageAction()
elif resource == VOLUME:
action = api_actions.SearchVolumeAction()
elif resource == VM_INSTANCE:
action = api_actions.SearchVmInstanceAction()
elif resource == IP_RANGE:
action = api_actions.SearchIpRangeAction()
elif resource == HOST:
action = api_actions.SearchHostAction()
elif resource == NETWORK_SERVICE_PROVIDER:
action = api_actions.SearchNetworkServiceProviderAction()
elif resource == APPLIANCE_VM:
action = api_actions.SearchApplianceVmAction()
elif resource == DISK_OFFERING:
action = api_actions.SearchDiskOfferingAction()
elif resource == ACCOUNT:
action = api_actions.SearchAccountAction()
elif resource == PRIMARY_STORAGE:
action = api_actions.SearchPrimaryStorageAction()
#elif resource == SECURITY_GROUP:
# action = api_actions.SearchSecurityGroupAction()
#elif resource == VM_SECURITY_GROUP:
# action = api_actions.SearchVmNicInSecurityGroupAction()
action.sessionUuid = session_uuid
action.nameOpValueTriples = []
if uuid:
t = inventory.NOVTriple()
t.name = 'uuid'
t.op = inventory.AND_EQ
t.val = uuid
action.nameOpValueTriples.append(t)
if name:
t = inventory.NOVTriple()
t.name = 'name'
t.op = inventory.AND_EQ
t.val = name
action.nameOpValueTriples.append(t)
    # the delay is here because the elasticsearch inventory lags about 0.5s behind data created in the database.
time.sleep(0.3)
ret = action.run()
return ret
def get_resource_by_get(resource, session_uuid, uuid):
'''
Return a list by get API.
'''
if resource == BACKUP_STORAGE:
action = api_actions.GetBackupStorageAction()
elif resource == ZONE:
action = api_actions.GetZoneAction()
elif resource == PRIMARY_STORAGE:
action = api_actions.GetPrimaryStorageAction()
elif resource == L2_NETWORK:
action = api_actions.GetL2NetworkAction()
elif resource == L2_VLAN_NETWORK:
action = api_actions.GetL2VlanNetworkAction()
elif resource == CLUSTER:
action = api_actions.GetClusterAction()
elif resource == L3_NETWORK:
action = api_actions.GetL3NetworkAction()
elif resource == INSTANCE_OFFERING:
action = api_actions.GetInstanceOfferingAction()
elif resource == IMAGE:
action = api_actions.GetImageAction()
elif resource == VOLUME:
action = api_actions.GetVolumeAction()
elif resource == VM_INSTANCE:
action = api_actions.GetVmInstanceAction()
elif resource == IP_RANGE:
action = api_actions.GetIpRangeAction()
elif resource == HOST:
action = api_actions.GetHostAction()
elif resource == NETWORK_SERVICE_PROVIDER:
action = api_actions.GetNetworkServiceProviderAction()
elif resource == APPLIANCE_VM:
action = api_actions.GetApplianceVmAction()
elif resource == DISK_OFFERING:
action = api_actions.GetDiskOfferingAction()
elif resource == ACCOUNT:
action = api_actions.GetAccountAction()
elif resource == PRIMARY_STORAGE:
action = api_actions.GetPrimaryStorageAction()
elif resource == VR_OFFERING:
action = api_actions.GetVirtualRouterOfferingAction()
elif resource == VCENTER_DVSWITCHES:
action = api_actions.GetVCenterDVSwitchesAction()
#elif resource == SECURITY_GROUP:
# action = api_actions.GetSecurityGroupAction()
#elif resource == VM_SECURITY_GROUP:
# action = api_actions.GetVmNicInSecurityGroupAction()
action.uuid = uuid
ret = account_operations.execute_action_with_session(action, session_uuid)
return ret
def gen_query_conditions(name, op, value, conditions=[]):
new_conditions = [{'name': name, 'op': op, 'value': value}]
new_conditions.extend(conditions)
return new_conditions
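#Hedged usage sketch (not part of the original library): gen_query_conditions
#builds the list-of-dicts condition format the Query* actions consume, and calls
#can be chained by passing the previous list back in. The name and state values
#below are hypothetical.
def example_gen_query_conditions():
    conditions = gen_query_conditions('name', '=', 'test-vm')
    conditions = gen_query_conditions('state', '=', 'Running', conditions)
    # conditions is now:
    # [{'name': 'state', 'op': '=', 'value': 'Running'},
    #  {'name': 'name', 'op': '=', 'value': 'test-vm'}]
    return conditions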
reimage_thread_queue = 0
@lock.lock('image_thread')
def increase_image_thread():
global reimage_thread_queue
reimage_thread_queue += 1
@lock.lock('image_thread')
def decrease_image_thread():
global reimage_thread_queue
reimage_thread_queue -= 1
def wait_for_image_thread_queue():
while reimage_thread_queue >= IMAGE_THREAD_LIMIT:
time.sleep(1)
print 'reimage_thread_queue: %d' % reimage_thread_queue
def cleanup_exc_info():
    global exc_info
    exc_info = []
def check_thread_exception():
if exc_info:
info1 = exc_info[0][1]
info2 = exc_info[0][2]
cleanup_exc_info()
raise info1, None, info2
def wait_for_thread_done(report = False):
while threading.active_count() > 1:
check_thread_exception()
time.sleep(1)
if report:
print 'thread count: %d' % threading.active_count()
check_thread_exception()
exc_info = []
IMAGE_THREAD_LIMIT=2
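#Hedged sketch (not part of the original library) of how the throttle helpers
#above fit together, mirroring the pattern _lazyload_image uses below: each
#worker brackets its work with increase/decrease, the dispatcher blocks on
#wait_for_image_thread_queue() before starting another thread, and
#wait_for_thread_done() drains everything and re-raises any worker exception.
def example_throttled_actions(actions):
    def _worker(action):
        increase_image_thread()
        try:
            account_operations.execute_action_with_session(action, None)
        except Exception:
            exc_info.append(sys.exc_info())
        finally:
            decrease_image_thread()
    for action in actions:
        wait_for_image_thread_queue()
        threading.Thread(target=_worker, args=(action,)).start()
    wait_for_thread_done(True)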
def _lazyload_image(condition=None):
def _load_image(action):
increase_image_thread()
try:
#evt = action.run()
evt = account_operations.execute_action_with_session(action, None)
except:
exc_info.append(sys.exc_info())
finally:
decrease_image_thread()
iaction = api_actions.QueryImageAction()
iaction.conditions = condition
ret = account_operations.execute_action_with_session(iaction, None)
if len(ret) != 0:
print "no need lazy"
return
test_config_path = os.environ.get('WOODPECKER_TEST_CONFIG_FILE')
test_config_obj = test_util.TestConfig(test_config_path)
#Special config in test-config.xml, such like test ping target.
test_config = test_config_obj.get_test_config()
#All configs in deploy.xml.
all_config = test_config_obj.get_deploy_config()
#Detailed zstack deployment information, including zones/cluster/hosts...
deploy_config = all_config.deployerConfig
for i in xmlobject.safe_list(deploy_config.images.image):
image_action = api_actions.QueryImageAction()
condition = gen_query_conditions('name', '=', i.name_)
image_action.conditions = condition
ret = account_operations.execute_action_with_session(image_action, None)
if len(ret) != 0:
print "image has beed added"
continue
session_uuid = None
if i.hasattr('label_') and i.label_ == 'lazyload':
for bsref in xmlobject.safe_list(i.backupStorageRef):
bss = get_resource(BACKUP_STORAGE, None, name=bsref.text_)
bs = deploy_operations.get_first_item_from_list(bss, 'backup storage', bsref.text_, 'image')
action = api_actions.AddImageAction()
action.sessionUuid = session_uuid
#TODO: account uuid will be removed later.
action.accountUuid = inventory.INITIAL_SYSTEM_ADMIN_UUID
action.backupStorageUuids = [bs.uuid]
action.bits = i.bits__
if not action.bits:
action.bits = 64
action.description = i.description__
action.format = i.format_
action.mediaType = i.mediaType_
action.guestOsType = i.guestOsType__
if not action.guestOsType:
action.guestOsType = 'unknown'
action.platform = i.platform__
if not action.platform:
action.platform = 'Linux'
action.hypervisorType = i.hypervisorType__
action.name = i.name_
action.url = i.url_
action.timeout = 1800000
if i.hasattr('system_'):
action.system = i.system_
if i.hasattr('systemTags_'):
action.systemTags = i.systemTags_.split(',')
thread = threading.Thread(target = _load_image, args = (action, ))
wait_for_image_thread_queue()
                print 'before adding image: %s' % i.url_
thread.start()
print 'add image: %s' % i.url_
    print 'all image add commands have been executed'
wait_for_thread_done(True)
print 'all images have been added'
def _gen_query_action(resource, condition=None):
if resource == BACKUP_STORAGE:
action = api_actions.QueryBackupStorageAction()
elif resource == SFTP_BACKUP_STORAGE:
action = api_actions.QuerySftpBackupStorageAction()
elif resource == CEPH_BACKUP_STORAGE:
action = api_actions.QueryCephBackupStorageAction()
elif resource == ZONE:
action = api_actions.QueryZoneAction()
elif resource == PRIMARY_STORAGE:
action = api_actions.QueryPrimaryStorageAction()
elif resource == L2_NETWORK:
action = api_actions.QueryL2NetworkAction()
elif resource == L2_VLAN_NETWORK:
action = api_actions.QueryL2VlanNetworkAction()
elif resource == L2_VXLAN_NETWORK:
action = api_actions.QueryL2VxlanNetworkAction()
elif resource == L2_VXLAN_NETWORK_POOL:
action = api_actions.QueryL2VxlanNetworkPoolAction()
elif resource == VNI_RANGE:
action = api_actions.QueryVniRangeAction()
elif resource == CLUSTER:
action = api_actions.QueryClusterAction()
elif resource == L3_NETWORK:
action = api_actions.QueryL3NetworkAction()
elif resource == INSTANCE_OFFERING:
action = api_actions.QueryInstanceOfferingAction()
elif resource == IMAGE:
_lazyload_image(condition)
action = api_actions.QueryImageAction()
elif resource == VOLUME:
action = api_actions.QueryVolumeAction()
elif resource == SHARE_VOLUME:
action = api_actions.QueryShareableVolumeVmInstanceRefAction()
elif resource == VM_INSTANCE:
action = api_actions.QueryVmInstanceAction()
elif resource == IP_RANGE:
action = api_actions.QueryIpRangeAction()
elif resource == HOST:
action = api_actions.QueryHostAction()
elif resource == NETWORK_SERVICE_PROVIDER:
action = api_actions.QueryNetworkServiceProviderAction()
elif resource == NETWORK_SERVICE_PROVIDER_L3_REF:
action = api_actions.QueryNetworkServiceL3NetworkRefAction()
elif resource == APPLIANCE_VM:
action = api_actions.QueryApplianceVmAction()
elif resource == VIRTUALROUTER_VM:
action = api_actions.QueryVirtualRouterVmAction()
elif resource == DISK_OFFERING:
action = api_actions.QueryDiskOfferingAction()
elif resource == ACCOUNT:
action = api_actions.QueryAccountAction()
elif resource == CEPH_PRIMARY_STORAGE:
action = api_actions.QueryCephPrimaryStorageAction()
elif resource == CEPH_PRIMARY_STORAGE_POOL:
action = api_actions.QueryCephPrimaryStoragePoolAction()
elif resource == SECURITY_GROUP:
action = api_actions.QuerySecurityGroupAction()
elif resource == SECURITY_GROUP_RULE:
action = api_actions.QuerySecurityGroupRuleAction()
elif resource == VM_SECURITY_GROUP:
action = api_actions.QueryVmNicInSecurityGroupAction()
elif resource == VM_NIC:
action = api_actions.QueryVmNicAction()
elif resource == PORT_FORWARDING:
action = api_actions.QueryPortForwardingRuleAction()
elif resource == MANAGEMENT_NODE:
action = api_actions.QueryManagementNodeAction()
elif resource == EIP:
action = api_actions.QueryEipAction()
elif resource == VIP:
action = api_actions.QueryVipAction()
elif resource == VR_OFFERING:
action = api_actions.QueryVirtualRouterOfferingAction()
elif resource == SYSTEM_TAG:
action = api_actions.QuerySystemTagAction()
elif resource == USER_TAG:
action = api_actions.QueryUserTagAction()
elif resource == VOLUME_SNAPSHOT_TREE:
action = api_actions.QueryVolumeSnapshotTreeAction()
elif resource == VOLUME_SNAPSHOT:
action = api_actions.QueryVolumeSnapshotAction()
elif resource == USER:
action = api_actions.QueryUserAction()
elif resource == LOAD_BALANCER:
action = api_actions.QueryLoadBalancerAction()
elif resource == LOAD_BALANCER_LISTENER:
action = api_actions.QueryLoadBalancerListenerAction()
elif resource == LOCAL_STORAGE_RESOURCE_REF:
action = api_actions.QueryLocalStorageResourceRefAction()
elif resource == IMAGE_STORE_BACKUP_STORAGE:
action = api_actions.QueryImageStoreBackupStorageAction()
elif resource == SCHEDULER:
action = api_actions.QuerySchedulerAction()
elif resource == SCHEDULERJOB:
action = api_actions.QuerySchedulerJobAction()
elif resource == SCHEDULERTRIGGER:
action = api_actions.QuerySchedulerTriggerAction()
elif resource == VCENTER:
action = api_actions.QueryVCenterAction()
elif resource == VCENTER_CLUSTER:
action = api_actions.QueryVCenterClusterAction()
elif resource == VCENTER_BACKUP_STORAGE:
action = api_actions.QueryVCenterBackupStorageAction()
elif resource == VCENTER_PRIMARY_STORAGE:
action = api_actions.QueryVCenterPrimaryStorageAction()
elif resource == MONITOR_TRIGGER:
action = api_actions.QueryMonitorTriggerAction()
elif resource == MONITOR_TRIGGER_ACTION:
action = api_actions.QueryMonitorTriggerActionAction()
elif resource == PXE_SERVER:
action = api_actions.QueryBaremetalPxeServerAction()
elif resource == CHASSIS:
action = api_actions.QueryBaremetalChassisAction()
elif resource == HWINFO:
action = api_actions.QueryBaremetalHardwareInfoAction()
elif resource == BAREMETAL_INS:
action = api_actions.QueryBaremetalInstanceAction()
elif resource == LONGJOB:
action = api_actions.QueryLongJobAction()
elif resource == ALARM:
action = api_actions.QueryAlarmAction()
elif resource == EVENT_SUBSCRIPTION:
action = api_actions.QueryEventSubscriptionAction()
elif resource == SNS_APPLICATION_ENDPOINT:
action = api_actions.QuerySNSApplicationEndpointAction()
elif resource == SNS_APPLICATION_PLATFORM:
action = api_actions.QuerySNSApplicationPlatformAction()
elif resource == SNS_TOPIC:
action = api_actions.QuerySNSTopicAction()
elif resource == SNS_TOPIC_SUBSCRIBER:
action = api_actions.QuerySNSTopicSubscriberAction()
elif resource == SNS_DING_TALK_ENDPOINT:
action = api_actions.QuerySNSDingTalkEndpointAction()
elif resource == SNS_EMAIL_ENDPOINT:
action = api_actions.QuerySNSEmailEndpointAction()
elif resource == SNS_EMAIL_PLATFORM:
action = api_actions.QuerySNSEmailPlatformAction()
elif resource == SNS_HTTP_ENDPOINT:
action = api_actions.QuerySNSHttpEndpointAction()
elif resource == SNS_TEXT_TEMPLATE:
action = api_actions.QuerySNSTextTemplateAction()
elif resource == AFFINITY_GROUP:
action = api_actions.QueryAffinityGroupAction()
elif resource == IAM2_ORGANIZATION:
action = api_actions.QueryIAM2OrganizationAction()
elif resource == IAM2_PROJECT:
action = api_actions.QueryIAM2ProjectAction()
elif resource == IAM2_VIRTUAL_ID_GROUP:
action = api_actions.QueryIAM2VirtualIDGroupAction()
elif resource == IAM2_VIRTUAL_ID:
action = api_actions.QueryIAM2VirtualIDAction()
elif resource == IAM2_PROJECT_TEMPLATE:
action = api_actions.QueryIAM2ProjectTemplateAction()
elif resource == IAM2_VIRTUAL_ID_GROUP_ATTRIBUTE:
action = api_actions.QueryIAM2VirtualIDGroupAttributeAction()
elif resource == IAM2_VIRTUAL_ID_ATTRIBUTE:
action = api_actions.QueryIAM2VirtualIDAttributeAction()
elif resource == IAM2_PROJECT_ATTRIBUTE:
action = api_actions.QueryIAM2ProjectAttributeAction()
elif resource == IAM2_ORGANIZATION_ATTRIBUTE:
action = api_actions.QueryIAM2OrganizationAttributeAction()
elif resource == ROLE:
action = api_actions.QueryRoleAction()
elif resource == DATACENTER:
action = api_actions.QueryDataCenterFromLocalAction()
elif resource == ALIYUNNAS_ACCESSGROUP:
action = api_actions.QueryAliyunNasAccessGroupAction()
elif resource == NAS_FILESYSTEM:
action = api_actions.QueryNasFileSystemAction()
elif resource == NAS_MOUNTTARGET:
action = api_actions.QueryNasMountTargetAction()
elif resource == STACK_TEMPLATE:
action = api_actions.QueryStackTemplateAction()
elif resource == RESOURCE_STACK:
action = api_actions.QueryResourceStackAction()
elif resource == EVENT_FROM_STACK:
action = api_actions.QueryEventFromResourceStackAction()
elif resource == TICKET:
action = api_actions.QueryTicketAction()
elif resource == TICKET_HISTORY:
action = api_actions.QueryTicketHistoryAction()
elif resource == QUOTA:
action = api_actions.QueryQuotaAction()
elif resource == CERTIFICATE:
action = api_actions.QueryCertificateAction()
elif resource == VOLUME_BACKUP:
action = api_actions.QueryVolumeBackupAction()
elif resource == IPSEC_CONNECTION:
action = api_actions.QueryIPSecConnectionAction()
elif resource == SCSI_LUN:
action = api_actions.QueryScsiLunAction()
elif resource == ISCSI_SERVER:
action = api_actions.QueryIscsiServerAction()
return action
def query_event_from_resource_stack(conditions = [], resource=EVENT_FROM_STACK, session_uuid=None, count='false'):
'''
Call the Query API and return all matched resources.
conditions can be generated by gen_query_conditions().
If session_uuid is missing, one will be created for you; it lives only
for the duration of this API call.
'''
action = _gen_query_action(resource, conditions)
action.conditions = conditions
ret = account_operations.execute_action_with_session(action, session_uuid)
return ret
def query_resource(resource, conditions = [], session_uuid=None, count='false'):
'''
Call the Query API and return all matched resources.
conditions can be generated by gen_query_conditions().
If session_uuid is missing, one will be created for you; it lives only
for the duration of this API call.
'''
action = _gen_query_action(resource, conditions)
action.conditions = conditions
ret = account_operations.execute_action_with_session(action, session_uuid)
return ret
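# A minimal usage sketch for query_resource(), reusing gen_query_conditions()
# the same way get_resource() below does. The condition values are illustrative
# only, and query_resource() is assumed to return the list of matched inventories:
#
#   conds = gen_query_conditions('state', '=', 'Running')
#   vms = query_resource(VM_INSTANCE, conds)
#   for vm in vms:
#       print vm.uuid, vm.name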
def query_resource_count(resource, conditions = [], session_uuid=None):
'''
Call the Query API and return only the count of matched resources.
The query is issued with count=true, so no inventories are returned.
'''
action = _gen_query_action(resource, conditions)
action.conditions = conditions
action.count='true'
account_operations.execute_action_with_session(action, session_uuid)
return action.reply.total
def query_resource_with_num(resource, conditions = [], session_uuid=None, \
start=0, limit=1000):
'''
Query matched resources, returning at most `limit` results starting from `start`.
'''
action = _gen_query_action(resource, conditions)
action.conditions = conditions
action.start = start
action.limit = limit
ret = account_operations.execute_action_with_session(action, session_uuid)
return ret
def query_resource_fields(resource, conditions = [], session_uuid=None, \
fields=[], start=0, limit=1000):
'''
Query matched resources, returning only the requested fields and at most `limit` results starting from `start`.
'''
action = _gen_query_action(resource, conditions)
action.conditions = conditions
action.start = start
action.limit = limit
action.fields = fields
ret = account_operations.execute_action_with_session(action, session_uuid)
return ret
def get_resource(resource, session_uuid=None, uuid=None, name=None):
if uuid:
cond = gen_query_conditions('uuid', '=', uuid)
elif name:
cond = gen_query_conditions('name', '=', name)
else:
cond = gen_query_conditions('uuid', '!=', 'NULL')
return query_resource(resource, cond, session_uuid)
#if GET_RESOURCE_METHOD == LIST_RESOURCE_METHOD:
# return list_resource(resource, session_uuid, uuid=uuid, name=name)
#elif GET_RESOURCE_METHOD == GET_RESOURCE_METHOD_BY_GET:
# if not uuid:
# raise Exception('Get_Resource function error, uuid can not be None')
# return get_resource_by_get(resource, session_uuid, uuid=uuid)
#else:
# return search_resource(resource, session_uuid, uuid=uuid, name=name)
def safely_get_resource(res_name, cond = [], session_uuid = None, \
fields = None, limit = 100):
'''
If there are a lot of resources (e.g. >1k), querying all of them in one
command is dangerous: it might crash ZStack when the result set is huge.
This helper therefore pages through the results `limit` records at a time.
'''
res_count = query_resource_count(res_name, cond, session_uuid)
res_list = []
if res_count <= limit:
res_list = query_resource_fields(res_name, cond, session_uuid, fields)
else:
curr_count = 0
while curr_count < res_count:
curr_list = query_resource_fields(res_name, cond, \
session_uuid, fields, start=curr_count, limit=limit)
res_list.extend(curr_list)
curr_count += limit
return res_list
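# A usage sketch for the paging helper above, under the same assumptions as the
# query_resource() example: fetch only the uuid/name fields of matching VMs,
# 100 records per query, so a huge result set is never requested in one call.
#
#   conds = gen_query_conditions('state', '=', 'Running')
#   vms = safely_get_resource(VM_INSTANCE, conds, fields=['uuid', 'name'], limit=100)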
def change_recource_owner(accountUuid, resourceUuid, session_uuid = None):
action = api_actions.ChangeResourceOwnerAction()
action.accountUuid = accountUuid
action.resourceUuid = resourceUuid
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
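# get_resource_owner() below expects resourceUuid to be a list of resource UUIDs;
# it returns the owner account uuid of the first resource in that list (see the
# indexing in its return statement).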
def get_resource_owner(resourceUuid, session_uuid = None):
action = api_actions.GetResourceAccountAction()
action.resourceUuids = resourceUuid
ret = account_operations.execute_action_with_session(action, session_uuid)
return ret.inventories[resourceUuid[0]].uuid
def get_task_progress(apiId, session_uuid = None):
action = api_actions.GetTaskProgressAction()
action.apiId = apiId
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def get_progress(apiId, session_uuid = None):
action = api_actions.GetTaskProgressAction()
action.apiId = apiId
evt = account_operations.execute_action_with_session(action, session_uuid)
inventories = []
for ei in evt.inventories:
if ei.type == 'Progress':
inventories.append(ei)
return inventories
def enable_change_vm_password(is_enable, resourceUuid, resourceType, session_uuid = None):
action = api_actions.EnableChangeVmPasswordAction()
action.enable = is_enable
action.resourceUuid = resourceUuid
action.resourceType = resourceType
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
import paddle.fluid as fluid
import contextlib
import math
import sys
import numpy
import unittest
import os
import numpy as np
def resnet_cifar10(input, depth=32):
def conv_bn_layer(input,
ch_out,
filter_size,
stride,
padding,
act='relu',
bias_attr=False):
tmp = fluid.layers.conv2d(
input=input,
filter_size=filter_size,
num_filters=ch_out,
stride=stride,
padding=padding,
act=None,
bias_attr=bias_attr)
return fluid.layers.batch_norm(input=tmp, act=act)
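# Note: the convolution itself is linear (act=None); the activation is applied by
# the batch_norm layer above, i.e. the usual conv -> BN -> activation ordering.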
def shortcut(input, ch_in, ch_out, stride):
if ch_in != ch_out:
return conv_bn_layer(input, ch_out, 1, stride, 0, None)
else:
return input
def basicblock(input, ch_in, ch_out, stride):
tmp = conv_bn_layer(input, ch_out, 3, stride, 1)
tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True)
short = shortcut(input, ch_in, ch_out, stride)
return fluid.layers.elementwise_add(x=tmp, y=short, act='relu')
def layer_warp(block_func, input, ch_in, ch_out, count, stride):
tmp = block_func(input, ch_in, ch_out, stride)
for i in range(1, count):
tmp = block_func(tmp, ch_out, ch_out, 1)
return tmp
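# A CIFAR-10 ResNet of this form has 6n+2 layers (2n per stage over three stages,
# plus the first conv and the final classifier), so depth must satisfy
# (depth - 2) % 6 == 0; the default depth=32 gives n=5 residual blocks per stage.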
assert (depth - 2) % 6 == 0
n = (depth - 2) // 6
conv1 = conv_bn_layer(
input=input, ch_out=16, filter_size=3, stride=1, padding=1)
res1 = layer_warp(basicblock, conv1, 16, 16, n, 1)
res2 = layer_warp(basicblock, res1, 16, 32, n, 2)
res3 = layer_warp(basicblock, res2, 32, 64, n, 2)
pool = fluid.layers.pool2d(
input=res3, pool_size=8, pool_type='avg', pool_stride=1)
return pool
def vgg16_bn_drop(input):
def conv_block(input, num_filter, groups, dropouts):
return fluid.nets.img_conv_group(
input=input,
pool_size=2,
pool_stride=2,
conv_num_filter=[num_filter] * groups,
conv_filter_size=3,
conv_act='relu',
conv_with_batchnorm=True,
conv_batchnorm_drop_rate=dropouts,
pool_type='max')
conv1 = conv_block(input, 64, 2, [0.3, 0])
conv2 = conv_block(conv1, 128, 2, [0.4, 0])
conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])
conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])
drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)
fc1 = fluid.layers.fc(input=drop, size=4096, act=None)
bn = fluid.layers.batch_norm(input=fc1, act='relu')
drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5)
fc2 = fluid.layers.fc(input=drop2, size=4096, act=None)
return fc2
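# vgg16_bn_drop() above is the classic VGG-16 layout adapted to 32x32 inputs:
# five conv blocks with 2-2-3-3-3 conv layers (batch norm after each conv,
# dropout between convs), followed by two 4096-wide fully connected layers.
# The final softmax classifier layer is added by the caller in train().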
def train(net_type, use_cuda, save_dirname, is_local):
classdim = 10
data_shape = [3, 32, 32]
images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
if net_type == "vgg":
print("train vgg net")
net = vgg16_bn_drop(images)
elif net_type == "resnet":
print("train resnet")
net = resnet_cifar10(images, 32)
else:
raise ValueError("%s network is not supported" % net_type)
predict = fluid.layers.fc(input=net, size=classdim, act='softmax')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(cost)
acc = fluid.layers.accuracy(input=predict, label=label)
# Clone the main program for evaluation before the optimizer adds backward/optimization ops.
test_program = fluid.default_main_program().clone(for_test=True)
optimizer = fluid.optimizer.Adam(learning_rate=0.001)
optimizer.minimize(avg_cost)
BATCH_SIZE = 128
PASS_NUM = 1
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.cifar.train10(), buf_size=128 * 10),
batch_size=BATCH_SIZE)
test_reader = paddle.batch(
paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(place=place, feed_list=[images, label])
def train_loop(main_program):
exe.run(fluid.default_startup_program())
loss = 0.0
for pass_id in range(PASS_NUM):
for batch_id, data in enumerate(train_reader()):
exe.run(main_program, feed=feeder.feed(data))
if (batch_id % 10) == 0:
acc_list = []
avg_loss_list = []
for tid, test_data in enumerate(test_reader()):
loss_t, acc_t = exe.run(program=test_program,
feed=feeder.feed(test_data),
fetch_list=[avg_cost, acc])
if math.isnan(float(loss_t)):
sys.exit("got NaN loss, training failed.")
acc_list.append(float(acc_t))
avg_loss_list.append(float(loss_t))
break # Use 1 segment for speeding up CI
acc_value = numpy.array(acc_list).mean()
avg_loss_value = numpy.array(avg_loss_list).mean()
print(
'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'.
format(pass_id, batch_id + 1,
float(avg_loss_value), float(acc_value)))
if acc_value > 0.01: # Low threshold for speeding up CI
fluid.io.save_inference_model(save_dirname, ["pixel"],
[predict], exe)
return
if is_local:
train_loop(fluid.default_main_program())
else:
port = os.getenv("PADDLE_PSERVER_PORT", "6174")
pserver_ips = os.getenv("PADDLE_PSERVER_IPS") # ip,ip...
eplist = []
for ip in pserver_ips.split(","):
eplist.append(':'.join([ip, port]))
pserver_endpoints = ",".join(eplist) # ip:port,ip:port...
trainers = int(os.getenv("PADDLE_TRAINERS"))
current_endpoint = os.getenv("POD_IP") + ":" + port
trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER")
t = fluid.DistributeTranspiler()
t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
if training_role == "PSERVER":
pserver_prog = t.get_pserver_program(current_endpoint)
pserver_startup = t.get_startup_program(current_endpoint,
pserver_prog)
exe.run(pserver_startup)
exe.run(pserver_prog)
elif training_role == "TRAINER":
train_loop(t.get_trainer_program())
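# When is_local is False, train() reads the transpiler settings from the
# environment variables used above. Example values (illustrative only):
#
#   PADDLE_TRAINING_ROLE=TRAINER        # or PSERVER
#   PADDLE_PSERVER_PORT=6174
#   PADDLE_PSERVER_IPS=192.168.0.2,192.168.0.3
#   PADDLE_TRAINERS=2
#   PADDLE_TRAINER_ID=0
#   POD_IP=192.168.0.4                  # this node's own address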
def infer(use_cuda, save_dirname=None):
if save_dirname is None:
return
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
inference_scope = fluid.core.Scope()
with fluid.scope_guard(inference_scope):
# Use fluid.io.load_inference_model to obtain the inference program desc,
# the feed_target_names (the names of variables that will be fed
# data using feed operators), and the fetch_targets (variables that
# we want to obtain data from using fetch operators).
[inference_program, feed_target_names,
fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
# The input to a conv layer should be a 4-D or 5-D tensor.
# Use normalized image pixels as input data, which should be in the range [0, 1.0].
batch_size = 1
tensor_img = numpy.random.rand(batch_size, 3, 32, 32).astype("float32")
# Use inference_transpiler to speedup
inference_transpiler_program = inference_program.clone()
t = fluid.transpiler.InferenceTranspiler()
t.transpile(inference_transpiler_program, place)
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
# and results will contain a list of data corresponding to fetch_targets.
results = exe.run(inference_program,
feed={feed_target_names[0]: tensor_img},
fetch_list=fetch_targets)
transpiler_results = exe.run(inference_transpiler_program,
feed={feed_target_names[0]: tensor_img},
fetch_list=fetch_targets)
assert len(results[0]) == len(transpiler_results[0])
for i in range(len(results[0])):
np.testing.assert_almost_equal(
results[0][i], transpiler_results[0][i], decimal=4)
print("infer results: ", results[0])
fluid.io.save_inference_model(save_dirname, feed_target_names,
fetch_targets, exe,
inference_transpiler_program)
def main(net_type, use_cuda, is_local=True):
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
# Directory for saving the trained model
save_dirname = "image_classification_" + net_type + ".inference.model"
train(net_type, use_cuda, save_dirname, is_local)
infer(use_cuda, save_dirname)
class TestImageClassification(unittest.TestCase):
def test_vgg_cuda(self):
with self.scope_prog_guard():
main('vgg', use_cuda=True)
def test_resnet_cuda(self):
with self.scope_prog_guard():
main('resnet', use_cuda=True)
def test_vgg_cpu(self):
with self.scope_prog_guard():
main('vgg', use_cuda=False)
def test_resnet_cpu(self):
with self.scope_prog_guard():
main('resnet', use_cuda=False)
@contextlib.contextmanager
def scope_prog_guard(self):
prog = fluid.Program()
startup_prog = fluid.Program()
scope = fluid.core.Scope()
with fluid.scope_guard(scope):
with fluid.program_guard(prog, startup_prog):
yield
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2014, Scalyr, Inc.
#
# Note, this can be run in standalone mode by:
# python -m scalyr_agent.run_monitor
# scalyr_agent.builtin_monitors.apache_monitor
from __future__ import absolute_import
from __future__ import unicode_literals
import six
import six.moves.http_client
import six.moves.urllib.request
import six.moves.urllib.error
import six.moves.urllib.parse
import socket
from scalyr_agent import (
ScalyrMonitor,
define_config_option,
define_log_field,
define_metric,
)
httpSourceAddress = "127.0.0.1"
__monitor__ = __name__
define_config_option(
__monitor__,
"module",
"Always ``scalyr_agent.builtin_monitors.apache_monitor``",
convert_to=six.text_type,
required_option=True,
)
define_config_option(
__monitor__,
"id",
"Optional. Included in each log message generated by this monitor, as a field named ``instance``. "
"Allows you to distinguish between values recorded by different monitors. This is especially "
"useful if you are running multiple Apache instances on a single server; you can monitor each "
"instance with a separate apache_monitor record in the Scalyr Agent configuration.",
convert_to=six.text_type,
)
define_config_option(
__monitor__,
"status_url",
"Optional. Specifies the URL -- in particular, the port number -- at which the Apache status "
"module is served. The URL should end in ``/?auto`` to indicate the machine-readable version of "
"the page should be returned.",
default="http://localhost/server-status/?auto",
)
define_config_option(
__monitor__,
"source_address",
"Optional (defaults to '%s'). The IP address to be used as the source address when fetching "
"the status URL. Many servers require this to be 127.0.0.1 because they only server the status "
"page to requests from localhost." % httpSourceAddress,
default=httpSourceAddress,
)
define_log_field(__monitor__, "monitor", "Always ``apache_monitor``.")
define_log_field(
__monitor__,
"metric",
"The metric name. See the metric tables for more information.",
)
define_log_field(__monitor__, "value", "The value of the metric.")
define_log_field(
__monitor__, "instance", "The ``id`` value from the monitor configuration."
)
define_metric(
__monitor__,
"apache.connections.active",
"The number of connections that are being handled "
"asynchronously (not using workers) currently open on the "
"server",
)
define_metric(
__monitor__,
"apache.connections.writing",
"The number of connections that are being handled "
"asynchronously (not using workers) that are currently "
"writing response data.",
)
define_metric(
__monitor__,
"apache.connections.idle",
"The number of connections that are being handled "
"asynchronously (not using workers) that are currently "
"idle / sending keepalives.",
)
define_metric(
__monitor__,
"apache.connections.closing",
"The number of connections that are being handled "
"asynchronously (not using workers) that are currently "
"closing.",
)
define_metric(
__monitor__,
"apache.workers.active",
"How many workers are currently active. Each worker is a process "
"handling an incoming request.",
)
define_metric(
__monitor__,
"apache.workers.idle",
"How many of the workers are currently idle. Each worker is a "
"process that can handle an incoming request.",
)
# Taken from:
# http://stackoverflow.com/questions/1150332/source-interface-with-python-and-urllib2
#
# When connecting to the local machine, specifying the source IP may be required, and
# this mechanism allows that. Since getting status requires "opening up" a
# non-standard/user-facing web page, it is best to be cautious.
#
# Note - the use of a global is ugly, but this form is more compatible than with another
# method mentioned which would not require the global. (The cleaner version was added
# in Python 2.7.)
class BindableHTTPConnection(six.moves.http_client.HTTPConnection):
def connect(self):
"""Connect to the host and port specified in __init__."""
self.sock = socket.socket()
self.sock.bind((self.source_ip, 0))
if isinstance(self.timeout, float):
self.sock.settimeout(self.timeout)
self.sock.connect((self.host, self.port))
def BindableHTTPConnectionFactory(source_ip):
def _get(host, port=None, strict=None, timeout=0):
# pylint: disable=unexpected-keyword-arg
if six.PY2:
kwargs = {"strict": strict}
else:
kwargs = {}
bhc = BindableHTTPConnection(host, port=port, timeout=timeout, **kwargs)
bhc.source_ip = source_ip
return bhc
return _get
class BindableHTTPHandler(six.moves.urllib.request.HTTPHandler):
def http_open(self, req):
return self.do_open(BindableHTTPConnectionFactory(httpSourceAddress), req)
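# A sketch of how the two classes above are used together (mirroring the call in
# _get_status() below): installing BindableHTTPHandler in an opener makes plain
# HTTP requests originate from httpSourceAddress.
#
#   opener = six.moves.urllib.request.build_opener(BindableHTTPHandler)
#   handle = opener.open("http://localhost/server-status/?auto")
#   status_text = handle.read()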
class ApacheMonitor(ScalyrMonitor):
"""
# Apache Monitor
This agent monitor plugin records performance and usage data from an Apache server.
@class=bg-warning docInfoPanel: An *agent monitor plugin* is a component of the Scalyr Agent. To use a plugin,
simply add it to the ``monitors`` section of the Scalyr Agent configuration file (``/etc/scalyr/agent.json``).
For more information, see [Agent Plugins](/help/scalyr-agent#plugins).
## Configuring Apache
To use this monitor, you will need to configure your Apache server to enable the status module. For details,
see the [Apache documentation](http://httpd.apache.org/docs/2.2/mod/mod_status.html).
First, verify that the module is enabled in your Apache server. On most Linux installations, you can use the
following command:
ls /etc/apache2/mods-enabled
If you see ``status.conf`` and ``status.load`` present, the module is enabled. Otherwise, use the following
command (again, for most Linux installations):
sudo /usr/sbin/a2enmod status
On some platforms, you may need to use different commands to enable the status module. Also, if Apache was
compiled manually, the module may not be available. Consult the documentation for your particular platform.
Here are some links:
- [CentOS 5/RHEL 5](https://www.centos.org/docs/5/html/5.1/Deployment_Guide/s1-apache-addmods.html)
- [Ubuntu 14.04](https://help.ubuntu.com/14.04/serverguide/httpd.html)
- [Windows](http://httpd.apache.org/docs/2.0/platform/windows.html#cust)
Next, you must enable the status module, usually by updating the ``VirtualHost`` configuration section of your
Apache server. On Linux, this is typically found in the ``/etc/apache2/sites-available`` directory, in the file
that corresponds to your site.
Add the following to the ``VirtualHost`` section (between ``<VirtualHost>`` and ``</VirtualHost>``):
<Location /server-status>
SetHandler server-status
Order deny,allow
Deny from all
Allow from 127.0.0.1
</Location>
This specifies that the status page should be served at ``http://<address>/server-status``, and can't be accessed
from other servers.
Once you make the configuration change, you will need to restart Apache. On most Linux systems, use the following
command:
sudo service apache2 restart
To verify that the status module is working properly, you can view it manually. Execute this command on the server
(substituting the appropriate port number as needed):
curl http://localhost:80/server-status
If you have any difficulty enabling the status module, drop us a line at [[email protected]](mailto:[email protected]).
## Sample Configuration
Here is a typical configuration fragment:
monitors: [
{
module: "scalyr_agent.builtin_monitors.apache_monitor",
status_url: "http://localhost:80/server-status/?auto"
}
]
If your Apache server is running on a nonstandard port, replace ``80`` with the appropriate port number. For additional
options, see Configuration Reference.
accessLog:
## Uploading the Apache access log
If you have not already done so, you should also configure the Scalyr Agent to upload the access log
generated by Apache. Scalyr's Apache dashboard uses this log to generate many statistics.
For most Linux systems, the access log is saved in ``/var/log/apache2/access.log``. To upload, edit the
``logs`` section of ``/etc/scalyr-agent-2/agent.json``. Add the following entry:
logs: [
...
*** {***
*** path: "/var/log/apache2/access.log",***
*** attributes: {parser: "accessLog", serverType: "apache"}***
*** }***
]
Edit the ``path`` field as appropriate for your system setup.
## Viewing Data
After adding this plugin to the agent configuration file, wait one minute for data to begin recording. Then
click the {{menuRef:Dashboards}} menu and select {{menuRef:Apache}}. (The dashboard may not be listed until
the agent begins sending Apache data.) You will see an overview of Apache data across all servers where you are
running the Apache plugin. Use the {{menuRef:ServerHost}} dropdown to show data for a specific server.
See [Analyze Access Logs](/solutions/analyze-access-logs) for more information about working with web access logs.
"""
def _initialize(self):
global httpSourceAddress
self.__url = self._config.get(
"status_url", default="http://localhost/server-status/?auto"
)
self.__sourceaddress = self._config.get(
"source_addresss", default=httpSourceAddress
)
httpSourceAddress = self.__sourceaddress
def _parse_data(self, data):
fields = {
b"Total Accesses:": "total_accesses",
b"Total kBytes:": "total_kbytes_sent",
b"Uptime:": "uptime",
b"ReqPerSec:": "request_per_sec",
b"BytesPerSec:": "bytes_per_sec",
b"BytesPerReq:": "bytes_per_req",
b"BusyWorkers:": "busy_workers",
b"IdleWorkers:": "idle_workers",
b"ConnsTotal:": "connections_total",
b"ConnsAsyncWriting:": "async_connections_writing",
b"ConnsAsyncKeepAlive:": "async_connections_keep_alive",
b"ConnsAsyncClosing:": "async_connections_closing",
}
result = {}
lines = data.splitlines()
i = 0
# skip any blank lines
while len(lines[i]) == 0:
i = i + 1
while i < len(lines):
for key in fields:
if lines[i].startswith(key):
values = lines[i].split()
result[fields[key]] = values[-1]  # last token is the value, even for two-word keys such as "Total Accesses:"
i = i + 1
return result
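# An illustrative example of what _parse_data() consumes and produces: given a
# machine-readable ("?auto") status page containing lines such as
#
#   BusyWorkers: 3
#   IdleWorkers: 7
#   ConnsTotal: 10
#
# it returns {'busy_workers': '3', 'idle_workers': '7', 'connections_total': '10'}
# (values are left as the raw tokens read from the page; gather_sample() converts
# the ones it emits to int).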
def _get_status(self):
data = None
# verify that the URL is valid
try:
url = six.moves.urllib.parse.urlparse(self.__url)
except Exception:
self._logger.error(
"The URL configured for requesting the status page appears to be invalid. Please verify that the URL is correct in your monitor configuration. The specified url: %s"
% self.__url
)
return data
# attempt to request server status
try:
opener = six.moves.urllib.request.build_opener(BindableHTTPHandler)
handle = opener.open(self.__url)
data = handle.read()
if data is not None:
data = self._parse_data(data)
except six.moves.urllib.error.HTTPError as err:
message = (
"An HTTP error occurred attempting to retrieve the status. Please consult your server logs to determine the cause. HTTP error code: %s"
% err.code
)
if err.code == 404:
message = "The URL used to request the status page appears to be incorrect. Please verify the correct URL and update your apache_monitor configuration."
elif err.code == 403:
message = "The server is denying access to the URL specified for requesting the status page. Please verify that permissions to access the status page are correctly configured in your server configuration and that your apache_monitor configuration reflects the same configuration requirements."
elif 500 <= err.code < 600:
message = (
"The server failed to fulfill the request to get the status page. Please consult your server logs to determine the cause. HTTP error code: %s"
% err.code
)
self._logger.error(message)
data = None
except six.moves.urllib.error.URLError as err:
message = (
"The was an error attempting to reach the server. Make sure the server is running and properly configured. The error reported is: %s"
% (six.text_type(err))
)
if err.reason.errno == 111:
message = (
"The HTTP server does not appear to running or cannot be reached. Please check that it is running and is reachable at the address: %s"
% url.netloc
)
self._logger.error(message)
data = None
except Exception as e:
self._logger.error(
"An error occurred attempting to request the server status: %s" % e
)
data = None
return data
"""
# Currently disabled as it requires platform specific functionality. This will need
# be reactivated once a cross platform solution is implemented.
def _get_procinfo(self):
try:
data = subprocess.Popen("ps aux | grep apache | grep -v grep | grep -v scalyr | awk '{print $2, $3, $4}'", shell=True, stdout=subprocess.PIPE).stdout.read()
result = {}
lines = data.splitlines()
i = 0
while i < len(lines):
if len(lines[i]) != 0:
values = lines[i].split()
if len(values) == 3:
result[values[0]] = {
"cpu": values[1],
"mem": values[2]
}
i = i + 1
except Exception, e:
self._logger.error("Unable to check process status: %s" % e)
result = None
return result
"""
def gather_sample(self):
data = self._get_status()
if data is None:
self._logger.error("No data returned.")
else:
samplesToEmit = [
("busy_workers", "apache.workers.active"),
("idle_workers", "apache.workers.idle"),
("connections_total", "apache.connections.active"),
("async_connections_writing", "apache.connections.writing"),
("async_connections_keep_alive", "apache.connections.idle"),
("async_connections_closing", "apache.connections.closing"),
]
statsEmitted = 0
for key, metric_name in samplesToEmit:
if key in data:
self._logger.emit_value(metric_name, int(data[key]))
statsEmitted += 1
if statsEmitted == 0:
self._logger.error(
"Status page did not match expected format. Check to make sure you included "
'the "?auto" option in the status url'
)
|
|
"""The tests for the MQTT light platform.
Configuration for RGB Version with brightness:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
qos: 0
payload_on: "on"
payload_off: "off"
Configuration for XY Version with brightness:
light:
platform: mqtt
name: "Office Light XY"
state_topic: "office/xy1/light/status"
command_topic: "office/xy1/light/switch"
brightness_state_topic: "office/xy1/brightness/status"
brightness_command_topic: "office/xy1/brightness/set"
xy_state_topic: "office/xy1/xy/status"
xy_command_topic: "office/xy1/xy/set"
qos: 0
payload_on: "on"
payload_off: "off"
config without RGB:
light:
platform: mqtt
name: "Office Light"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
qos: 0
payload_on: "on"
payload_off: "off"
config without RGB and brightness:
light:
platform: mqtt
name: "Office Light"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
qos: 0
payload_on: "on"
payload_off: "off"
config for RGB Version with brightness and scale:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
brightness_scale: 99
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
rgb_scale: 99
qos: 0
payload_on: "on"
payload_off: "off"
config with brightness and color temp
light:
platform: mqtt
name: "Office Light Color Temp"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
brightness_scale: 99
color_temp_state_topic: "office/rgb1/color_temp/status"
color_temp_command_topic: "office/rgb1/color_temp/set"
qos: 0
payload_on: "on"
payload_off: "off"
config with brightness and effect
light:
platform: mqtt
name: "Office Light Color Temp"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
brightness_scale: 99
effect_state_topic: "office/rgb1/effect/status"
effect_command_topic: "office/rgb1/effect/set"
effect_list:
- rainbow
- colorloop
qos: 0
payload_on: "on"
payload_off: "off"
config for RGB Version with white value and scale:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
white_value_state_topic: "office/rgb1/white_value/status"
white_value_command_topic: "office/rgb1/white_value/set"
white_value_scale: 99
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
rgb_scale: 99
qos: 0
payload_on: "on"
payload_off: "off"
config for RGB Version with RGB command template:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
rgb_command_template: "{{ '#%02x%02x%02x' | format(red, green, blue)}}"
qos: 0
payload_on: "on"
payload_off: "off"
Configuration for HS Version with brightness:
light:
platform: mqtt
name: "Office Light HS"
state_topic: "office/hs1/light/status"
command_topic: "office/hs1/light/switch"
brightness_state_topic: "office/hs1/brightness/status"
brightness_command_topic: "office/hs1/brightness/set"
hs_state_topic: "office/hs1/hs/status"
hs_command_topic: "office/hs1/hs/set"
qos: 0
payload_on: "on"
payload_off: "off"
"""
import pytest
from homeassistant.components import light, mqtt
from homeassistant.components.mqtt.discovery import async_start
from homeassistant.const import ATTR_ASSUMED_STATE, STATE_OFF, STATE_ON
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.async_mock import call, patch
from tests.common import assert_setup_component, async_fire_mqtt_message
from tests.components.light import common
DEFAULT_CONFIG = {
light.DOMAIN: {"platform": "mqtt", "name": "test", "command_topic": "test-topic"}
}
async def test_fail_setup_if_no_command_topic(hass, mqtt_mock):
"""Test if command fails with command topic."""
assert await async_setup_component(
hass, light.DOMAIN, {light.DOMAIN: {"platform": "mqtt", "name": "test"}}
)
await hass.async_block_till_done()
assert hass.states.get("light.test") is None
async def test_no_color_brightness_color_temp_hs_white_xy_if_no_topics(hass, mqtt_mock):
"""Test if there is no color and brightness if no topic."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test_light_rgb/status",
"command_topic": "test_light_rgb/set",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("hs_color") is None
assert state.attributes.get("white_value") is None
assert state.attributes.get("xy_color") is None
async_fire_mqtt_message(hass, "test_light_rgb/status", "ON")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("hs_color") is None
assert state.attributes.get("white_value") is None
assert state.attributes.get("xy_color") is None
async def test_controlling_state_via_topic(hass, mqtt_mock):
"""Test the controlling of the state via topic."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test_light_rgb/status",
"command_topic": "test_light_rgb/set",
"brightness_state_topic": "test_light_rgb/brightness/status",
"brightness_command_topic": "test_light_rgb/brightness/set",
"rgb_state_topic": "test_light_rgb/rgb/status",
"rgb_command_topic": "test_light_rgb/rgb/set",
"color_temp_state_topic": "test_light_rgb/color_temp/status",
"color_temp_command_topic": "test_light_rgb/color_temp/set",
"effect_state_topic": "test_light_rgb/effect/status",
"effect_command_topic": "test_light_rgb/effect/set",
"hs_state_topic": "test_light_rgb/hs/status",
"hs_command_topic": "test_light_rgb/hs/set",
"white_value_state_topic": "test_light_rgb/white_value/status",
"white_value_command_topic": "test_light_rgb/white_value/set",
"xy_state_topic": "test_light_rgb/xy/status",
"xy_command_topic": "test_light_rgb/xy/set",
"qos": "0",
"payload_on": 1,
"payload_off": 0,
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("effect") is None
assert state.attributes.get("hs_color") is None
assert state.attributes.get("white_value") is None
assert state.attributes.get("xy_color") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "test_light_rgb/status", "1")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") == (255, 255, 255)
assert state.attributes.get("brightness") == 255
assert state.attributes.get("color_temp") == 150
assert state.attributes.get("effect") == "none"
assert state.attributes.get("hs_color") == (0, 0)
assert state.attributes.get("white_value") == 255
assert state.attributes.get("xy_color") == (0.323, 0.329)
async_fire_mqtt_message(hass, "test_light_rgb/status", "0")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
async_fire_mqtt_message(hass, "test_light_rgb/status", "1")
async_fire_mqtt_message(hass, "test_light_rgb/brightness/status", "100")
light_state = hass.states.get("light.test")
assert light_state.attributes["brightness"] == 100
async_fire_mqtt_message(hass, "test_light_rgb/color_temp/status", "300")
light_state = hass.states.get("light.test")
assert light_state.attributes["color_temp"] == 300
async_fire_mqtt_message(hass, "test_light_rgb/effect/status", "rainbow")
light_state = hass.states.get("light.test")
assert light_state.attributes["effect"] == "rainbow"
async_fire_mqtt_message(hass, "test_light_rgb/white_value/status", "100")
light_state = hass.states.get("light.test")
assert light_state.attributes["white_value"] == 100
async_fire_mqtt_message(hass, "test_light_rgb/status", "1")
async_fire_mqtt_message(hass, "test_light_rgb/rgb/status", "125,125,125")
light_state = hass.states.get("light.test")
assert light_state.attributes.get("rgb_color") == (255, 255, 255)
async_fire_mqtt_message(hass, "test_light_rgb/hs/status", "200,50")
light_state = hass.states.get("light.test")
assert light_state.attributes.get("hs_color") == (200, 50)
async_fire_mqtt_message(hass, "test_light_rgb/xy/status", "0.675,0.322")
light_state = hass.states.get("light.test")
assert light_state.attributes.get("xy_color") == (0.672, 0.324)
async def test_invalid_state_via_topic(hass, mqtt_mock, caplog):
"""Test handling of empty data via topic."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test_light_rgb/status",
"command_topic": "test_light_rgb/set",
"brightness_state_topic": "test_light_rgb/brightness/status",
"brightness_command_topic": "test_light_rgb/brightness/set",
"rgb_state_topic": "test_light_rgb/rgb/status",
"rgb_command_topic": "test_light_rgb/rgb/set",
"color_temp_state_topic": "test_light_rgb/color_temp/status",
"color_temp_command_topic": "test_light_rgb/color_temp/set",
"effect_state_topic": "test_light_rgb/effect/status",
"effect_command_topic": "test_light_rgb/effect/set",
"hs_state_topic": "test_light_rgb/hs/status",
"hs_command_topic": "test_light_rgb/hs/set",
"white_value_state_topic": "test_light_rgb/white_value/status",
"white_value_command_topic": "test_light_rgb/white_value/set",
"xy_state_topic": "test_light_rgb/xy/status",
"xy_command_topic": "test_light_rgb/xy/set",
"qos": "0",
"payload_on": 1,
"payload_off": 0,
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("effect") is None
assert state.attributes.get("hs_color") is None
assert state.attributes.get("white_value") is None
assert state.attributes.get("xy_color") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "test_light_rgb/status", "1")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") == (255, 255, 255)
assert state.attributes.get("brightness") == 255
assert state.attributes.get("color_temp") == 150
assert state.attributes.get("effect") == "none"
assert state.attributes.get("hs_color") == (0, 0)
assert state.attributes.get("white_value") == 255
assert state.attributes.get("xy_color") == (0.323, 0.329)
async_fire_mqtt_message(hass, "test_light_rgb/status", "")
assert "Ignoring empty state message" in caplog.text
light_state = hass.states.get("light.test")
assert light_state.state == STATE_ON
async_fire_mqtt_message(hass, "test_light_rgb/brightness/status", "")
assert "Ignoring empty brightness message" in caplog.text
light_state = hass.states.get("light.test")
assert light_state.attributes["brightness"] == 255
async_fire_mqtt_message(hass, "test_light_rgb/color_temp/status", "")
assert "Ignoring empty color temp message" in caplog.text
light_state = hass.states.get("light.test")
assert light_state.attributes["color_temp"] == 150
async_fire_mqtt_message(hass, "test_light_rgb/effect/status", "")
assert "Ignoring empty effect message" in caplog.text
light_state = hass.states.get("light.test")
assert light_state.attributes["effect"] == "none"
async_fire_mqtt_message(hass, "test_light_rgb/white_value/status", "")
assert "Ignoring empty white value message" in caplog.text
light_state = hass.states.get("light.test")
assert light_state.attributes["white_value"] == 255
async_fire_mqtt_message(hass, "test_light_rgb/rgb/status", "")
assert "Ignoring empty rgb message" in caplog.text
light_state = hass.states.get("light.test")
assert light_state.attributes.get("rgb_color") == (255, 255, 255)
async_fire_mqtt_message(hass, "test_light_rgb/hs/status", "")
assert "Ignoring empty hs message" in caplog.text
light_state = hass.states.get("light.test")
assert light_state.attributes.get("hs_color") == (0, 0)
async_fire_mqtt_message(hass, "test_light_rgb/hs/status", "bad,bad")
assert "Failed to parse hs state update" in caplog.text
light_state = hass.states.get("light.test")
assert light_state.attributes.get("hs_color") == (0, 0)
async_fire_mqtt_message(hass, "test_light_rgb/xy/status", "")
assert "Ignoring empty xy-color message" in caplog.text
light_state = hass.states.get("light.test")
assert light_state.attributes.get("xy_color") == (0.323, 0.329)
async def test_brightness_controlling_scale(hass, mqtt_mock):
"""Test the brightness controlling scale."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test_scale/status",
"command_topic": "test_scale/set",
"brightness_state_topic": "test_scale/brightness/status",
"brightness_command_topic": "test_scale/brightness/set",
"brightness_scale": "99",
"qos": 0,
"payload_on": "on",
"payload_off": "off",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("brightness") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "test_scale/status", "on")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 255
async_fire_mqtt_message(hass, "test_scale/status", "off")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
async_fire_mqtt_message(hass, "test_scale/status", "on")
async_fire_mqtt_message(hass, "test_scale/brightness/status", "99")
light_state = hass.states.get("light.test")
assert light_state.attributes["brightness"] == 255
async def test_brightness_from_rgb_controlling_scale(hass, mqtt_mock):
"""Test the brightness controlling scale."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test_scale_rgb/status",
"command_topic": "test_scale_rgb/set",
"rgb_state_topic": "test_scale_rgb/rgb/status",
"rgb_command_topic": "test_scale_rgb/rgb/set",
"qos": 0,
"payload_on": "on",
"payload_off": "off",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("brightness") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
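# With no brightness topic configured, the light derives its brightness from the
# RGB state payload, as the assertions below show ("255,0,0" -> 255, "127,0,0" -> 127).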
async_fire_mqtt_message(hass, "test_scale_rgb/status", "on")
async_fire_mqtt_message(hass, "test_scale_rgb/rgb/status", "255,0,0")
state = hass.states.get("light.test")
assert state.attributes.get("brightness") == 255
async_fire_mqtt_message(hass, "test_scale_rgb/rgb/status", "127,0,0")
state = hass.states.get("light.test")
assert state.attributes.get("brightness") == 127
async def test_white_value_controlling_scale(hass, mqtt_mock):
"""Test the white_value controlling scale."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test_scale/status",
"command_topic": "test_scale/set",
"white_value_state_topic": "test_scale/white_value/status",
"white_value_command_topic": "test_scale/white_value/set",
"white_value_scale": "99",
"qos": 0,
"payload_on": "on",
"payload_off": "off",
}
},
)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("white_value") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "test_scale/status", "on")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("white_value") == 255
async_fire_mqtt_message(hass, "test_scale/status", "off")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
async_fire_mqtt_message(hass, "test_scale/status", "on")
async_fire_mqtt_message(hass, "test_scale/white_value/status", "99")
light_state = hass.states.get("light.test")
assert light_state.attributes["white_value"] == 255
async def test_controlling_state_via_topic_with_templates(hass, mqtt_mock):
"""Test the setting of the state with a template."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"state_topic": "test_light_rgb/status",
"command_topic": "test_light_rgb/set",
"brightness_command_topic": "test_light_rgb/brightness/set",
"rgb_command_topic": "test_light_rgb/rgb/set",
"color_temp_command_topic": "test_light_rgb/color_temp/set",
"effect_command_topic": "test_light_rgb/effect/set",
"hs_command_topic": "test_light_rgb/hs/set",
"white_value_command_topic": "test_light_rgb/white_value/set",
"xy_command_topic": "test_light_rgb/xy/set",
"brightness_state_topic": "test_light_rgb/brightness/status",
"color_temp_state_topic": "test_light_rgb/color_temp/status",
"effect_state_topic": "test_light_rgb/effect/status",
"hs_state_topic": "test_light_rgb/hs/status",
"rgb_state_topic": "test_light_rgb/rgb/status",
"white_value_state_topic": "test_light_rgb/white_value/status",
"xy_state_topic": "test_light_rgb/xy/status",
"state_value_template": "{{ value_json.hello }}",
"brightness_value_template": "{{ value_json.hello }}",
"color_temp_value_template": "{{ value_json.hello }}",
"effect_value_template": "{{ value_json.hello }}",
"hs_value_template": '{{ value_json.hello | join(",") }}',
"rgb_value_template": '{{ value_json.hello | join(",") }}',
"white_value_template": "{{ value_json.hello }}",
"xy_value_template": '{{ value_json.hello | join(",") }}',
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("brightness") is None
assert state.attributes.get("rgb_color") is None
async_fire_mqtt_message(hass, "test_light_rgb/rgb/status", '{"hello": [1, 2, 3]}')
async_fire_mqtt_message(hass, "test_light_rgb/status", '{"hello": "ON"}')
async_fire_mqtt_message(hass, "test_light_rgb/brightness/status", '{"hello": "50"}')
async_fire_mqtt_message(
hass, "test_light_rgb/color_temp/status", '{"hello": "300"}'
)
async_fire_mqtt_message(
hass, "test_light_rgb/effect/status", '{"hello": "rainbow"}'
)
async_fire_mqtt_message(
hass, "test_light_rgb/white_value/status", '{"hello": "75"}'
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 50
assert state.attributes.get("rgb_color") == (84, 169, 255)
assert state.attributes.get("color_temp") == 300
assert state.attributes.get("effect") == "rainbow"
assert state.attributes.get("white_value") == 75
async_fire_mqtt_message(hass, "test_light_rgb/hs/status", '{"hello": [100,50]}')
state = hass.states.get("light.test")
assert state.attributes.get("hs_color") == (100, 50)
async_fire_mqtt_message(
hass, "test_light_rgb/xy/status", '{"hello": [0.123,0.123]}'
)
state = hass.states.get("light.test")
assert state.attributes.get("xy_color") == (0.14, 0.131)
async def test_sending_mqtt_commands_and_optimistic(hass, mqtt_mock):
"""Test the sending of command in optimistic mode."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light_rgb/set",
"brightness_command_topic": "test_light_rgb/brightness/set",
"rgb_command_topic": "test_light_rgb/rgb/set",
"color_temp_command_topic": "test_light_rgb/color_temp/set",
"effect_command_topic": "test_light_rgb/effect/set",
"hs_command_topic": "test_light_rgb/hs/set",
"white_value_command_topic": "test_light_rgb/white_value/set",
"xy_command_topic": "test_light_rgb/xy/set",
"effect_list": ["colorloop", "random"],
"qos": 2,
"payload_on": "on",
"payload_off": "off",
}
}
fake_state = ha.State(
"light.test",
"on",
{
"brightness": 95,
"hs_color": [100, 100],
"effect": "random",
"color_temp": 100,
"white_value": 50,
},
)
with patch(
"homeassistant.helpers.restore_state.RestoreEntity.async_get_last_state",
return_value=fake_state,
):
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 95
assert state.attributes.get("hs_color") == (100, 100)
assert state.attributes.get("effect") == "random"
assert state.attributes.get("color_temp") == 100
assert state.attributes.get("white_value") == 50
assert state.attributes.get(ATTR_ASSUMED_STATE)
await common.async_turn_on(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "on", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_ON
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with(
"test_light_rgb/set", "off", 2, False
)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
mqtt_mock.reset_mock()
await common.async_turn_on(
hass, "light.test", brightness=50, xy_color=[0.123, 0.123]
)
await common.async_turn_on(hass, "light.test", brightness=50, hs_color=[359, 78])
await common.async_turn_on(
hass, "light.test", rgb_color=[255, 128, 0], white_value=80
)
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light_rgb/set", "on", 2, False),
call("test_light_rgb/rgb/set", "255,128,0", 2, False),
call("test_light_rgb/brightness/set", 50, 2, False),
call("test_light_rgb/hs/set", "359.0,78.0", 2, False),
call("test_light_rgb/white_value/set", 80, 2, False),
call("test_light_rgb/xy/set", "0.14,0.131", 2, False),
],
any_order=True,
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes["rgb_color"] == (255, 128, 0)
assert state.attributes["brightness"] == 50
assert state.attributes["hs_color"] == (30.118, 100)
assert state.attributes["white_value"] == 80
assert state.attributes["xy_color"] == (0.611, 0.375)
async def test_sending_mqtt_rgb_command_with_template(hass, mqtt_mock):
"""Test the sending of RGB command with template."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light_rgb/set",
"rgb_command_topic": "test_light_rgb/rgb/set",
"rgb_command_template": '{{ "#%02x%02x%02x" | '
"format(red, green, blue)}}",
"payload_on": "on",
"payload_off": "off",
"qos": 0,
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "light.test", rgb_color=[255, 128, 64])
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light_rgb/set", "on", 0, False),
call("test_light_rgb/rgb/set", "#ff803f", 0, False),
],
any_order=True,
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes["rgb_color"] == (255, 128, 63)
async def test_sending_mqtt_color_temp_command_with_template(hass, mqtt_mock):
"""Test the sending of Color Temp command with template."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light_color_temp/set",
"color_temp_command_topic": "test_light_color_temp/color_temp/set",
"color_temp_command_template": "{{ (1000 / value) | round(0) }}",
"payload_on": "on",
"payload_off": "off",
"qos": 0,
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "light.test", color_temp=100)
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light_color_temp/set", "on", 0, False),
call("test_light_color_temp/color_temp/set", "10", 0, False),
],
any_order=True,
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes["color_temp"] == 100
async def test_show_brightness_if_only_command_topic(hass, mqtt_mock):
"""Test the brightness if only a command topic is present."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"brightness_command_topic": "test_light_rgb/brightness/set",
"command_topic": "test_light_rgb/set",
"state_topic": "test_light_rgb/status",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("brightness") is None
async_fire_mqtt_message(hass, "test_light_rgb/status", "ON")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 255
async def test_show_color_temp_only_if_command_topic(hass, mqtt_mock):
"""Test the color temp only if a command topic is present."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"color_temp_command_topic": "test_light_rgb/brightness/set",
"command_topic": "test_light_rgb/set",
"state_topic": "test_light_rgb/status",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("color_temp") is None
async_fire_mqtt_message(hass, "test_light_rgb/status", "ON")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("color_temp") == 150
async def test_show_effect_only_if_command_topic(hass, mqtt_mock):
"""Test the effect only if a command topic is present."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"effect_command_topic": "test_light_rgb/effect/set",
"command_topic": "test_light_rgb/set",
"state_topic": "test_light_rgb/status",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("effect") is None
async_fire_mqtt_message(hass, "test_light_rgb/status", "ON")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("effect") == "none"
async def test_show_hs_if_only_command_topic(hass, mqtt_mock):
"""Test the hs if only a command topic is present."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"hs_command_topic": "test_light_rgb/hs/set",
"command_topic": "test_light_rgb/set",
"state_topic": "test_light_rgb/status",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("hs_color") is None
async_fire_mqtt_message(hass, "test_light_rgb/status", "ON")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("hs_color") == (0, 0)
async def test_show_white_value_if_only_command_topic(hass, mqtt_mock):
"""Test the white_value if only a command topic is present."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"white_value_command_topic": "test_light_rgb/white_value/set",
"command_topic": "test_light_rgb/set",
"state_topic": "test_light_rgb/status",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("white_value") is None
async_fire_mqtt_message(hass, "test_light_rgb/status", "ON")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("white_value") == 255
async def test_show_xy_if_only_command_topic(hass, mqtt_mock):
"""Test the xy if only a command topic is present."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"xy_command_topic": "test_light_rgb/xy/set",
"command_topic": "test_light_rgb/set",
"state_topic": "test_light_rgb/status",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("xy_color") is None
async_fire_mqtt_message(hass, "test_light_rgb/status", "ON")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("xy_color") == (0.323, 0.329)
async def test_on_command_first(hass, mqtt_mock):
"""Test on command being sent before brightness."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light/set",
"brightness_command_topic": "test_light/bright",
"on_command_type": "first",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "light.test", brightness=50)
# Should get the following MQTT messages.
# test_light/set: 'ON'
# test_light/bright: 50
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/set", "ON", 0, False),
call("test_light/bright", 50, 0, False),
],
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
async def test_on_command_last(hass, mqtt_mock):
"""Test on command being sent after brightness."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light/set",
"brightness_command_topic": "test_light/bright",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "light.test", brightness=50)
# Should get the following MQTT messages.
# test_light/bright: 50
# test_light/set: 'ON'
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/bright", 50, 0, False),
call("test_light/set", "ON", 0, False),
],
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
async def test_on_command_brightness(hass, mqtt_mock):
"""Test on command being sent as only brightness."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light/set",
"brightness_command_topic": "test_light/bright",
"rgb_command_topic": "test_light/rgb",
"on_command_type": "brightness",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
# Turn on w/ no brightness - should set to max
await common.async_turn_on(hass, "light.test")
# Should get the following MQTT messages.
# test_light/bright: 255
mqtt_mock.async_publish.assert_called_once_with("test_light/bright", 255, 0, False)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
mqtt_mock.async_publish.reset_mock()
# Turn on w/ brightness
await common.async_turn_on(hass, "light.test", brightness=50)
mqtt_mock.async_publish.assert_called_once_with("test_light/bright", 50, 0, False)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "light.test")
# Turn on w/ just a color to ensure brightness gets
# added and sent.
await common.async_turn_on(hass, "light.test", rgb_color=[255, 128, 0])
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/rgb", "255,128,0", 0, False),
call("test_light/bright", 50, 0, False),
],
any_order=True,
)
async def test_on_command_brightness_scaled(hass, mqtt_mock):
"""Test brightness scale."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light/set",
"brightness_command_topic": "test_light/bright",
"brightness_scale": 100,
"rgb_command_topic": "test_light/rgb",
"on_command_type": "brightness",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
# Turn on w/ no brightness - should set to max
await common.async_turn_on(hass, "light.test")
# Should get the following MQTT messages.
# test_light/bright: 100
mqtt_mock.async_publish.assert_called_once_with("test_light/bright", 100, 0, False)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
mqtt_mock.async_publish.reset_mock()
# Turn on w/ brightness
await common.async_turn_on(hass, "light.test", brightness=50)
mqtt_mock.async_publish.assert_called_once_with("test_light/bright", 20, 0, False)
mqtt_mock.async_publish.reset_mock()
# Turn on w/ max brightness
await common.async_turn_on(hass, "light.test", brightness=255)
mqtt_mock.async_publish.assert_called_once_with("test_light/bright", 100, 0, False)
mqtt_mock.async_publish.reset_mock()
# Turn on w/ min brightness
await common.async_turn_on(hass, "light.test", brightness=1)
mqtt_mock.async_publish.assert_called_once_with("test_light/bright", 1, 0, False)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "light.test")
# Turn on w/ just a color to ensure brightness gets
# added and sent.
await common.async_turn_on(hass, "light.test", rgb_color=[255, 128, 0])
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/rgb", "255,128,0", 0, False),
call("test_light/bright", 1, 0, False),
],
any_order=True,
)
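# Not part of the component: a minimal sketch, consistent with the payloads
# asserted above, of how HA brightness (0-255) is mapped onto a configured
# `brightness_scale` (50 -> 20, 255 -> 100, 1 -> 1 for a scale of 100). The
# helper name is hypothetical.
def _scaled_brightness_sketch(brightness, brightness_scale=100):
    """Scale HA brightness to the device range, never rounding down to zero."""
    device_brightness = min(round(brightness / 255 * brightness_scale), brightness_scale)
    return max(device_brightness, 1)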
async def test_on_command_rgb(hass, mqtt_mock):
"""Test on command in RGB brightness mode."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light/set",
"rgb_command_topic": "test_light/rgb",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "light.test", brightness=127)
# Should get the following MQTT messages.
# test_light/rgb: '127,127,127'
# test_light/set: 'ON'
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/rgb", "127,127,127", 0, False),
call("test_light/set", "ON", 0, False),
],
any_order=True,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_on(hass, "light.test", brightness=255)
# Should get the following MQTT messages.
# test_light/rgb: '255,255,255'
# test_light/set: 'ON'
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/rgb", "255,255,255", 0, False),
call("test_light/set", "ON", 0, False),
],
any_order=True,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_on(hass, "light.test", brightness=1)
# Should get the following MQTT messages.
# test_light/rgb: '1,1,1'
# test_light/set: 'ON'
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/rgb", "1,1,1", 0, False),
call("test_light/set", "ON", 0, False),
],
any_order=True,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
# Ensure color gets scaled with brightness.
await common.async_turn_on(hass, "light.test", rgb_color=[255, 128, 0])
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/rgb", "1,0,0", 0, False),
call("test_light/set", "ON", 0, False),
],
any_order=True,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_on(hass, "light.test", brightness=255)
# Should get the following MQTT messages.
# test_light/rgb: '255,128,0'
# test_light/set: 'ON'
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/rgb", "255,128,0", 0, False),
call("test_light/set", "ON", 0, False),
],
any_order=True,
)
mqtt_mock.async_publish.reset_mock()
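# Not part of the component: a rough sketch of the "RGB brightness mode" the test
# above exercises. With no brightness_command_topic, brightness is folded into the
# RGB payload by scaling each channel; the component does this through an HSV
# round-trip, so exact rounding may differ slightly from this naive version.
def _rgb_with_brightness_sketch(rgb, brightness):
    """Hypothetical helper: (255, 128, 0) at brightness 1 -> (1, 0, 0)."""
    return tuple(int(channel * brightness / 255) for channel in rgb)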
async def test_on_command_rgb_template(hass, mqtt_mock):
"""Test on command in RGB brightness mode with RGB template."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light/set",
"rgb_command_topic": "test_light/rgb",
"rgb_command_template": "{{ red }}/{{ green }}/{{ blue }}",
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "light.test", brightness=127)
# Should get the following MQTT messages.
# test_light/rgb: '127,127,127'
# test_light/set: 'ON'
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/rgb", "127/127/127", 0, False),
call("test_light/set", "ON", 0, False),
],
any_order=True,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
async def test_effect(hass, mqtt_mock):
"""Test effect."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_light/set",
"effect_command_topic": "test_light/effect/set",
"effect_list": ["rainbow", "colorloop"],
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.state == STATE_OFF
await common.async_turn_on(hass, "light.test", effect="rainbow")
# Should get the following MQTT messages.
# test_light/effect/set: 'rainbow'
# test_light/set: 'ON'
mqtt_mock.async_publish.assert_has_calls(
[
call("test_light/effect/set", "rainbow", 0, False),
call("test_light/set", "ON", 0, False),
],
any_order=True,
)
mqtt_mock.async_publish.reset_mock()
await common.async_turn_off(hass, "light.test")
mqtt_mock.async_publish.assert_called_once_with("test_light/set", "OFF", 0, False)
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_payload(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
await help_test_custom_availability_payload(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, light.DOMAIN, DEFAULT_CONFIG
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique id option only creates one light per unique_id."""
config = {
light.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"state_topic": "test-topic",
"command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
}
await help_test_unique_id(hass, mqtt_mock, light.DOMAIN, config)
async def test_discovery_removal_light(hass, mqtt_mock, caplog):
"""Test removal of discovered light."""
data = (
'{ "name": "test",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
await help_test_discovery_removal(hass, mqtt_mock, caplog, light.DOMAIN, data)
async def test_discovery_deprecated(hass, mqtt_mock, caplog):
"""Test discovery of mqtt light with deprecated platform option."""
entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
await async_start(hass, "homeassistant", entry)
data = (
'{ "name": "Beer",' ' "platform": "mqtt",' ' "command_topic": "test_topic"}'
)
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data)
await hass.async_block_till_done()
state = hass.states.get("light.beer")
assert state is not None
assert state.name == "Beer"
async def test_discovery_update_light(hass, mqtt_mock, caplog):
"""Test update of discovered light."""
data1 = (
'{ "name": "Beer",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
data2 = (
'{ "name": "Milk",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
await help_test_discovery_update(
hass, mqtt_mock, caplog, light.DOMAIN, data1, data2
)
async def test_discovery_update_unchanged_light(hass, mqtt_mock, caplog):
"""Test update of discovered light."""
data1 = (
'{ "name": "Beer",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
with patch(
"homeassistant.components.mqtt.light.schema_basic.MqttLight.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, light.DOMAIN, data1, discovery_update
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "name": "Beer" }'
data2 = (
'{ "name": "Milk",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic" }'
)
await help_test_discovery_broken(
hass, mqtt_mock, caplog, light.DOMAIN, data1, data2
)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT light device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT light device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
await help_test_entity_debug_info_message(
hass, mqtt_mock, light.DOMAIN, DEFAULT_CONFIG
)
async def test_max_mireds(hass, mqtt_mock):
"""Test setting min_mireds and max_mireds."""
config = {
light.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test_max_mireds/set",
"color_temp_command_topic": "test_max_mireds/color_temp/set",
"max_mireds": 370,
}
}
assert await async_setup_component(hass, light.DOMAIN, config)
await hass.async_block_till_done()
state = hass.states.get("light.test")
assert state.attributes.get("min_mireds") == 153
assert state.attributes.get("max_mireds") == 370
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import compas
import compas.colors
import compas.geometry
from compas.data import Data
from compas.datastructures import Mesh
from compas.files.urdf import URDFElement
from compas.geometry import Frame
from compas.robots.model.base import ProxyObject
from compas.robots.model.base import _attr_from_data
from compas.robots.model.base import _attr_to_data
from compas.robots.model.base import _parse_floats
__all__ = [
'Geometry',
'MeshDescriptor',
'Color',
'Texture',
'Material',
'Origin',
'Cylinder',
'Box',
'Sphere',
'Capsule',
]
class BoxProxy(ProxyObject):
"""Proxy class that adds URDF functionality to an instance of :class:`~compas.geometry.Box`.
This class is internal and not intended to be referenced externally.
"""
def get_urdf_element(self):
attributes = {'size': '{} {} {}'.format(*self.size)}
return URDFElement('box', attributes)
@classmethod
def from_urdf(cls, attributes, elements=None, text=None):
size = _parse_floats(attributes['size'])
return cls(compas.geometry.Box(Frame.worldXY(), *size))
@property
def meshes(self):
return [Mesh.from_shape(self)]
@property
def size(self):
return [self.xsize, self.ysize, self.zsize]
class CylinderProxy(ProxyObject):
"""Proxy class that adds URDF functionality to an instance of :class:`~compas.geometry.Cylinder`.
This class is internal and not intended to be referenced externally.
"""
def get_urdf_element(self):
attributes = {'radius': self.radius, 'length': self.length}
return URDFElement('cylinder', attributes)
@classmethod
def from_urdf(cls, attributes, elements=None, text=None):
radius = float(attributes['radius'])
length = float(attributes['length'])
plane = compas.geometry.Plane([0, 0, 0], [0, 0, 1])
circle = compas.geometry.Circle(plane, radius)
return cls(compas.geometry.Cylinder(circle, length))
@property
def meshes(self):
return [Mesh.from_shape(self)]
@property
def length(self):
return self.height
class SphereProxy(ProxyObject):
"""Proxy class that adds URDF functionality to an instance of :class:`~compas.geometry.Sphere`.
This class is internal and not intended to be referenced externally.
"""
def get_urdf_element(self):
attributes = {'radius': self.radius}
return URDFElement('sphere', attributes)
@classmethod
def from_urdf(cls, attributes, elements=None, text=None):
radius = float(attributes['radius'])
return cls(compas.geometry.Sphere((0, 0, 0), radius))
@property
def meshes(self):
return [Mesh.from_shape(self)]
class CapsuleProxy(ProxyObject):
"""Proxy class that adds URDF functionality to an instance of :class:`~compas.geometry.Capsule`.
This class is internal and not intended to be referenced externally.
"""
def get_urdf_element(self):
attributes = {'radius': self.radius, 'length': self.length}
return URDFElement('capsule', attributes)
@classmethod
def from_urdf(cls, attributes, elements=None, text=None):
radius = float(attributes['radius'])
length = float(attributes['length'])
line = ((0, 0, length / -2), (0, 0, length / 2))
return cls(compas.geometry.Capsule(line, radius))
@property
def meshes(self):
return [Mesh.from_shape(self)]
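# Illustrative sketch (not part of the module) of how the proxy classes above are
# used: a plain COMPAS primitive is wrapped in its proxy, which then knows how to
# serialize itself to URDF and to produce display meshes.
def _example_box_proxy():  # pragma: no cover - illustrative only
    """Hypothetical example of the proxy pattern used by the classes above."""
    box = compas.geometry.Box(Frame.worldXY(), 1.0, 2.0, 0.5)
    proxied = BoxProxy.create_proxy(box)
    element = proxied.get_urdf_element()  # URDFElement('box', {'size': '1.0 2.0 0.5'})
    meshes = proxied.meshes               # [Mesh.from_shape(...)]
    return element, meshes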
class MeshDescriptor(Data):
"""Description of a mesh.
Parameters
----------
filename : str
        The mesh's filename.
scale : str, optional
The scale factors of the mesh in the x-, y-, and z-direction.
**kwargs : dict[str, Any], optional
The keyword arguments (kwargs) collected in a dict.
These allow using non-standard attributes absent in the URDF specification.
Attributes
----------
filename : str
        The mesh's filename.
scale : [float, float, float]
The scale factors of the mesh in the x-, y-, and z-direction.
meshes : list[:class:`~compas.datastructures.Mesh`]
List of COMPAS geometric meshes.
Examples
--------
>>> m = MeshDescriptor('link.stl')
"""
def __init__(self, filename, scale='1.0 1.0 1.0', **kwargs):
super(MeshDescriptor, self).__init__()
self.filename = filename
self.scale = _parse_floats(scale)
self.meshes = []
self.attr = kwargs or {}
def get_urdf_element(self):
attributes = {'filename': self.filename}
# There is no need to record default values. Usually these
# coincide with some form of 0 and are filtered out with
# `attributes = dict(filter(lambda x: x[1], attributes.items()))`,
# but here we must be explicit.
if self.scale != [1.0, 1.0, 1.0]:
attributes['scale'] = "{} {} {}".format(*self.scale)
attributes.update(self.attr)
return URDFElement('mesh', attributes)
@property
def data(self):
return {
'filename': self.filename,
'scale': self.scale,
'attr': _attr_to_data(self.attr),
}
@data.setter
def data(self, data):
self.filename = data['filename']
self.scale = data['scale']
self.attr = _attr_from_data(data['attr']) if 'attr' in data else {}
@classmethod
def from_data(cls, data):
md = cls('')
md.data = data
return md
class Color(Data):
"""Color represented in RGBA.
Parameters
----------
rgba : str
Color values as string.
Attributes
----------
rgba : [float, float, float, float]
        Color values as a list of floats.
Examples
--------
>>> c = Color('1 0 0')
>>> c.rgba
[1.0, 0.0, 0.0]
"""
def __init__(self, rgba):
super(Color, self).__init__()
self.rgba = _parse_floats(rgba)
def get_urdf_element(self):
attributes = {'rgba': "{} {} {} {}".format(*self.rgba)}
return URDFElement('color', attributes)
@property
def data(self):
return {
'rgba': self.rgba,
}
@data.setter
def data(self, data):
self.rgba = data['rgba']
@classmethod
def from_data(cls, data):
color = cls('1 1 1')
color.data = data
return color
class Texture(Data):
"""Texture description.
Parameters
----------
filename : str
The filename of the texture.
Attributes
----------
filename : str
The filename of the texture.
Examples
--------
>>> t = Texture('wood.jpg')
"""
def __init__(self, filename):
super(Texture, self).__init__()
self.filename = filename
def get_urdf_element(self):
attributes = {'filename': self.filename}
return URDFElement('texture', attributes)
@property
def data(self):
return {
'filename': self.filename,
}
@data.setter
def data(self, data):
self.filename = data['filename']
@classmethod
def from_data(cls, data):
return cls(**data)
class Material(Data):
"""Material description.
Parameters
----------
name : str
The name of the material.
color : :class:`~compas.robots.Color`, optional
The color of the material.
texture : :class:`~compas.robots.Texture`, optional
        The texture of the material.
Examples
--------
>>> c = Color('1 0 0')
>>> material = Material('wood', c)
>>> material = Material('aqua')
>>> material.get_color()
(0.0, 1.0, 1.0, 1.0)
"""
def __init__(self, name=None, color=None, texture=None):
super(Material, self).__init__()
self.name = name
self.color = color
self.texture = texture
def get_urdf_element(self):
attributes = {'name': self.name}
elements = [self.color, self.texture]
return URDFElement('material', attributes, elements)
@property
def data(self):
return {
'name': self.name,
'color': self.color.data if self.color else None,
'texture': self.texture.data if self.texture else None,
}
@data.setter
def data(self, data):
self.name = data['name']
self.color = Color.from_data(data['color']) if data['color'] else None
self.texture = Texture.from_data(data['texture']) if data['texture'] else None
def get_color(self):
"""Get the RGBA color array of the material.
Returns
-------
[float, float, float, float]
List of 4 floats (``0.0-1.0``) indicating RGB colors and Alpha channel of the material.
Examples
--------
>>> material = Material('aqua')
>>> material.get_color()
(0.0, 1.0, 1.0, 1.0)
"""
if self.name:
try:
color = compas.colors.Color.from_name(self.name)
return color.rgba
except ValueError:
pass
if self.color:
return self.color.rgba
return None
TYPE_CLASS_ENUM = {
'box': compas.geometry.Box,
'cylinder': compas.geometry.Cylinder,
'sphere': compas.geometry.Sphere,
'capsule': compas.geometry.Capsule,
'mesh': MeshDescriptor,
}
TYPE_CLASS_ENUM_BY_DATA = {
('frame', 'xsize', 'ysize', 'zsize'): compas.geometry.Box,
('circle', 'height'): compas.geometry.Cylinder,
('point', 'radius'): compas.geometry.Sphere,
('line', 'radius'): compas.geometry.Capsule,
('attr', 'filename', 'scale'): MeshDescriptor,
}
def _get_type_from_shape_data(data):
# This is here only to support models serialized with older versions of COMPAS
if 'type' in data:
return TYPE_CLASS_ENUM[data['type']]
    # Otherwise, infer the object type from the sorted data keys, which match the DATASCHEMA of each shape class
keys = tuple(sorted(data.keys()))
return TYPE_CLASS_ENUM_BY_DATA[keys]
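# Illustrative sketch: the lookup above dispatches either on an explicit 'type'
# key (older serialized models) or on the sorted data keys (current DATASCHEMA).
# The dicts below are hypothetical minimal examples, not complete shape data.
#
#   _get_type_from_shape_data({'type': 'sphere'})                   # -> compas.geometry.Sphere
#   _get_type_from_shape_data({'point': [0, 0, 0], 'radius': 1.0})  # -> compas.geometry.Sphere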
class Geometry(Data):
"""Geometrical description of the shape of a link.
Parameters
----------
box : :class:`~compas.geometry.Box`, optional
A box shape primitive.
cylinder : :class:`~compas.geometry.Cylinder`, optional
A cylinder shape primitive.
sphere : :class:`~compas.geometry.Sphere`, optional
A sphere shape primitive.
capsule : :class:`~compas.geometry.Capsule`, optional
A capsule shape primitive.
mesh : :class:`~compas.robots.MeshDescriptor`, optional
A descriptor of a mesh.
**kwargs : dict[str, Any], optional
The keyword arguments (kwargs) collected in a dict.
These allow using non-standard attributes absent in the URDF specification.
Attributes
----------
shape : object
The shape of the geometry
attr : keyword arguments
Additional attributes
Examples
--------
>>> box = compas.geometry.Box(Frame.worldXY(), 1, 1, 1)
>>> geo = Geometry(box=box)
"""
def __init__(self, box=None, cylinder=None, sphere=None, capsule=None, mesh=None, **kwargs):
super(Geometry, self).__init__()
self.shape = box or cylinder or sphere or capsule or mesh
self.attr = kwargs
@property
def shape(self):
return self._shape
@shape.setter
def shape(self, value):
if value is None:
self._shape = None
return
if isinstance(value, compas.geometry.Box):
self._shape = BoxProxy.create_proxy(value)
elif isinstance(value, compas.geometry.Cylinder):
self._shape = CylinderProxy.create_proxy(value)
elif isinstance(value, compas.geometry.Sphere):
self._shape = SphereProxy.create_proxy(value)
elif isinstance(value, compas.geometry.Capsule):
self._shape = CapsuleProxy.create_proxy(value)
else:
self._shape = value
if 'meshes' not in dir(self._shape):
raise TypeError('Shape implementation does not define a meshes accessor')
def get_urdf_element(self):
attributes = self.attr.copy()
elements = [self.shape]
return URDFElement('geometry', attributes, elements)
@property
def data(self):
return {
'shape': self.shape.data,
'attr': _attr_to_data(self.attr),
}
@data.setter
def data(self, data):
class_ = _get_type_from_shape_data(data['shape'])
self.shape = class_.from_data(data['shape'])
self.attr = _attr_from_data(data['attr'])
@classmethod
def from_data(cls, data):
class_ = _get_type_from_shape_data(data['shape'])
geo = cls(box=class_.from_data(data['shape']))
geo.data = data
return geo
@staticmethod
def _get_item_meshes(item):
meshes = item.geometry.shape.meshes
if meshes:
# Coerce meshes into an iterable (a tuple if not natively iterable)
if not hasattr(meshes, '__iter__'):
meshes = (meshes,)
return meshes
# Deprecated: these are aliases kept for backwards compatibility; they need to be removed in 2.x
Origin = Frame
Cylinder = CylinderProxy
Box = BoxProxy
Sphere = SphereProxy
Capsule = CapsuleProxy
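# Illustrative usage sketch (not part of the module): build a Geometry from a
# primitive and round-trip it through the data API defined above. The shape type
# is recovered via _get_type_from_shape_data when deserializing.
def _example_geometry_roundtrip():  # pragma: no cover - illustrative only
    box = compas.geometry.Box(Frame.worldXY(), 1, 1, 1)
    geo = Geometry(box=box)
    data = geo.data                      # {'shape': {...}, 'attr': {...}}
    restored = Geometry.from_data(data)  # shape comes back as a proxied Box
    return restored.shape.meshes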
|
|
import pickle
import struct
from datasketch.storage import (
ordered_storage, unordered_storage, _random_name)
from scipy.integrate import quad as integrate
def _false_positive_probability(threshold, b, r):
_probability = lambda s : 1 - (1 - s**float(r))**float(b)
a, err = integrate(_probability, 0.0, threshold)
return a
def _false_negative_probability(threshold, b, r):
_probability = lambda s : 1 - (1 - (1 - s**float(r))**float(b))
a, err = integrate(_probability, threshold, 1.0)
return a
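# Illustrative sketch (not part of the library): the two helpers above integrate
# the LSH candidate probability S-curve, P(candidate | similarity s) = 1 - (1 - s**r)**b,
# on either side of the threshold. With, say, b=32 bands of r=4 rows each and a
# threshold of 0.5, the false-positive mass is the area under the curve on [0, 0.5]
# and the false-negative mass is the complementary area on [0.5, 1]:
#
#   fp = _false_positive_probability(0.5, b=32, r=4)
#   fn = _false_negative_probability(0.5, b=32, r=4)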
def _optimal_param(threshold, num_perm, false_positive_weight,
false_negative_weight):
'''
Compute the optimal `MinHashLSH` parameter that minimizes the weighted sum
of probabilities of false positive and false negative.
'''
min_error = float("inf")
opt = (0, 0)
for b in range(1, num_perm+1):
max_r = int(num_perm / b)
for r in range(1, max_r+1):
fp = _false_positive_probability(threshold, b, r)
fn = _false_negative_probability(threshold, b, r)
error = fp*false_positive_weight + fn*false_negative_weight
if error < min_error:
min_error = error
opt = (b, r)
return opt
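# Illustrative sketch: for the default MinHashLSH settings below (threshold=0.9,
# num_perm=128, equal weights), this search picks the split of the 128 permutation
# values into b bands of r rows that minimizes the weighted error above:
#
#   b, r = _optimal_param(0.9, 128, 0.5, 0.5)
#   assert b * r <= 128   # these become the number of bands and rows per band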
class MinHashLSH(object):
'''
The :ref:`minhash_lsh` index.
It supports query with `Jaccard similarity`_ threshold.
Reference: `Chapter 3, Mining of Massive Datasets
<http://www.mmds.org/>`_.
Args:
threshold (float): The Jaccard similarity threshold between 0.0 and
            1.0. The initialized MinHash LSH will be optimized for the threshold by
            minimizing the false positive and false negative probabilities.
num_perm (int, optional): The number of permutation functions used
by the MinHash to be indexed. For weighted MinHash, this
is the sample size (`sample_size`).
weights (tuple, optional): Used to adjust the relative importance of
minimizing false positive and false negative when optimizing
for the Jaccard similarity threshold.
`weights` is a tuple in the format of
:code:`(false_positive_weight, false_negative_weight)`.
params (tuple, optional): The LSH parameters (i.e., number of bands and size
            of each band). This is used to bypass the parameter optimization
step in the constructor. `threshold` and `weights` will be ignored
if this is given.
storage_config (dict, optional): Type of storage service to use for storing
hashtables and keys.
`basename` is an optional property whose value will be used as the prefix to
stored keys. If this is not set, a random string will be generated instead. If you
set this, you will be responsible for ensuring there are no key collisions.
prepickle (bool, optional): If True, all keys are pickled to bytes before
insertion. If None, a default value is chosen based on the
`storage_config`.
hashfunc (function, optional): If a hash function is provided it will be used to
compress the index keys to reduce the memory footprint. This could cause a higher
false positive rate.
Note:
`weights` must sum to 1.0, and the format is
(false positive weight, false negative weight).
For example, if minimizing false negative (or maintaining high recall) is more
important, assign more weight toward false negative: weights=(0.4, 0.6).
Try to live with a small difference between weights (i.e. < 0.5).
'''
def __init__(self, threshold=0.9, num_perm=128, weights=(0.5, 0.5),
params=None, storage_config=None, prepickle=None, hashfunc=None):
storage_config = {'type': 'dict'} if not storage_config else storage_config
self._buffer_size = 50000
if threshold > 1.0 or threshold < 0.0:
raise ValueError("threshold must be in [0.0, 1.0]")
if num_perm < 2:
raise ValueError("Too few permutation functions")
if any(w < 0.0 or w > 1.0 for w in weights):
raise ValueError("Weight must be in [0.0, 1.0]")
if sum(weights) != 1.0:
raise ValueError("Weights must sum to 1.0")
self.h = num_perm
if params is not None:
self.b, self.r = params
if self.b * self.r > num_perm:
raise ValueError("The product of b and r in params is "
"{} * {} = {} -- it must be less than num_perm {}. "
"Did you forget to specify num_perm?".format(
self.b, self.r, self.b*self.r, num_perm))
else:
false_positive_weight, false_negative_weight = weights
self.b, self.r = _optimal_param(threshold, num_perm,
false_positive_weight, false_negative_weight)
self.prepickle = storage_config['type'] == 'redis' if prepickle is None else prepickle
self.hashfunc = hashfunc
if hashfunc:
self._H = self._hashed_byteswap
else:
self._H = self._byteswap
basename = storage_config.get('basename', _random_name(11))
self.hashtables = [
unordered_storage(storage_config, name=b''.join([basename, b'_bucket_', struct.pack('>H', i)]))
for i in range(self.b)]
self.hashranges = [(i*self.r, (i+1)*self.r) for i in range(self.b)]
self.keys = ordered_storage(storage_config, name=b''.join([basename, b'_keys']))
@property
def buffer_size(self):
return self._buffer_size
@buffer_size.setter
def buffer_size(self, value):
self.keys.buffer_size = value
for t in self.hashtables:
t.buffer_size = value
self._buffer_size = value
def insert(self, key, minhash, check_duplication=True):
'''
Insert a key to the index, together
with a MinHash (or weighted MinHash) of the set referenced by
the key.
:param str key: The identifier of the set.
:param datasketch.MinHash minhash: The MinHash of the set.
:param bool check_duplication: To avoid duplicate keys in the storage (`default=True`).
It's recommended to not change the default, but
if you want to avoid the overhead during insert
you can set `check_duplication = False`.
'''
self._insert(key, minhash, check_duplication=check_duplication, buffer=False)
def insertion_session(self, buffer_size=50000):
'''
Create a context manager for fast insertion into this index.
:param int buffer_size: The buffer size for insert_session mode (default=50000).
Returns:
datasketch.lsh.MinHashLSHInsertionSession
'''
return MinHashLSHInsertionSession(self, buffer_size=buffer_size)
def _insert(self, key, minhash, check_duplication=True, buffer=False):
if len(minhash) != self.h:
raise ValueError("Expecting minhash with length %d, got %d"
% (self.h, len(minhash)))
if self.prepickle:
key = pickle.dumps(key)
if check_duplication and key in self.keys:
raise ValueError("The given key already exists")
Hs = [self._H(minhash.hashvalues[start:end])
for start, end in self.hashranges]
self.keys.insert(key, *Hs, buffer=buffer)
for H, hashtable in zip(Hs, self.hashtables):
hashtable.insert(H, key, buffer=buffer)
def query(self, minhash):
'''
        Given the MinHash of the query set, retrieve
        the keys that reference sets with Jaccard
similarities greater than the threshold.
Args:
minhash (datasketch.MinHash): The MinHash of the query set.
Returns:
`list` of unique keys.
'''
if len(minhash) != self.h:
raise ValueError("Expecting minhash with length %d, got %d"
% (self.h, len(minhash)))
candidates = set()
for (start, end), hashtable in zip(self.hashranges, self.hashtables):
H = self._H(minhash.hashvalues[start:end])
for key in hashtable.get(H):
candidates.add(key)
if self.prepickle:
return [pickle.loads(key) for key in candidates]
else:
return list(candidates)
def add_to_query_buffer(self, minhash):
'''
        Given the MinHash of the query set, buffer
        queries to retrieve the keys that reference
sets with Jaccard similarities greater than
the threshold.
Buffered queries can be executed using
`collect_query_buffer`. The combination of these
        functions is much faster when the Cassandra backend
        is used with `shared_buffer`.
Args:
minhash (datasketch.MinHash): The MinHash of the query set.
'''
if len(minhash) != self.h:
raise ValueError("Expecting minhash with length %d, got %d"
% (self.h, len(minhash)))
for (start, end), hashtable in zip(self.hashranges, self.hashtables):
H = self._H(minhash.hashvalues[start:end])
hashtable.add_to_select_buffer([H])
def collect_query_buffer(self):
'''
Execute and return buffered queries given
by `add_to_query_buffer`.
If multiple query MinHash were added to the query buffer,
the intersection of the results of all query MinHash will be returned.
Returns:
`list` of unique keys.
'''
collected_result_sets = [
set(collected_result_lists)
for hashtable in self.hashtables
for collected_result_lists in hashtable.collect_select_buffer()
]
if not collected_result_sets:
return []
if self.prepickle:
return [pickle.loads(key) for key in set.intersection(*collected_result_sets)]
return list(set.intersection(*collected_result_sets))
def __contains__(self, key):
'''
Args:
key (hashable): The unique identifier of a set.
Returns:
bool: True only if the key exists in the index.
'''
if self.prepickle:
key = pickle.dumps(key)
return key in self.keys
def remove(self, key):
'''
Remove the key from the index.
Args:
key (hashable): The unique identifier of a set.
'''
if self.prepickle:
key = pickle.dumps(key)
if key not in self.keys:
raise ValueError("The given key does not exist")
for H, hashtable in zip(self.keys[key], self.hashtables):
hashtable.remove_val(H, key)
if not hashtable.get(H):
hashtable.remove(H)
self.keys.remove(key)
def is_empty(self):
'''
Returns:
            bool: True if the index is empty.
'''
return any(t.size() == 0 for t in self.hashtables)
def _byteswap(self, hs):
return bytes(hs.byteswap().data)
def _hashed_byteswap(self, hs):
return self.hashfunc(bytes(hs.byteswap().data))
def _query_b(self, minhash, b):
if len(minhash) != self.h:
raise ValueError("Expecting minhash with length %d, got %d"
% (self.h, len(minhash)))
if b > len(self.hashtables):
raise ValueError("b must be less or equal to the number of hash tables")
candidates = set()
for (start, end), hashtable in zip(self.hashranges[:b], self.hashtables[:b]):
H = self._H(minhash.hashvalues[start:end])
if H in hashtable:
for key in hashtable[H]:
candidates.add(key)
if self.prepickle:
return {pickle.loads(key) for key in candidates}
else:
return candidates
def get_counts(self):
'''
Returns a list of length ``self.b`` with elements representing the
number of keys stored under each bucket for the given permutation.
'''
counts = [
hashtable.itemcounts() for hashtable in self.hashtables]
return counts
def get_subset_counts(self, *keys):
'''
Returns the bucket allocation counts (see :func:`~datasketch.MinHashLSH.get_counts` above)
restricted to the list of keys given.
Args:
keys (hashable) : the keys for which to get the bucket allocation
counts
'''
if self.prepickle:
key_set = [pickle.dumps(key) for key in set(keys)]
else:
key_set = list(set(keys))
hashtables = [unordered_storage({'type': 'dict'}) for _ in
range(self.b)]
Hss = self.keys.getmany(*key_set)
for key, Hs in zip(key_set, Hss):
for H, hashtable in zip(Hs, hashtables):
hashtable.insert(H, key)
return [hashtable.itemcounts() for hashtable in hashtables]
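# Illustrative usage sketch (not part of the class): index one set and query with
# a similar one. Assumes `MinHash` from this package; the exact query result
# depends on the random permutations.
#
#   from datasketch import MinHash
#   m1, m2 = MinHash(num_perm=128), MinHash(num_perm=128)
#   for word in "hello world foo".split():
#       m1.update(word.encode("utf8"))
#   for word in "hello world bar".split():
#       m2.update(word.encode("utf8"))
#   lsh = MinHashLSH(threshold=0.4, num_perm=128)
#   lsh.insert("set1", m1)
#   lsh.query(m2)   # likely ["set1"], since the true Jaccard similarity is 0.5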
class MinHashLSHInsertionSession:
'''Context manager for batch insertion of documents into a MinHashLSH.
'''
def __init__(self, lsh, buffer_size):
self.lsh = lsh
self.lsh.buffer_size = buffer_size
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
self.lsh.keys.empty_buffer()
for hashtable in self.lsh.hashtables:
hashtable.empty_buffer()
def insert(self, key, minhash, check_duplication=True):
'''
Insert a unique key to the index, together
with a MinHash (or weighted MinHash) of the set referenced by
the key.
Args:
key (hashable): The unique identifier of the set.
minhash (datasketch.MinHash): The MinHash of the set.
'''
self.lsh._insert(key, minhash, check_duplication=check_duplication,
buffer=True)
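# Illustrative sketch of the buffered-insert path defined above: the context
# manager batches writes and flushes them on exit via `close()`. `pairs` is a
# hypothetical iterable of (key, MinHash) tuples.
#
#   lsh = MinHashLSH(threshold=0.8, num_perm=128)
#   with lsh.insertion_session(buffer_size=1000) as session:
#       for key, minhash in pairs:
#           session.insert(key, minhash)
#   # buffers are emptied on exit; the inserted keys are now queryable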
|
|
"""Test the translation helper."""
import asyncio
from os import path
import pathlib
import pytest
from homeassistant.const import EVENT_COMPONENT_LOADED
from homeassistant.generated import config_flows
from homeassistant.helpers import translation
from homeassistant.loader import async_get_integration
from homeassistant.setup import async_setup_component, setup_component
from tests.async_mock import Mock, patch
@pytest.fixture
def mock_config_flows():
"""Mock the config flows."""
flows = []
with patch.object(config_flows, "FLOWS", flows):
yield flows
def test_flatten():
"""Test the flatten function."""
data = {"parent1": {"child1": "data1", "child2": "data2"}, "parent2": "data3"}
flattened = translation.flatten(data)
assert flattened == {
"parent1.child1": "data1",
"parent1.child2": "data2",
"parent2": "data3",
}
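# Not part of the helper: a minimal recursive sketch that would satisfy the
# assertion above, flattening nested dicts into dot-separated keys. The function
# name is hypothetical; the real implementation lives in homeassistant.helpers.translation.
def _flatten_sketch(data, prefix=""):
    """Hypothetical reference implementation of translation.flatten."""
    output = {}
    for key, value in data.items():
        if isinstance(value, dict):
            output.update(_flatten_sketch(value, f"{prefix}{key}."))
        else:
            output[f"{prefix}{key}"] = value
    return output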
async def test_component_translation_path(hass):
"""Test the component translation file function."""
assert await async_setup_component(
hass,
"switch",
{"switch": [{"platform": "test"}, {"platform": "test_embedded"}]},
)
assert await async_setup_component(hass, "test_standalone", {"test_standalone"})
assert await async_setup_component(hass, "test_package", {"test_package"})
(
int_test,
int_test_embedded,
int_test_standalone,
int_test_package,
) = await asyncio.gather(
async_get_integration(hass, "test"),
async_get_integration(hass, "test_embedded"),
async_get_integration(hass, "test_standalone"),
async_get_integration(hass, "test_package"),
)
assert path.normpath(
translation.component_translation_path("switch.test", "en", int_test)
) == path.normpath(
hass.config.path("custom_components", "test", "translations", "switch.en.json")
)
assert path.normpath(
translation.component_translation_path(
"switch.test_embedded", "en", int_test_embedded
)
) == path.normpath(
hass.config.path(
"custom_components", "test_embedded", "translations", "switch.en.json"
)
)
assert (
translation.component_translation_path(
"test_standalone", "en", int_test_standalone
)
is None
)
assert path.normpath(
translation.component_translation_path("test_package", "en", int_test_package)
) == path.normpath(
hass.config.path("custom_components", "test_package", "translations", "en.json")
)
def test_load_translations_files(hass):
"""Test the load translation files function."""
# Test one valid and one invalid file
file1 = hass.config.path(
"custom_components", "test", "translations", "switch.en.json"
)
file2 = hass.config.path(
"custom_components", "test", "translations", "invalid.json"
)
assert translation.load_translations_files(
{"switch.test": file1, "invalid": file2}
) == {
"switch.test": {
"state": {"string1": "Value 1", "string2": "Value 2"},
"something": "else",
},
"invalid": {},
}
async def test_get_translations(hass, mock_config_flows):
"""Test the get translations helper."""
translations = await translation.async_get_translations(hass, "en", "state")
assert translations == {}
assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}})
await hass.async_block_till_done()
translations = await translation.async_get_translations(hass, "en", "state")
assert translations["component.switch.state.string1"] == "Value 1"
assert translations["component.switch.state.string2"] == "Value 2"
translations = await translation.async_get_translations(hass, "de", "state")
assert "component.switch.something" not in translations
assert translations["component.switch.state.string1"] == "German Value 1"
assert translations["component.switch.state.string2"] == "German Value 2"
# Test a partial translation
translations = await translation.async_get_translations(hass, "es", "state")
assert translations["component.switch.state.string1"] == "Spanish Value 1"
assert translations["component.switch.state.string2"] == "Value 2"
# Test that an untranslated language falls back to English.
translations = await translation.async_get_translations(
hass, "invalid-language", "state"
)
assert translations["component.switch.state.string1"] == "Value 1"
assert translations["component.switch.state.string2"] == "Value 2"
async def test_get_translations_loads_config_flows(hass, mock_config_flows):
"""Test the get translations helper loads config flow translations."""
mock_config_flows.append("component1")
integration = Mock(file_path=pathlib.Path(__file__))
integration.name = "Component 1"
with patch(
"homeassistant.helpers.translation.component_translation_path",
return_value="bla.json",
), patch(
"homeassistant.helpers.translation.load_translations_files",
return_value={"component1": {"hello": "world"}},
), patch(
"homeassistant.helpers.translation.async_get_integration",
return_value=integration,
):
translations = await translation.async_get_translations(
hass, "en", "hello", config_flow=True
)
assert translations == {
"component.component1.hello": "world",
}
assert "component1" not in hass.config.components
async def test_get_translations_while_loading_components(hass):
"""Test the get translations helper loads config flow translations."""
integration = Mock(file_path=pathlib.Path(__file__))
integration.name = "Component 1"
hass.config.components.add("component1")
load_count = 0
def mock_load_translation_files(files):
"""Mock load translation files."""
nonlocal load_count
load_count += 1
# Mimic race condition by loading a component during setup
setup_component(hass, "persistent_notification", {})
return {"component1": {"hello": "world"}}
with patch(
"homeassistant.helpers.translation.component_translation_path",
return_value="bla.json",
), patch(
"homeassistant.helpers.translation.load_translations_files",
mock_load_translation_files,
), patch(
"homeassistant.helpers.translation.async_get_integration",
return_value=integration,
):
tasks = [
translation.async_get_translations(hass, "en", "hello") for _ in range(5)
]
all_translations = await asyncio.gather(*tasks)
assert all_translations[0] == {
"component.component1.hello": "world",
}
assert load_count == 1
async def test_get_translation_categories(hass):
"""Test the get translations helper loads config flow translations."""
with patch.object(translation, "async_get_config_flows", return_value={"light"}):
translations = await translation.async_get_translations(
hass, "en", "title", None, True
)
assert "component.light.title" in translations
translations = await translation.async_get_translations(
hass, "en", "device_automation", None, True
)
assert "component.light.device_automation.action_type.turn_on" in translations
async def test_translation_merging(hass, caplog):
"""Test we merge translations of two integrations."""
hass.config.components.add("sensor.moon")
hass.config.components.add("sensor.season")
hass.config.components.add("sensor")
translations = await translation.async_get_translations(hass, "en", "state")
assert "component.sensor.state.moon__phase.first_quarter" in translations
assert "component.sensor.state.season__season.summer" in translations
# Clear cache
hass.bus.async_fire(EVENT_COMPONENT_LOADED)
await hass.async_block_till_done()
# Patch in some bad translation data
orig_load_translations = translation.load_translations_files
def mock_load_translations_files(files):
"""Mock loading."""
result = orig_load_translations(files)
result["sensor.season"] = {"state": "bad data"}
return result
with patch(
"homeassistant.helpers.translation.load_translations_files",
side_effect=mock_load_translations_files,
):
translations = await translation.async_get_translations(hass, "en", "state")
assert "component.sensor.state.moon__phase.first_quarter" in translations
assert (
"An integration providing translations for sensor provided invalid data: bad data"
in caplog.text
)
async def test_caching(hass):
"""Test we cache data."""
hass.config.components.add("sensor")
# Patch with same method so we can count invocations
with patch(
"homeassistant.helpers.translation.merge_resources",
side_effect=translation.merge_resources,
) as mock_merge:
await translation.async_get_translations(hass, "en", "state")
assert len(mock_merge.mock_calls) == 1
await translation.async_get_translations(hass, "en", "state")
assert len(mock_merge.mock_calls) == 1
# This event clears the cache so we should record another call
hass.bus.async_fire(EVENT_COMPONENT_LOADED)
await hass.async_block_till_done()
await translation.async_get_translations(hass, "en", "state")
assert len(mock_merge.mock_calls) == 2
async def test_custom_component_translations(hass):
"""Test getting translation from custom components."""
hass.config.components.add("test_standalone")
hass.config.components.add("test_embedded")
hass.config.components.add("test_package")
assert await translation.async_get_translations(hass, "en", "state") == {}
|
|
###############################################################################
# AnyAxisymmetricRazorThinDiskPotential.py: class that implements the
# potential of an arbitrary
# axisymmetric, razor-thin disk
###############################################################################
import numpy
from scipy import integrate, special
from .Potential import Potential, check_potential_inputs_not_arrays, \
_APY_LOADED
from ..util import conversion
if _APY_LOADED:
from astropy import units
class AnyAxisymmetricRazorThinDiskPotential(Potential):
"""Class that implements the potential of an arbitrary axisymmetric, razor-thin disk with surface density :math:`\Sigma(R)`"""
def __init__(self,surfdens=lambda R: 1.5*numpy.exp(-R/0.5),amp=1.,
normalize=False,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
Initialize the potential of an arbitrary axisymmetric disk
INPUT:
           surfdens= (1.5 e^[-R/0.5]) function of a single variable that gives the surface density as a function of radius (can return a Quantity)
amp= (1.) amplitude to be applied to the potential
normalize - if True, normalize such that vc(1.,0.)=1., or, if given as a number, such that the force is this fraction of the force necessary to make vc(1.,0.)=1.
ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)
OUTPUT:
AnyAxisymmetricRazorThinDiskPotential object
HISTORY:
2021-01-04 - Written - Bovy (UofT)
"""
Potential.__init__(self,amp=amp,ro=ro,vo=vo)
# Parse surface density: does it have units? does it expect them?
if _APY_LOADED:
_sdens_unit_input= False
try:
surfdens(1)
except (units.UnitConversionError,units.UnitTypeError):
_sdens_unit_input= True
_sdens_unit_output= False
if _sdens_unit_input:
try:
surfdens(1.*units.kpc).to(units.Msun/units.pc**2)
except (AttributeError,units.UnitConversionError): pass
else: _sdens_unit_output= True
else:
try:
surfdens(1.).to(units.Msun/units.pc**2)
except (AttributeError,units.UnitConversionError): pass
else: _sdens_unit_output= True
if _sdens_unit_input and _sdens_unit_output:
self._sdens= lambda R: conversion.parse_surfdens(\
surfdens(R*self._ro*units.kpc),
ro=self._ro,vo=self._vo)
elif _sdens_unit_input:
self._sdens= lambda R: surfdens(R*self._ro*units.kpc)
elif _sdens_unit_output:
self._sdens= lambda R: conversion.parse_surfdens(surfdens(R),
ro=self._ro,
vo=self._vo)
if not hasattr(self,'_sdens'): # unitless
self._sdens= surfdens
# The potential at zero, in case it's asked for
self._pot_zero= -2.*numpy.pi*integrate.quad(lambda a: self._sdens(a),
0,numpy.inf)[0]
if normalize or \
(isinstance(normalize,(int,float)) \
and not isinstance(normalize,bool)): #pragma: no cover
self.normalize(normalize)
@check_potential_inputs_not_arrays
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
potential at (R,z)
HISTORY:
2021-01-04 - Written - Bovy (UofT)
"""
if R == 0 and z == 0:
return self._pot_zero
elif numpy.isinf(R**2+z**2):
return 0.
potint= lambda a: a*self._sdens(a)\
/numpy.sqrt((R+a)**2.+z**2.)*special.ellipk(4*R*a/((R+a)**2.+z**2.))
return -4*(integrate.quad(potint,0,2*R,points=[R])[0]
+integrate.quad(potint,2*R,numpy.inf)[0])
@check_potential_inputs_not_arrays
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force at (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
F_R at (R,z)
HISTORY:
2021-01-04 - Written - Bovy (UofT)
"""
R2= R**2
z2= z**2
def rforceint(a):
a2= a**2
aRz= (a+R)**2.+z2
faRoveraRz= 4*a*R/aRz
return a*self._sdens(a)\
*((a2-R2+z2)*special.ellipe(faRoveraRz)
-((a-R)**2+z2)*special.ellipk(faRoveraRz))\
/R/((a-R)**2+z2)/numpy.sqrt(aRz)
return 2*(integrate.quad(rforceint,0,2*R,points=[R])[0]
+integrate.quad(rforceint,2*R,numpy.inf)[0])
@check_potential_inputs_not_arrays
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force at (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
F_z at (R,z)
HISTORY:
2021-01-04 - Written - Bovy (UofT)
"""
if z == 0:
return 0.
z2= z**2
def zforceint(a):
aRz= (a+R)**2.+z2
faRoveraRz= 4*a*R/aRz
return a*self._sdens(a)\
*special.ellipe(faRoveraRz)/((a-R)**2+z2)/numpy.sqrt(aRz)
return -4*z*(integrate.quad(zforceint,0,2*R,points=[R])[0]
+integrate.quad(zforceint,2*R,numpy.inf)[0])
@check_potential_inputs_not_arrays
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_R2deriv
PURPOSE:
evaluate the 2nd radial derivative at (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2 Phi / dR2 at (R,z)
HISTORY:
2021-01-04 - Written - Bovy (UofT)
"""
R2= R**2
z2= z**2
def r2derivint(a):
a2= a**2
aRz= (a+R)**2.+z2
faRoveraRz= 4*a*R/aRz
return a*self._sdens(a)\
*(-(((a2-3.*R2)*(a2-R2)**2+(3.*a2**2+2.*a2*R2+3.*R2**2)*z2
+(3.*a2+7.*R2)*z**4+z**6)*special.ellipe(faRoveraRz))
+((a-R)**2+z2)*((a2-R2)**2+2.*(a2+2.*R2)*z2+z**4)
*special.ellipk(faRoveraRz))\
/(2.*R2*((a-R)**2+z2)**2*((a+R)**2+z2)**1.5)
return -4*(integrate.quad(r2derivint,0,2*R,points=[R])[0]
+integrate.quad(r2derivint,2*R,numpy.inf)[0])
@check_potential_inputs_not_arrays
def _z2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_z2deriv
PURPOSE:
evaluate the 2nd vertical derivative at (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2 Phi / dz2 at (R,z)
HISTORY:
2021-01-04 - Written - Bovy (UofT)
"""
R2= R**2
z2= z**2
def z2derivint(a):
a2= a**2
aRz= (a+R)**2.+z2
faRoveraRz= 4*a*R/aRz
return a*self._sdens(a)\
*(-(((a2-R2)**2-2.*(a2+R2)*z2-3.*z**4)*special.ellipe(faRoveraRz))
-z2*((a-R)**2+z2)*special.ellipk(faRoveraRz))\
/(((a-R)**2+z2)**2*((a+R)**2+z2)**1.5)
return -4*(integrate.quad(z2derivint,0,2*R,points=[R])[0]
+integrate.quad(z2derivint,2*R,numpy.inf)[0])
@check_potential_inputs_not_arrays
def _Rzderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed radial, vertical derivative at (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
d2 Phi / dRdz at (R,z)
HISTORY:
2021-01-04 - Written - Bovy (UofT)
"""
R2= R**2
z2= z**2
def rzderivint(a):
a2= a**2
aRz= (a+R)**2.+z2
faRoveraRz= 4*a*R/aRz
return a*self._sdens(a)\
*(-((a**4-7.*R**4-6.*R2*z2+z**4+2.*a2*(3.*R2+z2))
*special.ellipe(faRoveraRz))
+((a-R)**2+z**2)*(a2-R2+z2)*special.ellipk(faRoveraRz))\
/R/((a-R)**2+z2)**2/((a+R)**2+z2)**1.5
return -2*z*(integrate.quad(rzderivint,0,2*R,points=[R])[0]
+integrate.quad(rzderivint,2*R,numpy.inf)[0])
def _surfdens(self,R,z,phi=0.,t=0.):
"""
NAME:
_surfdens
PURPOSE:
evaluate the surface density
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Sigma (R,z)
HISTORY:
2021-01-04 - Written - Bovy (UofT)
"""
return self._sdens(R)
|
|
# -*- coding: utf-8 -*-
from .grammar import SchemaGrammar
from ..blueprint import Blueprint
from ...query.expression import QueryExpression
from ...support.fluent import Fluent
class SQLiteSchemaGrammar(SchemaGrammar):
_modifiers = ['nullable', 'default', 'increment']
_serials = ['big_integer', 'integer']
def compile_rename_column(self, blueprint, command, connection):
"""
Compile a rename column command.
:param blueprint: The blueprint
:type blueprint: Blueprint
:param command: The command
:type command: Fluent
:param connection: The connection
:type connection: orator.connections.Connection
:rtype: list
"""
        # The code is a little complex. It will probably change
        # if we support complete diffs in dbal
sql = []
schema = connection.get_schema_manager()
table = self.get_table_prefix() + blueprint.get_table()
column = connection.get_column(table, command.from_)
columns = schema.list_table_columns(table).values()
indexes = schema.list_table_indexes(table)
foreign_keys = schema.list_table_foreign_keys(table)
diff = self._get_renamed_diff(blueprint, command, column, schema)
renamed_columns = diff.renamed_columns
old_column_names = list(map(lambda x: x.get_name(), columns))
# We build the new column names
new_column_names = []
for column_name in old_column_names:
if column_name in renamed_columns:
new_column_names.append(renamed_columns[column_name].get_name())
else:
new_column_names.append(column_name)
# We create a temporary table and insert the data into it
temp_table = '__temp__' + self.get_table_prefix() + blueprint.get_table()
sql.append('CREATE TEMPORARY TABLE %s AS SELECT %s FROM %s'
% (temp_table, self.columnize(old_column_names), table))
# We drop the current table
sql += Blueprint(table).drop().to_sql(None, self)
        # Build the list of new columns
new_columns = []
for column in columns:
for column_name, changed_column in renamed_columns.items():
if column_name == column.get_name():
new_columns.append(changed_column)
# Here we will try to rebuild a new blueprint to create a new table
# with the original name
new_blueprint = Blueprint(table)
new_blueprint.create()
primary = []
for column in columns:
# Mapping the database type to the blueprint type
type = column.get_type()
if type == 'smallint':
type = 'small_integer'
elif type == 'bigint':
type = 'big_integer'
elif type == 'blob':
type = 'binary'
# If the column is a primary, we will add it to the blueprint later
if column.get_platform_option('pk'):
primary.append(column.get_name())
# If the column is not one that's been renamed we reinsert it into the blueprint
if column.get_name() not in renamed_columns.keys():
col = getattr(new_blueprint, type)(column.get_name())
# If the column is nullable, we flag it
if not column.get_notnull():
col.nullable()
# If the column has a default value, we add it
if column.get_default() is not None:
col.default(QueryExpression(column.get_default()))
# Inserting the renamed columns into the blueprint
for column in new_columns:
type = column.get_type()
if type == 'smallint':
type = 'small_integer'
elif type == 'bigint':
type = 'big_integer'
elif type == 'blob':
type = 'binary'
col = getattr(new_blueprint, type)(column.get_name())
if not column.get_notnull():
col.nullable()
if column.get_default() is not None:
col.default(QueryExpression(column.get_default()))
# We add the primary keys
if primary:
new_blueprint.primary(primary)
# We rebuild the indexes
for index in indexes:
index_columns = index['columns']
new_index_columns = []
index_name = index['name']
for column_name in index_columns:
if column_name in renamed_columns:
new_index_columns.append(renamed_columns[column_name].get_name())
else:
new_index_columns.append(column_name)
if index_columns != new_index_columns:
index_name = None
if index['unique']:
new_blueprint.unique(new_index_columns, index_name)
else:
                new_blueprint.index(new_index_columns, index_name)
for foreign_key in foreign_keys:
fkey_from = foreign_key['from']
if fkey_from in renamed_columns:
fkey_from = renamed_columns[fkey_from].get_name()
new_blueprint.foreign(fkey_from)\
.references(foreign_key['to'])\
.on(foreign_key['table'])\
.on_delete(foreign_key['on_delete'])\
.on_update(foreign_key['on_update'])
# We create the table
sql += new_blueprint.to_sql(None, self)
# We reinsert the data into the new table
sql.append('INSERT INTO %s (%s) SELECT %s FROM %s'
% (self.wrap_table(table),
', '.join(new_column_names),
self.columnize(old_column_names),
self.wrap_table(temp_table)
))
# Finally we drop the temporary table
sql += Blueprint(temp_table).drop().to_sql(None, self)
return sql
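    # Illustrative outline (not produced verbatim) of the statement sequence
    # compile_rename_column returns, assuming a table `users` with a column
    # renamed from `name` to `full_name` and no table prefix:
    #   CREATE TEMPORARY TABLE __temp__users AS SELECT id, name FROM users
    #   DROP TABLE users                       -- via Blueprint(table).drop()
    #   CREATE TABLE users (...)               -- rebuilt blueprint, renamed column
    #   INSERT INTO users (id, full_name) SELECT id, name FROM __temp__users
    #   DROP TABLE __temp__users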
def compile_change(self, blueprint, command, connection):
"""
Compile a change column command into a series of SQL statement.
:param blueprint: The blueprint
:type blueprint: orator.schema.Blueprint
:param command: The command
:type command: Fluent
:param connection: The connection
:type connection: orator.connections.Connection
:rtype: list
"""
sql = []
schema = connection.get_schema_manager()
table = self.get_table_prefix() + blueprint.get_table()
columns = schema.list_table_columns(table).values()
indexes = schema.list_table_indexes(table)
foreign_keys = schema.list_table_foreign_keys(table)
diff = self._get_changed_diff(blueprint, schema)
blueprint_changed_columns = blueprint.get_changed_columns()
changed_columns = diff.changed_columns
temp_table = '__temp__' + self.get_table_prefix() + blueprint.get_table()
sql.append('CREATE TEMPORARY TABLE %s AS SELECT %s FROM %s'
% (temp_table, self.columnize(list(map(lambda x: x.get_name(), columns))), table))
sql += Blueprint(table).drop().to_sql(None, self)
new_columns = []
for column in columns:
for column_name, changed_column in changed_columns.items():
if column_name == column.get_name():
for blueprint_column in blueprint_changed_columns:
if blueprint_column.name == column_name:
new_columns.append(blueprint_column)
break
break
new_blueprint = Blueprint(table)
new_blueprint.create()
primary = []
new_column_names = []
for column in columns:
type = column.get_type()
if type == 'smallint':
type = 'small_integer'
elif type == 'bigint':
type = 'big_integer'
elif type == 'blob':
type = 'binary'
if column.get_platform_option('pk'):
primary.append(column.get_name())
if column.get_name() not in changed_columns:
col = getattr(new_blueprint, type)(column.get_name())
if not column.get_notnull():
col.nullable()
new_column_names.append(column.get_name())
for column in new_columns:
column.change = False
new_blueprint._add_column(**column.get_attributes())
new_column_names.append(column.name)
if primary:
new_blueprint.primary(primary)
for index in indexes:
if index['unique']:
new_blueprint.unique(index['columns'], index['name'])
else:
new_blueprint.index(index['columns'], index['name'])
for foreign_key in foreign_keys:
new_blueprint.foreign(foreign_key['from'])\
.references(foreign_key['to'])\
.on(foreign_key['table'])\
.on_delete(foreign_key['on_delete'])\
.on_update(foreign_key['on_update'])
sql += new_blueprint.to_sql(None, self)
sql.append('INSERT INTO %s (%s) SELECT %s FROM %s'
% (self.wrap_table(table),
', '.join(sorted(new_column_names)),
self.columnize(sorted(list(map(lambda x: x.get_name(), columns)))),
self.wrap_table(temp_table)
))
sql += Blueprint(temp_table).drop().to_sql(None, self)
return sql
def compile_table_exists(self):
"""
Compile the query to determine if a table exists
:rtype: str
"""
return "SELECT * FROM sqlite_master WHERE type = 'table' AND name = ?"
def compile_column_exists(self, table):
"""
Compile the query to determine the list of columns.
"""
return 'PRAGMA table_info(%s)' % table.replace('.', '__')
def compile_create(self, blueprint, command, _):
"""
Compile a create table command.
"""
columns = ', '.join(self._get_columns(blueprint))
sql = 'CREATE TABLE %s (%s' % (self.wrap_table(blueprint), columns)
sql += self._add_foreign_keys(blueprint)
sql += self._add_primary_keys(blueprint)
return sql + ')'
def _add_foreign_keys(self, blueprint):
sql = ''
foreigns = self._get_commands_by_name(blueprint, 'foreign')
for foreign in foreigns:
sql += self._get_foreign_key(foreign)
if foreign.get('on_delete'):
sql += ' ON DELETE %s' % foreign.on_delete
if foreign.get('on_update'):
                sql += ' ON UPDATE %s' % foreign.on_update
return sql
def _get_foreign_key(self, foreign):
on = self.wrap_table(foreign.on)
columns = self.columnize(foreign.columns)
references = foreign.references
if not isinstance(references, list):
references = [references]
on_columns = self.columnize(references)
return ', FOREIGN KEY(%s) REFERENCES %s(%s)' % (columns, on, on_columns)
def _add_primary_keys(self, blueprint):
primary = self._get_command_by_name(blueprint, 'primary')
if primary:
columns = self.columnize(primary.columns)
return ', PRIMARY KEY (%s)' % columns
return ''
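    # Rough shape of the SQL assembled by compile_create together with the two
    # helpers above (actual quoting comes from wrap_table and the column
    # definitions from _get_columns and the modifiers; this is only an
    # illustration). For a hypothetical blueprint with increments('id') and
    # string('name'):
    #   CREATE TABLE "users" ("id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
    #     "name" VARCHAR NOT NULL)
    # _add_foreign_keys and _add_primary_keys append ', FOREIGN KEY(...) ...'
    # and ', PRIMARY KEY (...)' fragments before the closing parenthesis.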
def compile_add(self, blueprint, command, _):
table = self.wrap_table(blueprint)
columns = self.prefix_list('ADD COLUMN', self._get_columns(blueprint))
statements = []
for column in columns:
statements.append('ALTER TABLE %s %s' % (table, column))
return statements
def compile_unique(self, blueprint, command, _):
columns = self.columnize(command.columns)
table = self.wrap_table(blueprint)
return 'CREATE UNIQUE INDEX %s ON %s (%s)' % (command.index, table, columns)
def compile_index(self, blueprint, command, _):
columns = self.columnize(command.columns)
table = self.wrap_table(blueprint)
return 'CREATE INDEX %s ON %s (%s)' % (command.index, table, columns)
def compile_foreign(self, blueprint, command, _):
pass
def compile_drop(self, blueprint, command, _):
return 'DROP TABLE %s' % self.wrap_table(blueprint)
def compile_drop_if_exists(self, blueprint, command, _):
return 'DROP TABLE IF EXISTS %s' % self.wrap_table(blueprint)
def compile_drop_column(self, blueprint, command, connection):
        # The code is a little complex. It will probably change
        # if we support complete diffs in dbal
sql = []
schema = connection.get_schema_manager()
table = self.get_table_prefix() + blueprint.get_table()
columns = schema.list_table_columns(table).values()
indexes = schema.list_table_indexes(table)
foreign_keys = schema.list_table_foreign_keys(table)
diff = self._get_table_diff(blueprint, schema)
for name in command.columns:
column = connection.get_column(blueprint.get_table(), name)
diff.removed_columns[name] = column
removed_columns = diff.removed_columns
old_column_names = list(map(lambda x: x.get_name(), columns))
# We build the new column names
new_column_names = []
for column_name in old_column_names:
if column_name not in removed_columns:
new_column_names.append(column_name)
# We create a temporary table and insert the data into it
temp_table = '__temp__' + self.get_table_prefix() + blueprint.get_table()
sql.append('CREATE TEMPORARY TABLE %s AS SELECT %s FROM %s'
% (temp_table, self.columnize(old_column_names), table))
# We drop the current table
sql += Blueprint(table).drop().to_sql(None, self)
# Here we will try to rebuild a new blueprint to create a new table
# with the original name
new_blueprint = Blueprint(table)
new_blueprint.create()
primary = []
for column in columns:
# If the column is not one that's been removed we reinsert it into the blueprint
if column.get_name() in new_column_names:
# Mapping the database type to the blueprint type
type = column.get_type()
if type == 'smallint':
type = 'small_integer'
elif type == 'bigint':
type = 'big_integer'
elif type == 'blob':
type = 'binary'
# If the column is a primary, we will add it to the blueprint later
if column.get_platform_option('pk'):
primary.append(column.get_name())
col = getattr(new_blueprint, type)(column.get_name())
# If the column is nullable, we flag it
if not column.get_notnull():
col.nullable()
# If the column has a default value, we add it
if column.get_default() is not None:
col.default(QueryExpression(column.get_default()))
# We add the primary keys
if primary:
new_blueprint.primary(primary)
# We rebuild the indexes
for index in indexes:
index_columns = index['columns']
new_index_columns = []
index_name = index['name']
removed = False
for column_name in index_columns:
if column_name not in removed_columns:
new_index_columns.append(column_name)
else:
removed = True
break
if removed:
continue
if index_columns != new_index_columns:
index_name = None
if index['unique']:
new_blueprint.unique(new_index_columns, index_name)
else:
new_blueprint.index(index['columns'], index_name)
for foreign_key in foreign_keys:
fkey_from = foreign_key['from']
if fkey_from in removed_columns:
continue
new_blueprint.foreign(fkey_from)\
.references(foreign_key['to'])\
.on(foreign_key['table'])\
.on_delete(foreign_key['on_delete'])\
.on_update(foreign_key['on_update'])
# We create the table
sql += new_blueprint.to_sql(None, self)
# We reinsert the data into the new table
sql.append('INSERT INTO %s (%s) SELECT %s FROM %s'
% (self.wrap_table(table),
self.columnize(new_column_names),
self.columnize(new_column_names),
self.wrap_table(temp_table)
))
# Finally we drop the temporary table
sql += Blueprint(temp_table).drop().to_sql(None, self)
return sql
def compile_drop_unique(self, blueprint, command, _):
return 'DROP INDEX %s' % command.index
def compile_drop_index(self, blueprint, command, _):
return 'DROP INDEX %s' % command.index
def compile_rename(self, blueprint, command, _):
from_ = self.wrap_table(blueprint)
return 'ALTER TABLE %s RENAME TO %s' % (from_, self.wrap_table(command.to))
def _type_char(self, column):
return 'VARCHAR'
def _type_string(self, column):
return 'VARCHAR'
def _type_text(self, column):
return 'TEXT'
def _type_medium_text(self, column):
return 'TEXT'
def _type_long_text(self, column):
return 'TEXT'
def _type_integer(self, column):
return 'INTEGER'
def _type_big_integer(self, column):
return 'INTEGER'
def _type_medium_integer(self, column):
return 'INTEGER'
def _type_tiny_integer(self, column):
return 'TINYINT'
def _type_small_integer(self, column):
return 'INTEGER'
def _type_float(self, column):
return 'FLOAT'
def _type_double(self, column):
return 'FLOAT'
def _type_decimal(self, column):
return 'NUMERIC'
def _type_boolean(self, column):
return 'TINYINT'
def _type_enum(self, column):
return 'VARCHAR'
def _type_json(self, column):
return 'TEXT'
def _type_date(self, column):
return 'DATE'
def _type_datetime(self, column):
return 'DATETIME'
def _type_time(self, column):
return 'TIME'
def _type_timestamp(self, column):
return 'DATETIME'
def _type_binary(self, column):
return 'BLOB'
def _modify_nullable(self, blueprint, column):
if column.get('nullable'):
return ' NULL'
return ' NOT NULL'
def _modify_default(self, blueprint, column):
if column.get('default') is not None:
return ' DEFAULT %s' % self._get_default_value(column.default)
return ''
def _modify_increment(self, blueprint, column):
if column.type in self._serials and column.auto_increment:
return ' PRIMARY KEY AUTOINCREMENT'
return ''
|
|
#!/usr/bin/env python
import binascii
import serial
import stm32_crc
import sys
import threading
import time
import traceback
import zipfile
from struct import pack, unpack
import os
import glob
import logging
import json
log = logging.getLogger()
logging.basicConfig(format='[%(levelname)-8s] %(message)s')
log.setLevel(logging.DEBUG)
DEFAULT_PEBBLE_ID = None #Triggers autodetection on unix-like systems
DEBUG_PROTOCOL = False
class PebbleBundle(object):
MANIFEST_FILENAME = 'manifest.json'
def __init__(self, bundle_path):
bundle_abs_path = os.path.abspath(bundle_path)
if not os.path.exists(bundle_abs_path):
raise "Bundle does not exist: " + bundle_path
self.zip = zipfile.ZipFile(bundle_abs_path)
self.path = bundle_abs_path
self.manifest = None
def get_manifest(self):
if (self.manifest):
return self.manifest
if self.MANIFEST_FILENAME not in self.zip.namelist():
raise "Could not find {}; are you sure this is a PebbleBundle?".format(self.MANIFEST_FILENAME)
self.manifest = json.loads(self.zip.read(self.MANIFEST_FILENAME))
return self.manifest
def close(self):
self.zip.close()
def is_firmware_bundle(self):
return 'firmware' in self.get_manifest()
def is_app_bundle(self):
return 'application' in self.get_manifest()
def has_resources(self):
return 'resources' in self.get_manifest()
def get_firmware_info(self):
if not self.is_firmware_bundle():
return None
return self.get_manifest()['firmware']
def get_application_info(self):
if not self.is_app_bundle():
return None
return self.get_manifest()['application']
def get_resources_info(self):
if not self.has_resources():
return None
return self.get_manifest()['resources']
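# Illustrative use of PebbleBundle (file name below is hypothetical), e.g. when
# deciding whether a bundle can be installed or flashed:
#
#   bundle = PebbleBundle("demo-app.pbw")
#   if bundle.is_app_bundle():
#       app_info = bundle.get_application_info()   # 'application' manifest entry
#   if bundle.has_resources():
#       res_info = bundle.get_resources_info()     # 'resources' manifest entry
#   bundle.close()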
class EndpointSync():
timeout = 10
def __init__(self, pebble, endpoint):
pebble.register_endpoint(endpoint, self.callback)
self.marker = threading.Event()
def callback(self, *args):
self.data = args
self.marker.set()
def get_data(self):
try:
self.marker.wait(timeout=self.timeout)
return self.data[1]
except:
return False
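# EndpointSync is a small blocking helper: it registers itself as the handler
# for one endpoint and waits (up to `timeout` seconds) for the reader thread to
# deliver a response. A minimal sketch of the pattern used by get_time() etc.:
#
#   pebble._send_message("TIME", "\x00")
#   timestamp = EndpointSync(pebble, "TIME").get_data()   # blocks up to 10 s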
class PebbleError(Exception):
def __init__(self, id, message):
self._id = id
self._message = message
def __str__(self):
return "%s (ID:%s)" % (self._message, self._id)
class Pebble(object):
"""
A connection to a Pebble watch; data and commands may be sent
to the watch through an instance of this class.
"""
_remote_commands = {
"ITUNES": [
"""
osascript -e 'tell application "System Events"
tell application "iTunes" to activate
key code 49
end tell'
""",
"""
osascript -e 'tell application "System Events"
tell application "iTunes" to activate
key code 124 using command down
end tell'
""",
"""
osascript -e 'tell application "System Events"
tell application "iTunes" to activate
key code 123 using command down
end tell'
"""
],
"KEYNOTE": [
"""
osascript -e 'tell application "System Events"
tell application "Keynote" to activate
key code 35 using {command down, option down}
end tell'
""",
"""
osascript -e 'tell application "System Events"
tell application "Keynote" to activate
key code 124 using command down
end tell'
""",
"""
osascript -e 'tell application "System Events"
tell application "Keynote" to activate
key code 123 using command down
end tell'
"""
],
"POWERPOINT": [
"""
osascript -e 'tell application "Microsoft PowerPoint"
activate
run slide show slide show settings of active presentation
end tell'
""",
"""
osascript -e 'tell application "Microsoft PowerPoint"
activate
go to next slide slide show view of slide show window 1
end tell'
""",
"""
osascript -e 'tell application "Microsoft PowerPoint"
activate
go to previous slide slide show view of slide show window 1
end tell'
"""
]
}
endpoints = {
"TIME": 11,
"VERSION": 16,
"PHONE_VERSION": 17,
"SYSTEM_MESSAGE": 18,
"MUSIC_CONTROL": 32,
"PHONE_CONTROL": 33,
"LOGS": 2000,
"PING": 2001,
"LOG_DUMP": 2002,
"RESET": 2003,
"APP": 2004,
"NOTIFICATION": 3000,
"RESOURCE": 4000,
"APP_MANAGER": 6000,
"PUTBYTES": 48879
}
@staticmethod
def AutodetectDevice():
if os.name != "posix": #i.e. Windows
raise NotImplementedError("Autodetection is only implemented on UNIX-like systems.")
pebbles = glob.glob("/dev/tty.Pebble????-SerialPortSe")
if len(pebbles) == 0:
raise PebbleError(None, "Autodetection could not find any Pebble devices")
elif len(pebbles) > 1:
log.warn("Autodetect found %d Pebbles; using most recent" % len(pebbles))
#NOTE: Not entirely sure if this is the correct approach
pebbles.sort(key=lambda x: os.stat(x).st_mtime, reverse=True)
id = pebbles[0][15:19]
log.info("Autodetect found a Pebble with ID %s" % id)
return id
def __init__(self, id = None):
if id is None:
id = Pebble.AutodetectDevice()
self.id = id
self._alive = True
self._endpoint_handlers = {}
self._internal_endpoint_handlers = {
self.endpoints["TIME"]: self._get_time_response,
self.endpoints["VERSION"]: self._version_response,
self.endpoints["SYSTEM_MESSAGE"]: self._system_message_response,
self.endpoints["LOGS"]: self._log_response,
self.endpoints["PING"]: self._ping_response,
self.endpoints["APP_MANAGER"]: self._appbank_status_response,
self.endpoints["MUSIC_CONTROL"]: self._remote_response
}
try:
devicefile = "/dev/tty.Pebble"+id+"-SerialPortSe"
log.debug("Attempting to open %s as Pebble device %s" % (devicefile, id))
self._ser = serial.Serial(devicefile, 115200, timeout=1)
log.debug("Connected, discarding null response")
# we get a null response when we connect, discard it
self._ser.read(5)
self._ser.write("\x00\x0d\x00\x11\x01\xff\xff\xff\xff\x80\x00\x00\x00\x00\x00\x00\x32")
# Eat any cruft that might be sitting in the serial buffer...
while self._ser.read():
pass
log.debug("Initializing reader thread")
self._read_thread = threading.Thread(target=self._reader)
self._read_thread.setDaemon(True)
self._read_thread.start()
log.debug("Reader thread loaded")
except:
raise PebbleError(id, "Failed to connect to Pebble")
def __del__(self):
try:
self._ser.close()
except:
pass
def _reader(self):
try:
while self._alive:
endpoint, resp = self._recv_message()
if resp == None:
continue
if DEBUG_PROTOCOL:
log.debug("Got message for endpoint %s of length %d" % (endpoint, len(resp)))
log.debug('<<< ' + resp.encode('hex'))
if endpoint in self._internal_endpoint_handlers:
resp = self._internal_endpoint_handlers[endpoint](endpoint, resp)
if endpoint in self._endpoint_handlers:
self._endpoint_handlers[endpoint](endpoint, resp)
except:
traceback.print_exc()
raise PebbleError(self.id, "Lost connection to Pebble")
self._alive = False
def _build_message(self, endpoint, data):
return pack("!HH", len(data), endpoint)+data
def _send_message(self, endpoint, data, callback = None):
if endpoint not in self.endpoints:
raise PebbleError(self.id, "Invalid endpoint specified")
msg = self._build_message(self.endpoints[endpoint], data)
if DEBUG_PROTOCOL:
log.debug('>>> ' + msg.encode('hex'))
self._ser.write(msg)
def _recv_message(self):
data = self._ser.read(4)
if len(data) == 0:
return (None, None)
elif len(data) < 4:
raise PebbleError(self.id, "Malformed response with length "+str(len(data)))
size, endpoint = unpack("!HH", data)
resp = self._ser.read(size)
return (endpoint, resp)
def register_endpoint(self, endpoint_name, func):
if endpoint_name not in self.endpoints:
raise PebbleError(self.id, "Invalid endpoint specified")
endpoint = self.endpoints[endpoint_name]
self._endpoint_handlers[endpoint] = func
def notification_sms(self, sender, body):
"""Send a 'SMS Notification' to the displayed on the watch."""
ts = str(int(time.time())*1000)
parts = [sender, body, ts]
data = "\x01"
for part in parts:
data += pack("!b", len(part))+part
self._send_message("NOTIFICATION", data)
def notification_email(self, sender, subject, body):
"""Send an 'Email Notification' to the displayed on the watch."""
ts = str(int(time.time())*1000)
parts = [sender, subject, ts, body]
data = "\x00"
for part in parts:
data += pack("!b", len(part))+part
self._send_message("NOTIFICATION", data)
def set_nowplaying_metadata(self, track, album, artist):
"""Update the song metadata displayed in Pebble's music app."""
parts = [artist, album, track]
data = pack("!b", 16)
for part in parts:
data += pack("!b", len(part))+part
self._send_message("MUSIC_CONTROL", data)
def get_versions(self, async = False):
"""
Retrieve a summary of version information for various software
(firmware, bootloader, etc) running on the watch.
"""
self._send_message("VERSION", "\x00")
if not async:
return EndpointSync(self, "VERSION").get_data()
def get_appbank_status(self, async = False):
"""
Retrieve a list of all installed watch-apps.
This is particularly useful when trying to locate a
free app-bank to use when installing a new watch-app.
"""
self._send_message("APP_MANAGER", "\x01")
if not async:
return EndpointSync(self, "APP_MANAGER").get_data()
def remove_app(self, appid, index):
"""Remove an installed application from the target app-bank."""
data = pack("!bII", 2, appid, index)
self._send_message("APP_MANAGER", data)
def get_time(self, async = False):
"""Retrieve the time from the Pebble's RTC."""
self._send_message("TIME", "\x00")
if not async:
return EndpointSync(self, "TIME").get_data()
def set_time(self, timestamp):
"""Set the time stored in the target Pebble's RTC."""
data = pack("!bL", 2, timestamp)
self._send_message("TIME", data)
def reinstall_app(self, name, pbz_path):
"""
A convenience method to uninstall and install an app.
This will only work if the app hasn't changed names between the new and old versions.
"""
apps = self.get_appbank_status()
for app in apps["apps"]:
if app["name"] == name:
self.remove_app(app["id"], app["index"])
self.install_app(pbz_path)
def install_app(self, pbz_path):
"""
Install an app bundle (*.pbw) to the target Pebble.
This will pick the first free app-bank available.
"""
bundle = PebbleBundle(pbz_path)
if not bundle.is_app_bundle():
raise PebbleError(self.id, "This is not an app bundle")
binary = bundle.zip.read(
bundle.get_application_info()['name'])
if bundle.has_resources():
resources = bundle.zip.read(
bundle.get_resources_info()['name'])
else:
resources = None
apps = self.get_appbank_status()
if not apps:
raise PebbleError(self.id, "could not obtain app list; try again")
first_free = 1
for app in apps["apps"]:
if app["index"] == first_free:
first_free += 1
if first_free == apps["banks"]:
raise PebbleError(self.id, "All %d app banks are full" % apps["banks"])
log.debug("Attempting to add app to bank %d of %d" % (first_free, apps["banks"]))
client = PutBytesClient(self, first_free, "BINARY", binary)
self.register_endpoint("PUTBYTES", client.handle_message)
client.init()
while not client._done and not client._error:
pass
if client._error:
raise PebbleError(self.id, "Failed to send application binary %s/pebble-app.bin" % pbz_path)
if resources:
client = PutBytesClient(self, first_free, "RESOURCES", resources)
self.register_endpoint("PUTBYTES", client.handle_message)
client.init()
while not client._done and not client._error:
pass
if client._error:
raise PebbleError(self.id, "Failed to send application resources %s/app_resources.pbpack" % pbz_path)
time.sleep(2)
self._add_app(first_free)
time.sleep(2)
def install_firmware(self, pbz_path, recovery=False):
"""Install a firmware bundle to the target watch."""
resources = None
with zipfile.ZipFile(pbz_path) as pbz:
binary = pbz.read("tintin_fw.bin")
if not recovery:
resources = pbz.read("system_resources.pbpack")
self.system_message("FIRMWARE_START")
time.sleep(2)
if resources:
client = PutBytesClient(self, 0, "SYS_RESOURCES", resources)
self.register_endpoint("PUTBYTES", client.handle_message)
client.init()
while not client._done and not client._error:
pass
if client._error:
raise PebbleError(self.id, "Failed to send firmware resources %s/system_resources.pbpack" % pbz_path)
client = PutBytesClient(self, 0, "RECOVERY" if recovery else "FIRMWARE", binary)
self.register_endpoint("PUTBYTES", client.handle_message)
client.init()
while not client._done and not client._error:
pass
if client._error:
raise PebbleError(self.id, "Failed to send firmware binary %s/tintin_fw.bin" % pbz_path)
self.system_message("FIRMWARE_COMPLETE")
def system_message(self, command):
"""
Send a 'system message' to the watch.
These messages are used to signal important events/state-changes to the watch firmware.
"""
commands = {
"FIRMWARE_AVAILABLE": 0,
"FIRMWARE_START": 1,
"FIRMWARE_COMPLETE": 2,
"FIRMWARE_FAIL": 3,
"FIRMWARE_UP_TO_DATE": 4,
"FIRMWARE_OUT_OF_DATE": 5,
"BLUETOOTH_START_DISCOVERABLE": 6,
"BLUETOOTH_END_DISCOVERABLE": 7
}
if command not in commands:
raise PebbleError(self.id, "Invalid command \"%s\"" % command)
data = pack("!bb", 0, commands[command])
log.debug("Sending command %s (code %d)" % (command, commands[command]))
self._send_message("SYSTEM_MESSAGE", data)
def ping(self, cookie = 0, async = False):
"""Send a 'ping' to the watch to test connectivity."""
data = pack("!bL", 0, cookie)
self._send_message("PING", data)
if not async:
return EndpointSync(self, "PING").get_data()
def reset(self):
"""Reset the watch remotely."""
self._send_message("RESET", "\x00")
def disconnect(self):
"""Disconnect from the target Pebble."""
self._alive = False
self._ser.close()
def remote(self, remote_app):
app_string = {
"ITUNES" : "iTunes",
"KEYNOTE" : "Keynote",
"POWERPOINT" : "PowerPoint"
}
self._remote_app = remote_app.upper()
log.info("Remote: Control " + app_string[self._remote_app] + " with Pebble" )
self.set_nowplaying_metadata(app_string[self._remote_app], "libpebble", "Pebble")
def _remote_response(self, endpoint, data):
res, = unpack("!b", data)
# log.info("Remote: %s" % res)
cmd = ""
if res == 1:
cmd = self._remote_commands[self._remote_app][0]
elif res == 4:
cmd = self._remote_commands[self._remote_app][1]
elif res == 5:
cmd = self._remote_commands[self._remote_app][2]
os.system(cmd)
def _add_app(self, index):
data = pack("!bI", 3, index)
self._send_message("APP_MANAGER", data)
def _ping_response(self, endpoint, data):
restype, retcookie = unpack("!bL", data)
return retcookie
def _get_time_response(self, endpoint, data):
restype, timestamp = unpack("!bL", data)
return timestamp
def _system_message_response(self, endpoint, data):
if len(data) == 2:
log.info("Got system message %s" % repr(unpack('!bb', data)))
else:
log.info("Got 'unknown' system message...")
def _log_response(self, endpoint, data):
if (len(data) < 8):
log.warn("Unable to decode log message (length %d is less than 8)" % len(data))
            return
timestamp, level, msgsize, linenumber = unpack("!Ibbh", data[:8])
filename = data[8:24].decode('utf-8')
message = data[24:24+msgsize].decode('utf-8')
log_levels = {
0: "*",
1: "E",
50: "W",
100: "I",
200: "D",
250: "V"
}
level = log_levels[level] if level in log_levels else "?"
print timestamp, level, filename, linenumber, message
def _appbank_status_response(self, endpoint, data):
apps = {}
restype, = unpack("!b", data[0])
if restype == 1:
apps["banks"], apps_installed = unpack("!II", data[1:9])
apps["apps"] = []
appinfo_size = 78
offset = 9
for i in xrange(apps_installed):
app = {}
app["id"], app["index"], app["name"], app["company"], app["flags"], app["version"] = \
unpack("!II32s32sIH", data[offset:offset+appinfo_size])
app["name"] = app["name"].replace("\x00", "")
app["company"] = app["company"].replace("\x00", "")
apps["apps"] += [app]
offset += appinfo_size
return apps
def _version_response(self, endpoint, data):
fw_names = {
0: "normal_fw",
1: "recovery_fw"
}
resp = {}
for i in xrange(2):
fwver_size = 47
offset = i*fwver_size+1
fw = {}
fw["timestamp"],fw["version"],fw["commit"],fw["is_recovery"], \
fw["hardware_platform"],fw["metadata_ver"] = \
unpack("!i32s8s?bb", data[offset:offset+fwver_size])
fw["version"] = fw["version"].replace("\x00", "")
fw["commit"] = fw["commit"].replace("\x00", "")
fw_name = fw_names[i]
resp[fw_name] = fw
resp["bootloader_timestamp"],resp["hw_version"],resp["serial"] = \
unpack("!L9s12s", data[95:120])
resp["hw_version"] = resp["hw_version"].replace("\x00","")
btmac_hex = binascii.hexlify(data[120:126])
resp["btmac"] = ":".join([btmac_hex[i:i+2].upper() for i in reversed(xrange(0, 12, 2))])
return resp
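    # Layout assumed by _version_response above: two 47-byte firmware records
    # (normal_fw, recovery_fw) starting at offset 1, each unpacked as
    # "!i32s8s?bb" (timestamp, version, commit, is_recovery, hardware_platform,
    # metadata_ver); bytes 95-119 hold the bootloader timestamp, hardware
    # version and serial ("!L9s12s"); bytes 120-125 are the Bluetooth MAC,
    # stored in reverse byte order.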
class PutBytesClient(object):
states = {
"NOT_STARTED": 0,
"WAIT_FOR_TOKEN": 1,
"IN_PROGRESS": 2,
"COMMIT": 3,
"COMPLETE": 4,
"FAILED": 5
}
transfer_types = {
"FIRMWARE": 1,
"RECOVERY": 2,
"SYS_RESOURCES": 3,
"RESOURCES": 4,
"BINARY": 5
}
def __init__(self, pebble, index, transfer_type, buffer):
self._pebble = pebble
self._state = self.states["NOT_STARTED"]
self._transfer_type = self.transfer_types[transfer_type]
self._buffer = buffer
self._index = index
self._done = False
self._error = False
def init(self):
data = pack("!bIbb", 1, len(self._buffer), self._transfer_type, self._index)
self._pebble._send_message("PUTBYTES", data)
self._state = self.states["WAIT_FOR_TOKEN"]
def wait_for_token(self, resp):
res, = unpack("!b", resp[0])
if res != 1:
log.error("init failed with code %d" % res)
self._error = True
return
self._token, = unpack("!I", resp[1:])
self._left = len(self._buffer)
self._state = self.states["IN_PROGRESS"]
self.send()
def in_progress(self, resp):
res, = unpack("!b", resp[0])
if res != 1:
self.abort()
return
if self._left > 0:
self.send()
log.debug("Sent %d of %d bytes" % (len(self._buffer)-self._left, len(self._buffer)))
else:
self._state = self.states["COMMIT"]
self.commit()
def commit(self):
data = pack("!bII", 3, self._token & 0xFFFFFFFF, stm32_crc.crc32(self._buffer))
self._pebble._send_message("PUTBYTES", data)
def handle_commit(self, resp):
res, = unpack("!b", resp[0])
if res != 1:
self.abort()
return
self._state = self.states["COMPLETE"]
self.complete()
def complete(self):
data = pack("!bI", 5, self._token & 0xFFFFFFFF)
self._pebble._send_message("PUTBYTES", data)
def handle_complete(self, resp):
res, = unpack("!b", resp[0])
if res != 1:
self.abort()
return
self._done = True
def abort(self):
msgdata = pack("!bI", 4, self._token & 0xFFFFFFFF)
        self._pebble._send_message("PUTBYTES", msgdata)
self._error = True
def send(self):
datalen = min(self._left, 2000)
rg = len(self._buffer)-self._left
msgdata = pack("!bII", 2, self._token & 0xFFFFFFFF, datalen)
msgdata += self._buffer[rg:rg+datalen]
self._pebble._send_message("PUTBYTES", msgdata)
self._left -= datalen
def handle_message(self, endpoint, resp):
if self._state == self.states["WAIT_FOR_TOKEN"]:
self.wait_for_token(resp)
elif self._state == self.states["IN_PROGRESS"]:
self.in_progress(resp)
elif self._state == self.states["COMMIT"]:
self.handle_commit(resp)
elif self._state == self.states["COMPLETE"]:
self.handle_complete(resp)
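    # Rough shape of a PUTBYTES transfer as driven by this client:
    #   init()      -> (1, total_length, transfer_type, index); the watch
    #                  answers with a 4-byte token (wait_for_token)
    #   send()      -> (2, token, chunk_length) + up to 2000 bytes of payload,
    #                  repeated until _left reaches 0
    #   commit()    -> (3, token, stm32_crc.crc32(buffer))
    #   complete()  -> (5, token); _done is set once acknowledged
    #   abort()     -> (4, token) whenever the watch reports an error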
|
|
# -*- coding: utf-8 -*-
"""
unittests for spike_train_surrogates module.
:copyright: Copyright 2014-2016 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
import unittest
import elephant.spike_train_surrogates as surr
import numpy as np
import quantities as pq
import neo
np.random.seed(0)
class SurrogatesTestCase(unittest.TestCase):
def test_dither_spikes_output_format(self):
st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
nr_surr = 2
dither = 10 * pq.ms
surrs = surr.dither_spikes(st, dither=dither, n=nr_surr)
self.assertIsInstance(surrs, list)
self.assertEqual(len(surrs), nr_surr)
for surrog in surrs:
            self.assertIsInstance(surrog, neo.SpikeTrain)
self.assertEqual(surrog.units, st.units)
self.assertEqual(surrog.t_start, st.t_start)
self.assertEqual(surrog.t_stop, st.t_stop)
self.assertEqual(len(surrog), len(st))
def test_dither_spikes_empty_train(self):
st = neo.SpikeTrain([] * pq.ms, t_stop=500 * pq.ms)
dither = 10 * pq.ms
surrog = surr.dither_spikes(st, dither=dither, n=1)[0]
self.assertEqual(len(surrog), 0)
def test_dither_spikes_output_decimals(self):
st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
nr_surr = 2
dither = 10 * pq.ms
np.random.seed(42)
surrs = surr.dither_spikes(st, dither=dither, decimals=3, n=nr_surr)
np.random.seed(42)
dither_values = np.random.random_sample((nr_surr, len(st)))
expected_non_dithered = np.sum(dither_values==0)
observed_non_dithered = 0
for surrog in surrs:
for i in range(len(surrog)):
if surrog[i] - int(surrog[i]) * pq.ms == surrog[i] - surrog[i]:
observed_non_dithered += 1
self.assertEqual(observed_non_dithered, expected_non_dithered)
def test_dither_spikes_false_edges(self):
st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
nr_surr = 2
dither = 10 * pq.ms
surrs = surr.dither_spikes(st, dither=dither, n=nr_surr, edges=False)
for surrog in surrs:
for i in range(len(surrog)):
self.assertLessEqual(surrog[i], st.t_stop)
def test_randomise_spikes_output_format(self):
st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
nr_surr = 2
surrs = surr.randomise_spikes(st, n=nr_surr)
self.assertIsInstance(surrs, list)
self.assertEqual(len(surrs), nr_surr)
for surrog in surrs:
            self.assertIsInstance(surrog, neo.SpikeTrain)
self.assertEqual(surrog.units, st.units)
self.assertEqual(surrog.t_start, st.t_start)
self.assertEqual(surrog.t_stop, st.t_stop)
self.assertEqual(len(surrog), len(st))
def test_randomise_spikes_empty_train(self):
st = neo.SpikeTrain([] * pq.ms, t_stop=500 * pq.ms)
surrog = surr.randomise_spikes(st, n=1)[0]
self.assertEqual(len(surrog), 0)
def test_randomise_spikes_output_decimals(self):
st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
nr_surr = 2
surrs = surr.randomise_spikes(st, n=nr_surr, decimals=3)
for surrog in surrs:
for i in range(len(surrog)):
self.assertNotEqual(surrog[i] - int(surrog[i]) * pq.ms,
surrog[i] - surrog[i])
def test_shuffle_isis_output_format(self):
st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
nr_surr = 2
surrs = surr.shuffle_isis(st, n=nr_surr)
self.assertIsInstance(surrs, list)
self.assertEqual(len(surrs), nr_surr)
for surrog in surrs:
            self.assertIsInstance(surrog, neo.SpikeTrain)
self.assertEqual(surrog.units, st.units)
self.assertEqual(surrog.t_start, st.t_start)
self.assertEqual(surrog.t_stop, st.t_stop)
self.assertEqual(len(surrog), len(st))
def test_shuffle_isis_empty_train(self):
st = neo.SpikeTrain([] * pq.ms, t_stop=500 * pq.ms)
surrog = surr.shuffle_isis(st, n=1)[0]
self.assertEqual(len(surrog), 0)
def test_shuffle_isis_same_isis(self):
st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
surrog = surr.shuffle_isis(st, n=1)[0]
st_pq = st.view(pq.Quantity)
surr_pq = surrog.view(pq.Quantity)
isi0_orig = st[0] - st.t_start
ISIs_orig = np.sort([isi0_orig] + [isi for isi in np.diff(st_pq)])
isi0_surr = surrog[0] - surrog.t_start
ISIs_surr = np.sort([isi0_surr] + [isi for isi in np.diff(surr_pq)])
self.assertTrue(np.all(ISIs_orig == ISIs_surr))
def test_shuffle_isis_output_decimals(self):
st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
surrog = surr.shuffle_isis(st, n=1, decimals=95)[0]
st_pq = st.view(pq.Quantity)
surr_pq = surrog.view(pq.Quantity)
isi0_orig = st[0] - st.t_start
ISIs_orig = np.sort([isi0_orig] + [isi for isi in np.diff(st_pq)])
isi0_surr = surrog[0] - surrog.t_start
ISIs_surr = np.sort([isi0_surr] + [isi for isi in np.diff(surr_pq)])
self.assertTrue(np.all(ISIs_orig == ISIs_surr))
def test_dither_spike_train_output_format(self):
st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
nr_surr = 2
shift = 10 * pq.ms
surrs = surr.dither_spike_train(st, shift=shift, n=nr_surr)
self.assertIsInstance(surrs, list)
self.assertEqual(len(surrs), nr_surr)
for surrog in surrs:
            self.assertIsInstance(surrog, neo.SpikeTrain)
self.assertEqual(surrog.units, st.units)
self.assertEqual(surrog.t_start, st.t_start)
self.assertEqual(surrog.t_stop, st.t_stop)
self.assertEqual(len(surrog), len(st))
def test_dither_spike_train_empty_train(self):
st = neo.SpikeTrain([] * pq.ms, t_stop=500 * pq.ms)
shift = 10 * pq.ms
surrog = surr.dither_spike_train(st, shift=shift, n=1)[0]
self.assertEqual(len(surrog), 0)
def test_dither_spike_train_output_decimals(self):
st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
nr_surr = 2
shift = 10 * pq.ms
surrs = surr.dither_spike_train(st, shift=shift, n=nr_surr, decimals=3)
for surrog in surrs:
for i in range(len(surrog)):
self.assertNotEqual(surrog[i] - int(surrog[i]) * pq.ms,
surrog[i] - surrog[i])
def test_dither_spike_train_false_edges(self):
st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
nr_surr = 2
shift = 10 * pq.ms
surrs = surr.dither_spike_train(
st, shift=shift, n=nr_surr, edges=False)
for surrog in surrs:
for i in range(len(surrog)):
self.assertLessEqual(surrog[i], st.t_stop)
def test_jitter_spikes_output_format(self):
st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
nr_surr = 2
binsize = 100 * pq.ms
surrs = surr.jitter_spikes(st, binsize=binsize, n=nr_surr)
self.assertIsInstance(surrs, list)
self.assertEqual(len(surrs), nr_surr)
for surrog in surrs:
            self.assertIsInstance(surrog, neo.SpikeTrain)
self.assertEqual(surrog.units, st.units)
self.assertEqual(surrog.t_start, st.t_start)
self.assertEqual(surrog.t_stop, st.t_stop)
self.assertEqual(len(surrog), len(st))
def test_jitter_spikes_empty_train(self):
st = neo.SpikeTrain([] * pq.ms, t_stop=500 * pq.ms)
binsize = 75 * pq.ms
surrog = surr.jitter_spikes(st, binsize=binsize, n=1)[0]
self.assertEqual(len(surrog), 0)
def test_jitter_spikes_same_bins(self):
st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
binsize = 100 * pq.ms
surrog = surr.jitter_spikes(st, binsize=binsize, n=1)[0]
bin_ids_orig = np.array((st.view(pq.Quantity) / binsize).rescale(
pq.dimensionless).magnitude, dtype=int)
bin_ids_surr = np.array((surrog.view(pq.Quantity) / binsize).rescale(
pq.dimensionless).magnitude, dtype=int)
self.assertTrue(np.all(bin_ids_orig == bin_ids_surr))
# Bug encountered when the original and surrogate trains have
# different number of spikes
self.assertEqual(len(st), len(surrog))
def test_jitter_spikes_unequal_binsize(self):
st = neo.SpikeTrain([90, 150, 180, 480] * pq.ms, t_stop=500 * pq.ms)
binsize = 75 * pq.ms
surrog = surr.jitter_spikes(st, binsize=binsize, n=1)[0]
bin_ids_orig = np.array((st.view(pq.Quantity) / binsize).rescale(
pq.dimensionless).magnitude, dtype=int)
bin_ids_surr = np.array((surrog.view(pq.Quantity) / binsize).rescale(
pq.dimensionless).magnitude, dtype=int)
self.assertTrue(np.all(bin_ids_orig == bin_ids_surr))
def test_surr_method(self):
st = neo.SpikeTrain([90, 150, 180, 350] * pq.ms, t_stop=500 * pq.ms)
nr_surr = 2
surrs = surr.surrogates(st, dt=3 * pq.ms, n=nr_surr,
surr_method='shuffle_isis', edges=False)
self.assertRaises(ValueError, surr.surrogates, st, n=1,
surr_method='spike_shifting',
dt=None, decimals=None, edges=True)
self.assertTrue(len(surrs) == nr_surr)
nr_surr2 = 4
surrs2 = surr.surrogates(st, dt=5 * pq.ms, n=nr_surr2,
surr_method='dither_spike_train', edges=True)
for surrog in surrs:
            self.assertTrue(isinstance(surrog, neo.SpikeTrain))
self.assertEqual(surrog.units, st.units)
self.assertEqual(surrog.t_start, st.t_start)
self.assertEqual(surrog.t_stop, st.t_stop)
self.assertEqual(len(surrog), len(st))
self.assertTrue(len(surrs) == nr_surr)
for surrog in surrs2:
            self.assertTrue(isinstance(surrog, neo.SpikeTrain))
self.assertEqual(surrog.units, st.units)
self.assertEqual(surrog.t_start, st.t_start)
self.assertEqual(surrog.t_stop, st.t_stop)
self.assertEqual(len(surrog), len(st))
self.assertTrue(len(surrs2) == nr_surr2)
def suite():
suite = unittest.makeSuite(SurrogatesTestCase, 'test')
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Script to write and assist SOAPdenovo assembly.
"""
import os.path as op
import sys
from jcvi.formats.fastq import guessoffset, readlen, is_fastq
from jcvi.assembly.base import FastqNamings, Library, get_libs
from jcvi.apps.base import OptionParser, ActionDispatcher, need_update, sh
class FillLine(object):
def __init__(self, row):
args = row.split()
self.start = int(args[0])
self.end = int(args[1])
self.leftextend = int(args[2])
self.rightextend = int(args[3])
self.closed = int(args[4]) == 1
self.extendlength = int(args[5])
self.before = int(args[6])
self.after = int(args[7])
# Convert from unsigned to signed
# <http://stackoverflow.com/questions/1375897/how-to-get-the-signed-integer-value-of-a-long-in-python>
if self.after > 0 and (self.after & 0x80000000):
self.after += -0x100000000
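        # e.g. a raw value of 0xFFFFFFFF (4294967295) has bit 0x80000000 set,
        # so it becomes 4294967295 - 0x100000000 = -1 after this adjustment.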
@property
def delta(self):
return self.after - self.before
def main():
actions = (
("clean", "clean and dedup paired FASTQ files"),
("correct", "correct reads using ErrorCorrection"),
("prepare", "prepare SOAP config files and run script"),
("fillstats", "build stats on .fill file from GapCloser"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
SOAPHEADER = """
P={0}
K={1}
S=soap.config
G=soap.gc.config
C={2}
A=asm$K
"""
GCRUN = (
"GapCloser_v1.12 -a ${A}.scafSeq -b $G -l 155 -o ${A}.closed.scafSeq -p 31 -t $P"
)
GCRUNG = "GapCloser_v1.12 -a {0} -b $G -l 155 -o {1} -p 31 -t $P"
SOAPRUN = (
"""
$C pregraph -s $S -d 1 -K $K -o $A -R -p $P
$C contig -s $S -g $A -M 1 -R -p $P
$C map -s $S -g $A -p $P
$C scaff -g $A -F -p $P
"""
+ GCRUN
)
SCFRUN = (
"""
prepare -K $K -c %s -g $A
$C map -s $S -g $A -p $P
$C scaff -z -g $A -F -p $P
"""
+ GCRUN
)
def get_size(filename):
library_name = lambda x: "-".join(op.basename(x).split(".")[0].split("-")[:2])
lib = Library(library_name(filename))
return lib.size
def correct(args):
"""
%prog correct *.fastq
Correct reads using ErrorCorrection. Only PE will be used to build the K-mer
table.
"""
p = OptionParser(correct.__doc__)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
lstfile = "reads2cor.lst"
fw = open(lstfile, "w")
print("\n".join(x for x in args if x[:2] == "PE"), file=fw)
fw.close()
p1 = args[0]
offset = guessoffset([p1])
cpus = opts.cpus
freq = "output.freq.cz"
freqlen = freq + ".len"
if need_update(args, (freq, freqlen)):
cmd = "KmerFreq_AR_v2.0 -k 17 -c -1 -q {0}".format(offset)
cmd += " -m 1 -t {0}".format(cpus)
cmd += " -p output {0}".format(lstfile)
sh(cmd)
fw = open(lstfile, "w")
print("\n".join(args), file=fw)
fw.close()
cmd = "Corrector_AR_v2.0 -k 17 -l 3 -m 5 -c 5 -a 0 -e 1 -w 0 -r 45"
cmd += " -Q {0} -q 30 -x 8 -t {1} -o 1 ".format(offset, cpus)
cmd += " {0} {1} {2}".format(freq, freqlen, lstfile)
sh(cmd)
def clean(args):
"""
%prog clean 1.fastq 2.fastq [insertsize]
Clean and dedup paired FASTQ files.
"""
p = OptionParser(clean.__doc__)
p.add_option("-a", default=0, type="int", help="Trim length at 5' end")
p.add_option("-b", default=50, type="int", help="Trim length at 3' end")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) == 2:
p1, p2 = args
size = get_size(p1)
elif len(args) == 3:
p1, p2, size = args
size = int(size)
else:
sys.exit(not p.print_help())
pf = p1.split(".")[0]
cpus = opts.cpus
offset = guessoffset([p1])
a, b = opts.a, opts.b
p1_clean = p1 + ".clean"
p1_cleangz = p1_clean + ".gz"
p2_clean = p2 + ".clean"
p2_cleangz = p2_clean + ".gz"
if need_update([p1, p2], [p1_cleangz, p2_cleangz]):
cmd = "SOAPfilter_v2.0 -t {0} -m 2000000 -p -y -z -g".format(cpus)
cmd += " -q {0} -w 10 -B 50 -f 0".format(offset)
cmd += " -l {0} -a {1} -b {2} -c {1} -d {2}".format(size, a, b, a, b)
cmd += " {0} {1} {2}.clean.stat {3} {4}".format(p1, p2, pf, p1_clean, p2_clean)
sh(cmd)
def fillstats(args):
"""
%prog fillstats genome.fill
Build stats on .fill file from GapCloser.
"""
from jcvi.utils.cbook import SummaryStats, percentage, thousands
p = OptionParser(fillstats.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(fillfile,) = args
fp = open(fillfile)
scaffolds = 0
gaps = []
for row in fp:
if row[0] == ">":
scaffolds += 1
continue
fl = FillLine(row)
gaps.append(fl)
print("{0} scaffolds in total".format(scaffolds), file=sys.stderr)
closed = [x for x in gaps if x.closed]
closedbp = sum(x.before for x in closed)
notClosed = [x for x in gaps if not x.closed]
notClosedbp = sum(x.before for x in notClosed)
totalgaps = len(closed) + len(notClosed)
print(
"Closed gaps: {0} size: {1} bp".format(
percentage(len(closed), totalgaps), thousands(closedbp)
),
file=sys.stderr,
)
ss = SummaryStats([x.after for x in closed])
print(ss, file=sys.stderr)
ss = SummaryStats([x.delta for x in closed])
print("Delta:", ss, file=sys.stderr)
print(
"Remaining gaps: {0} size: {1} bp".format(
percentage(len(notClosed), totalgaps), thousands(notClosedbp)
),
file=sys.stderr,
)
ss = SummaryStats([x.after for x in notClosed])
print(ss, file=sys.stderr)
def prepare(args):
"""
%prog prepare *.fastq
Scan input fastq files (see below) and write SOAP config files based
on inputfiles. Use "--scaffold contigs.fasta" to perform scaffolding.
"""
from jcvi.formats.base import write_file
p = OptionParser(prepare.__doc__ + FastqNamings)
p.add_option("-K", default=45, type="int", help="K-mer size")
p.add_option(
"--assemble_1st_rank_only",
default=False,
action="store_true",
help="Assemble the first rank only, other libs asm_flags=2",
)
p.add_option("--scaffold", help="Only perform scaffolding")
p.add_option("--gapclose", help="Only perform gap closure")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
fnames = args
K = opts.K
for x in fnames:
assert op.exists(x), "File `{0}` not found.".format(x)
a1st = opts.assemble_1st_rank_only
cfgfile = "soap.config"
gc_cfgfile = "soap.gc.config"
fw = open(cfgfile, "w")
fw_gc = open(gc_cfgfile, "w")
libs = get_libs(fnames)
rank = 0
max_rd_len = max(readlen([f]) for f in fnames)
block = "max_rd_len={0}\n".format(max_rd_len)
for stream in (sys.stderr, fw, fw_gc):
print(block, file=stream)
# Collect singletons first
singletons = []
for lib, fs in libs:
if lib.size == 0:
singletons += fs
continue
for lib, fs in libs:
size = lib.size
if size == 0:
continue
rank += 1
block = "[LIB]\n"
block += "avg_ins={0}\n".format(size)
block += "reverse_seq={0}\n".format(lib.reverse_seq)
asm_flags = 2 if (rank > 1 and a1st) else lib.asm_flags
block += "asm_flags={0}\n".format(asm_flags)
block += "rank={0}\n".format(rank)
if lib.reverse_seq:
pair_num_cutoff = 3
block += "pair_num_cutoff={0}\n".format(pair_num_cutoff)
block += "map_len=35\n"
for f in fs:
if ".1." in f:
tag = "q1"
elif ".2." in f:
tag = "q2"
block += "{0}={1}\n".format(tag, f)
if rank == 1:
for s in singletons:
tag = "q" if is_fastq(s) else "f"
block += tag + "={0}\n".format(s)
print(block, file=sys.stderr)
print(block, file=fw)
if asm_flags > 2:
print(block, file=fw_gc)
runfile = "run.sh"
scaffold = opts.scaffold
bb = 63 if K <= 63 else 127
binary = "SOAPdenovo-{0}mer".format(bb)
header = SOAPHEADER.format(opts.cpus, K, binary)
if opts.gapclose:
gapclose = opts.gapclose
outfile = gapclose.rsplit(".", 1)[0] + ".closed.fasta"
template = header + GCRUNG.format(gapclose, outfile)
else:
template = header + (SCFRUN % scaffold if scaffold else SOAPRUN)
write_file(runfile, template)
fw.close()
fw_gc.close()
if __name__ == "__main__":
main()
|
|
import gzip
import pandas as pd
import numpy as np
import pandas.util.testing as tm
import os
import dask
from operator import getitem
import pytest
from toolz import valmap
import tempfile
import shutil
from time import sleep
import dask.array as da
import dask.dataframe as dd
from dask.dataframe.io import (read_csv, file_size, categories_and_quantiles,
dataframe_from_ctable, from_array, from_bcolz, infer_header,
from_dask_array)
from dask.compatibility import StringIO
from dask.utils import filetext, tmpfile, ignoring
from dask.async import get_sync
########
# CSVS #
########
text = """
name,amount
Alice,100
Bob,-200
Charlie,300
Dennis,400
Edith,-500
Frank,600
""".strip()
def test_read_csv():
with filetext(text) as fn:
f = read_csv(fn, chunkbytes=30)
assert list(f.columns) == ['name', 'amount']
assert f.npartitions > 1
result = f.compute(get=dask.get).sort('name')
assert (result.values == pd.read_csv(fn).sort('name').values).all()
def test_read_gzip_csv():
with filetext(text.encode(), open=gzip.open) as fn:
f = read_csv(fn, chunkbytes=30, compression='gzip')
assert list(f.columns) == ['name', 'amount']
assert f.npartitions > 1
result = f.compute(get=dask.get).sort('name')
assert (result.values == pd.read_csv(fn, compression='gzip').sort('name').values).all()
def test_file_size():
counts = (len(text), len(text) + text.count('\n'))
with filetext(text) as fn:
assert file_size(fn) in counts
with filetext(text.encode(), open=gzip.open) as fn:
assert file_size(fn, 'gzip') in counts
def test_categories_and_quantiles():
with filetext(text) as fn:
cats, quant = categories_and_quantiles(fn, (), {})
assert list(cats['name']) == ['Alice', 'Bob', 'Charlie', 'Dennis', 'Edith', 'Frank']
cats, quant = categories_and_quantiles(fn, (), {}, index='amount',
chunkbytes=30)
assert len(quant) == 4
assert (-600 < quant[1:]).all() and (600 > quant[:-1]).all()
assert quant[0] == -500
assert quant[-1] == 600
def test_read_multiple_csv():
try:
with open('_foo.1.csv', 'w') as f:
f.write(text)
with open('_foo.2.csv', 'w') as f:
f.write(text)
df = read_csv('_foo.*.csv')
assert (len(read_csv('_foo.*.csv').compute()) ==
len(read_csv('_foo.1.csv').compute()) * 2)
finally:
os.remove('_foo.1.csv')
os.remove('_foo.2.csv')
def test_read_csv_categorize():
with filetext(text) as fn:
f = read_csv(fn, chunkbytes=30, categorize=True)
assert list(f.dtypes) == ['category', 'i8']
expected = pd.read_csv(fn)
expected['name'] = expected.name.astype('category')
assert (f.dtypes == expected.dtypes).all()
assert len(f.compute().name.cat.categories) == 6
def normalize_text(s):
return '\n'.join(map(str.strip, s.strip().split('\n')))
def test_consistent_dtypes():
text = normalize_text("""
name,amount
Alice,100.5
Bob,-200.5
Charlie,300
Dennis,400
Edith,-500
Frank,600
""")
with filetext(text) as fn:
df = read_csv(fn, chunkbytes=30)
assert isinstance(df.amount.sum().compute(), float)
def test_infer_header():
with filetext('name,val\nAlice,100\nNA,200') as fn:
assert infer_header(fn) == True
with filetext('Alice,100\nNA,200') as fn:
assert infer_header(fn) == False
def eq(a, b):
if hasattr(a, 'dask'):
a = a.compute(get=dask.get)
if hasattr(b, 'dask'):
b = b.compute(get=dask.get)
if isinstance(a, pd.DataFrame):
a = a.sort_index()
b = b.sort_index()
tm.assert_frame_equal(a, b)
return True
if isinstance(a, pd.Series):
tm.assert_series_equal(a, b)
return True
assert np.allclose(a, b)
return True
datetime_csv_file = """
name,amount,when
Alice,100,2014-01-01
Bob,200,2014-01-01
Charlie,300,2014-01-01
Dan,400,2014-01-01
""".strip()
def test_read_csv_categorize_with_parse_dates():
with filetext(datetime_csv_file) as fn:
f = read_csv(fn, chunkbytes=30, categorize=True, parse_dates=['when'])
assert list(f.dtypes) == ['category', 'i8', 'M8[ns]']
def test_read_csv_categorize_and_index():
with filetext(text) as fn:
f = read_csv(fn, chunkbytes=20, index='amount')
result = f.compute(get=get_sync)
assert result.index.name == 'amount'
blocks = dd.DataFrame._get(f.dask, f._keys(), get=get_sync)
for i, block in enumerate(blocks):
if i < len(f.divisions):
assert (block.index <= f.divisions[i + 1]).all()
if i > 0:
assert (block.index > f.divisions[i]).all()
expected = pd.read_csv(fn).set_index('amount')
expected['name'] = expected.name.astype('category')
result = result.sort()
expected = expected.sort()
assert eq(result, expected)
def test_usecols():
with filetext(datetime_csv_file) as fn:
df = read_csv(fn, chunkbytes=30, usecols=['when', 'amount'])
expected = pd.read_csv(fn, usecols=['when', 'amount'])
assert (df.compute().values == expected.values).all()
####################
# Arrays and BColz #
####################
def test_from_array():
x = np.arange(10 * 3).reshape(10, 3)
d = dd.from_array(x, chunksize=4)
assert list(d.columns) == ['0', '1', '2']
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().values == x).all()
d = dd.from_array(x, chunksize=4, columns=list('abc'))
assert list(d.columns) == ['a', 'b', 'c']
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().values == x).all()
pytest.raises(ValueError, dd.from_array, np.ones(shape=(10, 10, 10)))
def test_from_array_with_record_dtype():
x = np.array([(i, i*10) for i in range(10)],
dtype=[('a', 'i4'), ('b', 'i4')])
d = dd.from_array(x, chunksize=4)
assert list(d.columns) == ['a', 'b']
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().to_records(index=False) == x).all()
def test_from_bcolz():
bcolz = pytest.importorskip('bcolz')
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'])
d = dd.from_bcolz(t, chunksize=2)
assert d.npartitions == 2
assert str(d.dtypes['a']) == 'category'
assert list(d.x.compute(get=get_sync)) == [1, 2, 3]
assert list(d.a.compute(get=get_sync)) == ['a', 'b', 'a']
d = dd.from_bcolz(t, chunksize=2, index='x')
L = list(d.index.compute(get=get_sync))
assert L == [1, 2, 3] or L == [1, 3, 2]
# Names
assert sorted(dd.from_bcolz(t, chunksize=2).dask) == \
sorted(dd.from_bcolz(t, chunksize=2).dask)
assert sorted(dd.from_bcolz(t, chunksize=2).dask) != \
sorted(dd.from_bcolz(t, chunksize=3).dask)
dsk = dd.from_bcolz(t, chunksize=3).dask
t.append((4, 4., 'b'))
t.flush()
assert sorted(dd.from_bcolz(t, chunksize=2).dask) != \
sorted(dsk)
def test_from_bcolz_filename():
bcolz = pytest.importorskip('bcolz')
with tmpfile('.bcolz') as fn:
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'],
rootdir=fn)
t.flush()
d = dd.from_bcolz(fn, chunksize=2)
assert list(d.x.compute()) == [1, 2, 3]
def test_skipinitialspace():
text = normalize_text("""
name, amount
Alice,100
Bob,-200
Charlie,300
Dennis,400
Edith,-500
Frank,600
""")
with filetext(text) as fn:
df = dd.read_csv(fn, skipinitialspace=True, chunkbytes=20)
assert 'amount' in df.columns
assert df.amount.max().compute() == 600
def test_consistent_dtypes_in_multiple_files():
text1 = normalize_text("""
name,amount
Alice,100
Bob,-200
Charlie,300
""")
text2 = normalize_text("""
name,amount
1,400
2,-500
Frank,600
""")
try:
with open('_foo.1.csv', 'w') as f:
f.write(text1)
with open('_foo.2.csv', 'w') as f:
f.write(text2)
df = dd.read_csv('_foo.*.csv', chunkbytes=25)
assert df.amount.max().compute() == 600
    finally:
        os.remove('_foo.1.csv')
        os.remove('_foo.2.csv')
@pytest.mark.slow
def test_compression_multiple_files():
tdir = tempfile.mkdtemp()
try:
f = gzip.open(os.path.join(tdir, 'a.csv.gz'), 'wb')
f.write(text.encode())
f.close()
f = gzip.open(os.path.join(tdir, 'b.csv.gz'), 'wb')
f.write(text.encode())
f.close()
df = dd.read_csv(os.path.join(tdir, '*.csv.gz'), compression='gzip')
assert len(df.compute()) == (len(text.split('\n')) - 1) * 2
finally:
shutil.rmtree(tdir)
def test_empty_csv_file():
with filetext('a,b') as fn:
df = dd.read_csv(fn)
assert len(df.compute()) == 0
assert list(df.columns) == ['a', 'b']
def test_from_pandas_dataframe():
a = list('aaaaaaabbbbbbbbccccccc')
df = pd.DataFrame(dict(a=a, b=np.random.randn(len(a))),
index=pd.date_range(start='20120101', periods=len(a)))
ddf = dd.from_pandas(df, 3)
assert len(ddf.dask) == 3
assert len(ddf.divisions) == len(ddf.dask) + 1
assert type(ddf.divisions[0]) == type(df.index[0])
tm.assert_frame_equal(df, ddf.compute())
def test_from_pandas_small():
df = pd.DataFrame({'x': [1, 2, 3]})
for i in [1, 2, 30]:
a = dd.from_pandas(df, i)
assert len(a.compute()) == 3
assert a.divisions[0] == 0
assert a.divisions[-1] == 2
def test_from_pandas_series():
n = 20
s = pd.Series(np.random.randn(n),
index=pd.date_range(start='20120101', periods=n))
ds = dd.from_pandas(s, 3)
assert len(ds.dask) == 3
assert len(ds.divisions) == len(ds.dask) + 1
assert type(ds.divisions[0]) == type(s.index[0])
tm.assert_series_equal(s, ds.compute())
def test_DataFrame_from_dask_array():
x = da.ones((10, 3), chunks=(4, 2))
df = from_dask_array(x, ['a', 'b', 'c'])
assert list(df.columns) == ['a', 'b', 'c']
assert list(df.divisions) == [0, 4, 8, 9]
assert (df.compute(get=get_sync).values == x.compute(get=get_sync)).all()
# dd.from_array should re-route to from_dask_array
df2 = dd.from_array(x, columns=['a', 'b', 'c'])
assert df2.columns == df.columns
assert df2.divisions == df.divisions
def test_Series_from_dask_array():
x = da.ones(10, chunks=4)
ser = from_dask_array(x, 'a')
assert ser.name == 'a'
assert list(ser.divisions) == [0, 4, 8, 9]
assert (ser.compute(get=get_sync).values == x.compute(get=get_sync)).all()
ser = from_dask_array(x)
assert ser.name is None
# dd.from_array should re-route to from_dask_array
ser2 = dd.from_array(x)
assert eq(ser, ser2)
def test_from_dask_array_raises():
x = da.ones((3, 3, 3), chunks=2)
pytest.raises(ValueError, lambda: from_dask_array(x))
x = da.ones((10, 3), chunks=(3, 3))
pytest.raises(ValueError, lambda: from_dask_array(x)) # no columns
# Not enough columns
pytest.raises(ValueError, lambda: from_dask_array(x, columns=['a']))
try:
from_dask_array(x, columns=['hello'])
except Exception as e:
assert 'hello' in str(e)
assert '3' in str(e)
def test_to_castra():
pytest.importorskip('castra')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
c = a.to_castra()
b = c.to_dask()
try:
tm.assert_frame_equal(df, c[:])
tm.assert_frame_equal(b.compute(), df)
finally:
c.drop()
c = a.to_castra(categories=['x'])
try:
assert c[:].dtypes['x'] == 'category'
finally:
c.drop()
c = a.to_castra(sorted_index_column='y')
try:
tm.assert_frame_equal(c[:], df.set_index('y'))
finally:
c.drop()
dsk, keys = a.to_castra(compute=False)
assert isinstance(dsk, dict)
assert isinstance(keys, list)
c, last = keys
assert last[1] == a.npartitions - 1
def test_from_castra():
pytest.importorskip('castra')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
c = a.to_castra()
with_castra = dd.from_castra(c)
with_fn = dd.from_castra(c.path)
with_columns = dd.from_castra(c, 'x')
try:
tm.assert_frame_equal(df, with_castra.compute())
tm.assert_frame_equal(df, with_fn.compute())
tm.assert_series_equal(df.x, with_columns.compute())
finally:
# Calling c.drop() is a race condition on drop from `with_fn.__del__`
# and c.drop. Manually `del`ing gets around this.
del with_fn, c
def test_from_castra_with_selection():
""" Optimizations fuse getitems with load_partitions
We used to use getitem for both column access and selections
"""
pytest.importorskip('castra')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
b = dd.from_castra(a.to_castra())
assert eq(b[b.y > 3].x, df[df.y > 3].x)
def test_to_hdf():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
a = dd.from_pandas(df, 2)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data')
out = pd.read_hdf(fn, '/data')
tm.assert_frame_equal(df, out[:])
with tmpfile('h5') as fn:
a.x.to_hdf(fn, '/data')
out = pd.read_hdf(fn, '/data')
tm.assert_series_equal(df.x, out[:])
def test_read_hdf():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
with tmpfile('h5') as fn:
df.to_hdf(fn, '/data')
try:
dd.read_hdf(fn, '/data', chunksize=2)
assert False
except TypeError as e:
assert "format='table'" in str(e)
with tmpfile('h5') as fn:
df.to_hdf(fn, '/data', format='table')
a = dd.read_hdf(fn, '/data', chunksize=2)
assert a.npartitions == 2
tm.assert_frame_equal(a.compute(), df)
tm.assert_frame_equal(
dd.read_hdf(fn, '/data', chunksize=2, start=1, stop=3).compute(),
pd.read_hdf(fn, '/data', start=1, stop=3))
assert sorted(dd.read_hdf(fn, '/data').dask) == \
sorted(dd.read_hdf(fn, '/data').dask)
def test_to_csv():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
for npartitions in [1, 2]:
a = dd.from_pandas(df, npartitions)
with tmpfile('csv') as fn:
a.to_csv(fn)
result = pd.read_csv(fn, index_col=0)
tm.assert_frame_equal(result, df)
@pytest.mark.xfail
def test_to_csv_gzip():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
for npartitions in [1, 2]:
a = dd.from_pandas(df, npartitions)
with tmpfile('csv') as fn:
a.to_csv(fn, compression='gzip')
result = pd.read_csv(fn, index_col=0, compression='gzip')
tm.assert_frame_equal(result, df)
def test_to_csv_series():
s = pd.Series([1, 2, 3], index=[10, 20, 30], name='foo')
a = dd.from_pandas(s, 2)
with tmpfile('csv') as fn:
with tmpfile('csv') as fn2:
a.to_csv(fn)
s.to_csv(fn2)
with open(fn) as f:
adata = f.read()
with open(fn2) as f:
sdata = f.read()
assert adata == sdata
def test_read_csv_with_nrows():
with filetext(text) as fn:
f = read_csv(fn, nrows=3)
assert list(f.columns) == ['name', 'amount']
assert f.npartitions == 1
assert eq(read_csv(fn, nrows=3), pd.read_csv(fn, nrows=3))
def test_read_csv_raises_on_no_files():
try:
dd.read_csv('21hflkhfisfshf.*.csv')
assert False
except Exception as e:
assert "21hflkhfisfshf.*.csv" in str(e)
def test_read_csv_has_deterministic_name():
with filetext(text) as fn:
a = read_csv(fn)
b = read_csv(fn)
assert a._name == b._name
assert sorted(a.dask.keys()) == sorted(b.dask.keys())
assert isinstance(a._name, str)
c = read_csv(fn, skiprows=1, na_values=[0])
assert a._name != c._name
def test_multiple_read_csv_has_deterministic_name():
try:
with open('_foo.1.csv', 'w') as f:
f.write(text)
with open('_foo.2.csv', 'w') as f:
f.write(text)
a = read_csv('_foo.*.csv')
b = read_csv('_foo.*.csv')
assert sorted(a.dask.keys()) == sorted(b.dask.keys())
finally:
os.remove('_foo.1.csv')
os.remove('_foo.2.csv')
@pytest.mark.slow
def test_read_csv_of_modified_file_has_different_name():
with filetext(text) as fn:
mtime = os.path.getmtime(fn)
sleep(1)
a = read_csv(fn)
sleep(1)
with open(fn, 'a') as f:
f.write('\nGeorge,700')
os.fsync(f)
b = read_csv(fn)
assert sorted(a.dask) != sorted(b.dask)
def test_to_bag():
pytest.importorskip('dask.bag')
a = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
ddf = dd.from_pandas(a, 2)
assert ddf.to_bag().compute(get=get_sync) == list(a.itertuples(False))
assert ddf.to_bag(True).compute(get=get_sync) == list(a.itertuples(True))
assert ddf.x.to_bag(True).compute(get=get_sync) == list(a.x.iteritems())
assert ddf.x.to_bag().compute(get=get_sync) == list(a.x)
def test_csv_expands_dtypes():
with filetext(text) as fn:
a = read_csv(fn, chunkbytes=30, dtype={})
a_kwargs = list(a.dask.values())[0][-1]
b = read_csv(fn, chunkbytes=30)
b_kwargs = list(b.dask.values())[0][-1]
assert a_kwargs['dtype'] == b_kwargs['dtype']
a = read_csv(fn, chunkbytes=30, dtype={'amount': float})
a_kwargs = list(a.dask.values())[0][-1]
assert a_kwargs['dtype']['amount'] == float
def test_report_dtype_correction_on_csvs():
text = 'numbers,names\n'
for i in range(1000):
text += '1,foo\n'
text += '1.5,bar\n'
with filetext(text) as fn:
try:
dd.read_csv(fn).compute(get=get_sync)
assert False
except ValueError as e:
assert "'numbers': 'float64'" in str(e)
|
|
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: checkpoint_ops.cc
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
_generate_vocab_remapping_outputs = ["remapping", "num_present"]
_GenerateVocabRemappingOutput = _collections.namedtuple(
"GenerateVocabRemapping", _generate_vocab_remapping_outputs)
def generate_vocab_remapping(new_vocab_file, old_vocab_file, new_vocab_offset, num_new_vocab, old_vocab_size=-1, name=None):
r"""Given a path to new and old vocabulary files, returns a remapping Tensor of
length `num_new_vocab`, where `remapping[i]` contains the row number in the old
vocabulary that corresponds to row `i` in the new vocabulary (starting at line
`new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`
in the new vocabulary is not in the old vocabulary. The old vocabulary is
constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the
default value of -1.
  `new_vocab_offset` enables
  use in the partitioned variable case, and should generally be set through
  examining partitioning info. Each vocabulary file should be a text file,
  with each line containing a single entity within the vocabulary.
  For example, with `new_vocab_file` a text file containing each of the following
  elements on a single line: `[f0, f1, f2, f3]`, `old_vocab_file = [f1, f0, f3]`,
  `num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be
  `[0, -1, 2]`.
  The op also returns a count of how many entries in the new vocabulary
  were present in the old vocabulary, which is used to calculate the number of
  values to initialize in a weight matrix remapping.
This functionality can be used to remap both row vocabularies (typically,
features) and column vocabularies (typically, classes) from TensorFlow
checkpoints. Note that the partitioning logic relies on contiguous vocabularies
corresponding to div-partitioned variables. Moreover, the underlying remapping
uses an IndexTable (as opposed to an inexact CuckooTable), so client code should
use the corresponding index_table_from_file() as the FeatureColumn framework
does (as opposed to tf.feature_to_id(), which uses a CuckooTable).
Args:
new_vocab_file: A `Tensor` of type `string`. Path to the new vocab file.
old_vocab_file: A `Tensor` of type `string`. Path to the old vocab file.
new_vocab_offset: An `int` that is `>= 0`.
How many entries into the new vocab file to start reading.
num_new_vocab: An `int` that is `>= 0`.
Number of entries in the new vocab file to remap.
old_vocab_size: An optional `int` that is `>= -1`. Defaults to `-1`.
Number of entries in the old vocab file to consider. If -1,
use the entire old vocabulary.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (remapping, num_present).
remapping: A `Tensor` of type `int64`.
num_present: A `Tensor` of type `int32`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
new_vocab_offset = _execute.make_int(new_vocab_offset, "new_vocab_offset")
num_new_vocab = _execute.make_int(num_new_vocab, "num_new_vocab")
if old_vocab_size is None:
old_vocab_size = -1
old_vocab_size = _execute.make_int(old_vocab_size, "old_vocab_size")
_, _, _op = _op_def_lib._apply_op_helper(
"GenerateVocabRemapping", new_vocab_file=new_vocab_file,
old_vocab_file=old_vocab_file, new_vocab_offset=new_vocab_offset,
num_new_vocab=num_new_vocab, old_vocab_size=old_vocab_size, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("new_vocab_offset", _op.get_attr("new_vocab_offset"),
"num_new_vocab", _op.get_attr("num_new_vocab"),
"old_vocab_size", _op.get_attr("old_vocab_size"))
_execute.record_gradient(
"GenerateVocabRemapping", _inputs_flat, _attrs, _result, name)
_result = _GenerateVocabRemappingOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"GenerateVocabRemapping", name, _ctx._post_execution_callbacks,
new_vocab_file, old_vocab_file, "new_vocab_offset", new_vocab_offset,
"num_new_vocab", num_new_vocab, "old_vocab_size", old_vocab_size)
_result = _GenerateVocabRemappingOutput._make(_result)
return _result
except _core._FallbackException:
return generate_vocab_remapping_eager_fallback(
new_vocab_file, old_vocab_file, new_vocab_offset=new_vocab_offset,
num_new_vocab=num_new_vocab, old_vocab_size=old_vocab_size,
name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def generate_vocab_remapping_eager_fallback(new_vocab_file, old_vocab_file, new_vocab_offset, num_new_vocab, old_vocab_size=-1, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function generate_vocab_remapping
"""
_ctx = ctx if ctx else _context.context()
new_vocab_offset = _execute.make_int(new_vocab_offset, "new_vocab_offset")
num_new_vocab = _execute.make_int(num_new_vocab, "num_new_vocab")
if old_vocab_size is None:
old_vocab_size = -1
old_vocab_size = _execute.make_int(old_vocab_size, "old_vocab_size")
new_vocab_file = _ops.convert_to_tensor(new_vocab_file, _dtypes.string)
old_vocab_file = _ops.convert_to_tensor(old_vocab_file, _dtypes.string)
_inputs_flat = [new_vocab_file, old_vocab_file]
_attrs = ("new_vocab_offset", new_vocab_offset, "num_new_vocab",
num_new_vocab, "old_vocab_size", old_vocab_size)
_result = _execute.execute(b"GenerateVocabRemapping", 2,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"GenerateVocabRemapping", _inputs_flat, _attrs, _result, name)
_result = _GenerateVocabRemappingOutput._make(_result)
return _result
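# Hedged usage sketch (added for illustration; this file is machine generated and
# the vocabulary paths below are hypothetical). Following the docstring example
# above, a call such as
#
#   remapping, num_present = generate_vocab_remapping(
#       new_vocab_file="new_vocab.txt",
#       old_vocab_file="old_vocab.txt",
#       new_vocab_offset=1,
#       num_new_vocab=3)
#
# would, for the vocabularies described in that example, evaluate `remapping`
# to [0, -1, 2] and `num_present` to 2.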
def load_and_remap_matrix(ckpt_path, old_tensor_name, row_remapping, col_remapping, initializing_values, num_rows, num_cols, max_rows_in_memory=-1, name=None):
r"""Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint
at `ckpt_path` and potentially reorders its rows and columns using the
specified remappings.
Most users should use one of the wrapper initializers (such as
`tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this
function directly.
The remappings are 1-D tensors with the following properties:
* `row_remapping` must have exactly `num_rows` entries. Row `i` of the output
matrix will be initialized from the row corresponding to index
`row_remapping[i]` in the old `Tensor` from the checkpoint.
* `col_remapping` must have either 0 entries (indicating that no column
reordering is needed) or `num_cols` entries. If specified, column `j` of the
output matrix will be initialized from the column corresponding to index
`col_remapping[j]` in the old `Tensor` from the checkpoint.
* A value of -1 in either of the remappings signifies a "missing" entry. In that
case, values from the `initializing_values` tensor will be used to fill that
missing row or column. If `row_remapping` has `r` missing entries and
`col_remapping` has `c` missing entries, then the following condition must be
true:
`(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)`
The remapping tensors can be generated using the GenerateVocabRemapping op.
As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1],
initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing
the value from row i, column j of the old tensor in the checkpoint, the output
matrix will look like the following:
[[w(1, 0), w(1, 2), 0.5],
[w(0, 0), w(0, 2), -0.5],
[0.25, -0.25, 42]]
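  (Worked check, added for illustration: in the example above there is r = 1
  missing row and c = 1 missing column in a 3 x 3 output, so
  (1 * 3) + (1 * 3) - (1 * 1) = 5, matching len(initializing_values) = 5.)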
Args:
ckpt_path: A `Tensor` of type `string`.
Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from
which the old matrix `Tensor` will be loaded.
old_tensor_name: A `Tensor` of type `string`.
Name of the 2-D `Tensor` to load from checkpoint.
row_remapping: A `Tensor` of type `int64`.
An int `Tensor` of row remappings (generally created by
`generate_vocab_remapping`). Even if no row remapping is needed, this must
still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted
index-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`).
col_remapping: A `Tensor` of type `int64`.
An int `Tensor` of column remappings (generally created by
`generate_vocab_remapping`). May be a size-0 `Tensor` if only row remapping
is to be done (e.g. column ordering is the same).
initializing_values: A `Tensor` of type `float32`.
A float `Tensor` containing values to fill in for cells
in the output matrix that are not loaded from the checkpoint. Length must be
exactly the same as the number of missing / new cells.
num_rows: An `int` that is `>= 0`.
Number of rows (length of the 1st dimension) in the output matrix.
num_cols: An `int` that is `>= 1`.
Number of columns (length of the 2nd dimension) in the output matrix.
max_rows_in_memory: An optional `int`. Defaults to `-1`.
The maximum number of rows to load from the checkpoint at
once. If less than or equal to 0, the entire matrix will be loaded into
memory. Setting this arg trades increased disk reads for lower memory usage.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
num_rows = _execute.make_int(num_rows, "num_rows")
num_cols = _execute.make_int(num_cols, "num_cols")
if max_rows_in_memory is None:
max_rows_in_memory = -1
max_rows_in_memory = _execute.make_int(max_rows_in_memory, "max_rows_in_memory")
_, _, _op = _op_def_lib._apply_op_helper(
"LoadAndRemapMatrix", ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name, row_remapping=row_remapping,
col_remapping=col_remapping, initializing_values=initializing_values,
num_rows=num_rows, num_cols=num_cols,
max_rows_in_memory=max_rows_in_memory, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("num_rows", _op.get_attr("num_rows"), "num_cols",
_op.get_attr("num_cols"), "max_rows_in_memory",
_op.get_attr("max_rows_in_memory"))
_execute.record_gradient(
"LoadAndRemapMatrix", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"LoadAndRemapMatrix", name, _ctx._post_execution_callbacks, ckpt_path,
old_tensor_name, row_remapping, col_remapping, initializing_values,
"num_rows", num_rows, "num_cols", num_cols, "max_rows_in_memory",
max_rows_in_memory)
return _result
except _core._FallbackException:
return load_and_remap_matrix_eager_fallback(
ckpt_path, old_tensor_name, row_remapping, col_remapping,
initializing_values, num_rows=num_rows, num_cols=num_cols,
max_rows_in_memory=max_rows_in_memory, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def load_and_remap_matrix_eager_fallback(ckpt_path, old_tensor_name, row_remapping, col_remapping, initializing_values, num_rows, num_cols, max_rows_in_memory=-1, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function load_and_remap_matrix
"""
_ctx = ctx if ctx else _context.context()
num_rows = _execute.make_int(num_rows, "num_rows")
num_cols = _execute.make_int(num_cols, "num_cols")
if max_rows_in_memory is None:
max_rows_in_memory = -1
max_rows_in_memory = _execute.make_int(max_rows_in_memory, "max_rows_in_memory")
ckpt_path = _ops.convert_to_tensor(ckpt_path, _dtypes.string)
old_tensor_name = _ops.convert_to_tensor(old_tensor_name, _dtypes.string)
row_remapping = _ops.convert_to_tensor(row_remapping, _dtypes.int64)
col_remapping = _ops.convert_to_tensor(col_remapping, _dtypes.int64)
initializing_values = _ops.convert_to_tensor(initializing_values, _dtypes.float32)
_inputs_flat = [ckpt_path, old_tensor_name, row_remapping, col_remapping, initializing_values]
_attrs = ("num_rows", num_rows, "num_cols", num_cols, "max_rows_in_memory",
max_rows_in_memory)
_result = _execute.execute(b"LoadAndRemapMatrix", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"LoadAndRemapMatrix", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "GenerateVocabRemapping"
# input_arg {
# name: "new_vocab_file"
# type: DT_STRING
# }
# input_arg {
# name: "old_vocab_file"
# type: DT_STRING
# }
# output_arg {
# name: "remapping"
# type: DT_INT64
# }
# output_arg {
# name: "num_present"
# type: DT_INT32
# }
# attr {
# name: "new_vocab_offset"
# type: "int"
# has_minimum: true
# }
# attr {
# name: "num_new_vocab"
# type: "int"
# has_minimum: true
# }
# attr {
# name: "old_vocab_size"
# type: "int"
# default_value {
# i: -1
# }
# has_minimum: true
# minimum: -1
# }
# }
# op {
# name: "LoadAndRemapMatrix"
# input_arg {
# name: "ckpt_path"
# type: DT_STRING
# }
# input_arg {
# name: "old_tensor_name"
# type: DT_STRING
# }
# input_arg {
# name: "row_remapping"
# type: DT_INT64
# }
# input_arg {
# name: "col_remapping"
# type: DT_INT64
# }
# input_arg {
# name: "initializing_values"
# type: DT_FLOAT
# }
# output_arg {
# name: "output_matrix"
# type: DT_FLOAT
# }
# attr {
# name: "num_rows"
# type: "int"
# has_minimum: true
# }
# attr {
# name: "num_cols"
# type: "int"
# has_minimum: true
# minimum: 1
# }
# attr {
# name: "max_rows_in_memory"
# type: "int"
# default_value {
# i: -1
# }
# }
# is_stateful: true
# }
_op_def_lib = _InitOpDefLibrary(b"\n\304\001\n\026GenerateVocabRemapping\022\022\n\016new_vocab_file\030\007\022\022\n\016old_vocab_file\030\007\032\r\n\tremapping\030\t\032\017\n\013num_present\030\003\"\031\n\020new_vocab_offset\022\003int(\001\"\026\n\rnum_new_vocab\022\003int(\001\"/\n\016old_vocab_size\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001(\0010\377\377\377\377\377\377\377\377\377\001\n\335\001\n\022LoadAndRemapMatrix\022\r\n\tckpt_path\030\007\022\023\n\017old_tensor_name\030\007\022\021\n\rrow_remapping\030\t\022\021\n\rcol_remapping\030\t\022\027\n\023initializing_values\030\001\032\021\n\routput_matrix\030\001\"\021\n\010num_rows\022\003int(\001\"\023\n\010num_cols\022\003int(\0010\001\"&\n\022max_rows_in_memory\022\003int\032\013\030\377\377\377\377\377\377\377\377\377\001\210\001\001")
|
|
"""Zookeeper Partitioner Implementation
:Maintainer: None
:Status: Unknown
:class:`SetPartitioner` implements a partitioning scheme using
Zookeeper for dividing up resources amongst members of a party.
This is useful when there is a set of resources that should only be
accessed by a single process at a time that multiple processes
across a cluster might want to divide up.
Example Use-Case
----------------
- Multiple workers across a cluster need to divide up a list of queues
so that no two workers own the same queue.
"""
from functools import partial
import logging
import os
import socket
from kazoo.exceptions import KazooException, LockTimeout
from kazoo.protocol.states import KazooState
from kazoo.recipe.watchers import PatientChildrenWatch
log = logging.getLogger(__name__)
class PartitionState(object):
"""High level partition state values
.. attribute:: ALLOCATING
The set needs to be partitioned, and may require an existing
partition set to be released before acquiring a new partition
of the set.
.. attribute:: ACQUIRED
The set has been partitioned and acquired.
.. attribute:: RELEASE
The set needs to be repartitioned, and the current partitions
must be released before a new allocation can be made.
.. attribute:: FAILURE
The set partition has failed. This occurs when the maximum
time to partition the set is exceeded or the Zookeeper session
is lost. The partitioner is unusable after this state and must
be recreated.
"""
ALLOCATING = "ALLOCATING"
ACQUIRED = "ACQUIRED"
RELEASE = "RELEASE"
FAILURE = "FAILURE"
class SetPartitioner(object):
"""Partitions a set amongst members of a party
This class will partition a set amongst members of a party such
that each member will be given zero or more items of the set and
each set item will be given to a single member. When new members
enter or leave the party, the set will be re-partitioned amongst
the members.
When the :class:`SetPartitioner` enters the
:attr:`~PartitionState.FAILURE` state, it is unrecoverable
and a new :class:`SetPartitioner` should be created.
Example:
.. code-block:: python
from kazoo.client import KazooClient
client = KazooClient()
client.start()
qp = client.SetPartitioner(
path='/work_queues', set=('queue-1', 'queue-2', 'queue-3'))
while 1:
if qp.failed:
raise Exception("Lost or unable to acquire partition")
elif qp.release:
qp.release_set()
elif qp.acquired:
for partition in qp:
                    # Do something with each partition
                    pass
elif qp.allocating:
qp.wait_for_acquire()
**State Transitions**
When created, the :class:`SetPartitioner` enters the
:attr:`PartitionState.ALLOCATING` state.
:attr:`~PartitionState.ALLOCATING` ->
:attr:`~PartitionState.ACQUIRED`
Set was partitioned successfully, the partition list assigned
is accessible via list/iter methods or calling list() on the
:class:`SetPartitioner` instance.
:attr:`~PartitionState.ALLOCATING` ->
:attr:`~PartitionState.FAILURE`
Allocating the set failed either due to a Zookeeper session
expiration, or failure to acquire the items of the set within
the timeout period.
:attr:`~PartitionState.ACQUIRED` ->
:attr:`~PartitionState.RELEASE`
The members of the party have changed, and the set needs to be
repartitioned. :meth:`SetPartitioner.release` should be called
as soon as possible.
:attr:`~PartitionState.ACQUIRED` ->
:attr:`~PartitionState.FAILURE`
The current partition was lost due to a Zookeeper session
expiration.
:attr:`~PartitionState.RELEASE` ->
:attr:`~PartitionState.ALLOCATING`
The current partition was released and is being re-allocated.
"""
def __init__(self, client, path, set, partition_func=None,
identifier=None, time_boundary=30, max_reaction_time=1,
state_change_event=None):
"""Create a :class:`~SetPartitioner` instance
:param client: A :class:`~kazoo.client.KazooClient` instance.
:param path: The partition path to use.
:param set: The set of items to partition.
:param partition_func: A function to use to decide how to
partition the set.
:param identifier: An identifier to use for this member of the
party when participating. Defaults to the
hostname + process id.
:param time_boundary: How long the party members must be stable
before allocation can complete.
:param max_reaction_time: Maximum reaction time for party members
change.
:param state_change_event: An optional Event object that will be set
on every state change.
"""
        # Used to differentiate between two identical states occurring at
        # different points in time
self.state_id = 0
self.state = PartitionState.ALLOCATING
self.state_change_event = state_change_event or \
client.handler.event_object()
self._client = client
self._path = path
self._set = set
self._partition_set = []
self._partition_func = partition_func or self._partitioner
self._identifier = identifier or '%s-%s' % (
socket.getfqdn(), os.getpid())
self._locks = []
self._lock_path = '/'.join([path, 'locks'])
self._party_path = '/'.join([path, 'party'])
self._time_boundary = time_boundary
self._max_reaction_time = max_reaction_time
self._acquire_event = client.handler.event_object()
# Create basic path nodes
client.ensure_path(path)
client.ensure_path(self._lock_path)
client.ensure_path(self._party_path)
# Join the party
self._party = client.ShallowParty(self._party_path,
identifier=self._identifier)
self._party.join()
self._state_change = client.handler.rlock_object()
client.add_listener(self._establish_sessionwatch)
# Now watch the party and set the callback on the async result
# so we know when we're ready
self._child_watching(self._allocate_transition, client_handler=True)
def __iter__(self):
"""Return the partitions in this partition set"""
for partition in self._partition_set:
yield partition
@property
def failed(self):
"""Corresponds to the :attr:`PartitionState.FAILURE` state"""
return self.state == PartitionState.FAILURE
@property
def release(self):
"""Corresponds to the :attr:`PartitionState.RELEASE` state"""
return self.state == PartitionState.RELEASE
@property
def allocating(self):
"""Corresponds to the :attr:`PartitionState.ALLOCATING`
state"""
return self.state == PartitionState.ALLOCATING
@property
def acquired(self):
"""Corresponds to the :attr:`PartitionState.ACQUIRED` state"""
return self.state == PartitionState.ACQUIRED
def wait_for_acquire(self, timeout=30):
"""Wait for the set to be partitioned and acquired
:param timeout: How long to wait before returning.
:type timeout: int
"""
self._acquire_event.wait(timeout)
def release_set(self):
"""Call to release the set
This method begins the step of allocating once the set has
been released.
"""
self._release_locks()
if self._locks: # pragma: nocover
# This shouldn't happen, it means we couldn't release our
# locks, abort
self._fail_out()
return
else:
with self._state_change:
if self.failed:
return
self._set_state(PartitionState.ALLOCATING)
self._child_watching(self._allocate_transition, client_handler=True)
def finish(self):
"""Call to release the set and leave the party"""
self._release_locks()
self._fail_out()
def _fail_out(self):
with self._state_change:
self._set_state(PartitionState.FAILURE)
if self._party.participating:
try:
self._party.leave()
except KazooException: # pragma: nocover
pass
def _allocate_transition(self, result):
"""Called when in allocating mode, and the children settled"""
# Did we get an exception waiting for children to settle?
if result.exception: # pragma: nocover
self._fail_out()
return
children, async_result = result.get()
children_changed = self._client.handler.event_object()
def updated(result):
with self._state_change:
children_changed.set()
if self.acquired:
self._set_state(PartitionState.RELEASE)
with self._state_change:
            # We can lose the connection while processing the event
if not self.allocating:
return
# Remember the state ID to check later for race conditions
state_id = self.state_id
# updated() will be called when children change
async_result.rawlink(updated)
# Check whether the state has changed during the lock acquisition
# and abort the process if so.
def abort_if_needed():
if self.state_id == state_id:
if children_changed.is_set():
# The party has changed. Repartitioning...
self._abort_lock_acquisition()
return True
else:
return False
else:
if self.allocating or self.acquired:
# The connection was lost and user initiated a new
# allocation process. Abort it to eliminate race
# conditions with locks.
with self._state_change:
self._set_state(PartitionState.RELEASE)
return True
# Split up the set
partition_set = self._partition_func(
self._identifier, list(self._party), self._set)
# Proceed to acquire locks for the working set as needed
for member in partition_set:
lock = self._client.Lock(self._lock_path + '/' + str(member))
while True:
try:
                    # We must not lock without a timeout, because otherwise we
                    # could deadlock if the party state changes during lock
                    # acquisition.
lock.acquire(timeout=self._max_reaction_time)
except LockTimeout:
if abort_if_needed():
return
except KazooException:
return self.finish()
else:
break
self._locks.append(lock)
if abort_if_needed():
return
# All locks acquired. Time for state transition.
with self._state_change:
if self.state_id == state_id and not children_changed.is_set():
self._partition_set = partition_set
self._set_state(PartitionState.ACQUIRED)
self._acquire_event.set()
return
if not abort_if_needed():
            # This should never happen; it indicates a logic error.
self._fail_out()
def _release_locks(self):
"""Attempt to completely remove all the locks"""
self._acquire_event.clear()
for lock in self._locks[:]:
try:
lock.release()
except KazooException: # pragma: nocover
# We proceed to remove as many as possible, and leave
# the ones we couldn't remove
pass
else:
self._locks.remove(lock)
def _abort_lock_acquisition(self):
"""Called during lock acquisition if a party change occurs"""
self._release_locks()
if self._locks:
# This shouldn't happen, it means we couldn't release our
# locks, abort
self._fail_out()
return
self._child_watching(self._allocate_transition, client_handler=True)
def _child_watching(self, func=None, client_handler=False):
"""Called when children are being watched to stabilize
        This returns immediately; the child watcher spins up a new
        thread/greenlet and waits for the party to stabilize before any
        callbacks run.
:param client_handler: If True, deliver the result using the
client's event handler.
"""
watcher = PatientChildrenWatch(self._client, self._party_path,
self._time_boundary)
asy = watcher.start()
if func is not None:
# We spin up the function in a separate thread/greenlet
# to ensure that the rawlink's it might use won't be
# blocked
if client_handler:
func = partial(self._client.handler.spawn, func)
asy.rawlink(func)
return asy
def _establish_sessionwatch(self, state):
"""Register ourself to listen for session events, we shut down
if we become lost"""
with self._state_change:
if self.failed:
pass
elif state == KazooState.LOST:
self._client.handler.spawn(self._fail_out)
elif not self.release:
self._set_state(PartitionState.RELEASE)
return state == KazooState.LOST
def _partitioner(self, identifier, members, partitions):
# Ensure consistent order of partitions/members
all_partitions = sorted(partitions)
workers = sorted(members)
i = workers.index(identifier)
# Now return the partition list starting at our location and
# skipping the other workers
return all_partitions[i::len(workers)]
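    # Illustrative sketch (added for clarity; the names below are hypothetical):
    # with partitions ('queue-1', ..., 'queue-6') and sorted members
    # ['worker-a', 'worker-b', 'worker-c'], the member 'worker-b' has index 1,
    # so _partitioner returns all_partitions[1::3] == ['queue-2', 'queue-5'].
    # Each member thus receives every len(workers)-th partition starting at its
    # own index, and no partition is assigned twice.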
def _set_state(self, state):
self.state = state
self.state_id += 1
self.state_change_event.set()
|
|
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Generic single neural network layer built to handle data from a particular
backend. We introduce several basic variants here to handle things like
dataset inputs (DataLayer), the objective function being optimized (CostLayer),
and the internal hidden WeightLayer and ActivationLayer.
"""
import logging
import numpy as np
from neon.backends.cpu import CPU
from neon.optimizers.gradient_descent import (GradientDescent,
GradientDescentPretrain,
GradientDescentMomentum,
GradientDescentMomentumWeightDecay) # noqa
from neon.optimizers.adadelta import AdaDelta
from neon.util.compat import range
from neon.util.param import req_param, opt_param
from neon.util.persist import YAMLable
from neon.transforms.batch_norm import BatchNorm
from neon.transforms.linear import Linear
logger = logging.getLogger(__name__)
class Layer(YAMLable):
"""
Top-level generic neural network layer class from which all other layer
types inherit.
Attributes:
name (string): Name identifying this layer (in logs, etc.)
"""
def __init__(self, **kwargs):
self.initialized = False
self.__dict__.update(kwargs)
req_param(self, ['name'])
opt_param(self, ['pre_act_dtype', 'output_dtype', 'deltas_dtype',
'weight_dtype', 'updates_dtype'], np.float32)
opt_param(self, ['prev_layer'])
opt_param(self, ['activation'], Linear())
opt_param(self, ['is_local', 'is_data', 'is_cost'], False)
opt_param(self, ['skip_act', 'has_params'], False)
opt_param(self, ['prev_names'], [])
opt_param(self, ['backend_type'], 'np.float32')
if self.backend_type == 'np.float16':
logger.info("Setting layer dtype to float16")
for some_type in ['pre_act_dtype', 'output_dtype', 'deltas_dtype',
'weight_dtype', 'updates_dtype']:
setattr(self, some_type, np.float16)
def set_previous_layer(self, pl):
if pl.is_local:
if self.is_local:
self.ifmshape = pl.ofmshape
self.nifm = pl.nofm
self.nin = pl.nofm * np.prod(pl.ofmshape)
else:
if self.is_local:
if not hasattr(self, 'ifmshape'):
                    sqdim = int(np.sqrt(pl.nout))
self.ifmshape = (sqdim, sqdim)
self.nifm = 1
self.nin = pl.nout
self.prev_layer = pl
if self.is_local:
self.link_local()
self.set_weight_shape()
def initialize(self, kwargs):
if self.initialized:
return
self.__dict__.update(kwargs)
req_param(self, ['backend', 'batch_size'])
self.output = None
self.deltas = None
self.initialized = True
def set_weight_shape(self):
pass
def link_local(self):
req_param(self, ['nifm', 'ifmshape', 'fshape'])
opt_param(self, ['ofmlocs', 'links'])
opt_param(self, ['deltasbuf', 'outputbuf'])
opt_param(self, ['nofm'], self.nifm)
opt_param(self, ['pooling'], False)
opt_param(self, ['stride'], 1)
opt_param(self, ['pad'], 0)
assert len(self.ifmshape) == len(self.fshape)
ofmshape = []
for dim in range(len(self.ifmshape)):
assert self.ifmshape[dim] >= self.fshape[dim]
num = self.ifmshape[dim] - self.fshape[dim] + 1 + 2 * self.pad
            # use floor division so the feature-map shape stays integral
            ofmshape.extend([(num + self.stride - 1) // self.stride])
self.ofmshape = tuple(ofmshape)
self.negpad = -self.pad
self.ifmsize = np.prod(self.ifmshape)
self.ofmsize = np.prod(self.ofmshape)
self.fpsize = np.prod(self.fshape)
self.fsize = self.nifm * self.fpsize
self.nout = self.nofm * self.ofmsize
logger.debug('name=%s, nifm=%d, ifmshape=%s, ofmshape=%s',
self.name, self.nifm, self.ifmshape, self.ofmshape)
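    # Worked example (added for illustration; the values are hypothetical): with
    # ifmshape=(32, 32), fshape=(5, 5), pad=0 and stride=1, each dimension gives
    # num = 32 - 5 + 1 = 28, so ofmshape == (28, 28); with stride=2 the same
    # num yields (28 + 2 - 1) // 2 == 14, i.e. ofmshape == (14, 14).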
def initialize_local(self):
if isinstance(self.backend, CPU):
self.make_aux_buffers(self.nifm, self.ifmshape, self.nofm,
self.ofmshape, self.fshape, self.stride)
def __str__(self):
if self.is_local:
ionumstr = '{} x {} inputs, {} x {} nodes'.format(
self.nifm, self.format_tuple(self.ifmshape),
self.nofm, self.format_tuple(self.ofmshape))
else:
ionumstr = '{} inputs, {} nodes'.format(self.nin, self.nout)
ret = '{} {}: {}'.format(self.__class__.__name__, self.name, ionumstr)
ret += ', {} act_fn'.format(self.activation.__class__.__name__)
return ret
def format_tuple(self, tup):
result = '(' + str(tup[0])
for dim in range(1, len(tup)):
result += ' x ' + str(tup[dim])
return result + ')'
def allocate_output_bufs(self):
make_zbuf = self.backend.zeros
opt_param(self, ['out_shape'], (self.nout, self.batch_size))
opt_param(self, ['delta_shape'], (self.nin, self.batch_size))
self.output = make_zbuf(self.out_shape, self.output_dtype)
self.pre_act = self.activation.pre_act_buffer(self.backend,
self.output,
self.pre_act_dtype)
def set_deltas_buf(self, delta_pool, offset):
self.deltas = None
if self.prev_layer is None:
return
if self.prev_layer.is_data:
return
if delta_pool is None:
self.deltas = self.backend.zeros(self.delta_shape,
self.deltas_dtype)
else:
self.deltas = delta_pool[offset:(offset + self.delta_shape[0])]
def make_links(self, nifm, ifmsize, ifmshape, ofmshape, fshape, stride):
# Figure out local connections to the previous layer.
# This function works for any number of dimensions.
ndims = len(ifmshape)
dimsizes = np.empty(ndims, dtype='int32')
for dim in range(ndims):
dimsizes[dim] = np.prod(ifmshape[dim:])
links = []
for ofmdim in np.ndindex(ofmshape):
# This variable tracks the top left corner of
# the receptive field.
src = ofmdim[-1]
for dim in range(-1, -ndims, -1):
src += dimsizes[dim] * ofmdim[dim - 1]
src *= stride
indlist = list(range(src, src + fshape[-1]))
for dim in range(-1, -ndims, -1):
indarray = np.array(indlist)
for dimind in range(1, fshape[dim - 1]):
indlist.extend(list(indarray + dimind * dimsizes[dim]))
if self.pooling is False:
indarray = np.array(indlist)
for ifm in range(1, nifm):
indlist.extend(list(indarray + ifm * ifmsize))
links.append(indlist)
self.links = np.array(links, dtype='int32')
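    # Worked example (added for illustration; the values are hypothetical): for a
    # single 3x3 input feature map (nifm=1, ifmshape=(3, 3)), a 2x2 filter and
    # stride 1, the first output position links to flattened input indices
    # [0, 1, 3, 4], i.e. the top-left 2x2 patch of the row-major input.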
def make_aux_buffers(self, nifm, ifmshape, nofm, ofmshape, fshape, stride):
buf_size = self.batch_size * nifm
if (self.prev_layer is not None and not self.prev_layer.is_data):
self.deltasbuf = self.backend.empty((self.ifmsize, buf_size))
        assert self.ofmsize != 0
ofmstarts = np.arange(0, (self.ofmsize * nofm), self.ofmsize)
self.ofmlocs = np.empty((self.ofmsize, nofm), dtype='int32')
for dst in range(self.ofmsize):
self.ofmlocs[dst] = ofmstarts + dst
self.make_links(nifm, self.ifmsize, ifmshape, ofmshape, fshape, stride)
if self.pooling is True:
self.outputbuf = self.backend.empty((self.ofmsize, buf_size))
if self.op == 'max':
self.tempbuf = np.empty(
(self.ofmsize, self.batch_size * nifm), dtype='int32')
elif self.op == 'l2':
self.tempbuf = self.backend.empty(
(self.fpsize, self.batch_size * nifm))
def fprop(self, inputs):
raise NotImplementedError('This class should not be instantiated.')
def bprop(self, error):
raise NotImplementedError('This class should not be instantiated.')
def update(self, epoch):
pass
def set_train_mode(self, mode):
pass
class CostLayer(Layer):
"""
Pseudo-layer that should sit in the last level of the network defining the
objective function to be optimized.
"""
def __init__(self, **kwargs):
self.is_cost = True
self.nout = 1
super(CostLayer, self).__init__(**kwargs)
def initialize(self, kwargs):
super(CostLayer, self).initialize(kwargs)
req_param(self, ['cost', 'ref_layer'])
opt_param(self, ['ref_label'], 'targets')
opt_param(self, ['raw_label'], False)
opt_param(self, ['category_label'], 'l_id')
self.reference = None
self.cost.olayer = self.prev_layer
kwargs['raw_label'] = self.raw_label
self.cost.initialize(kwargs)
self.deltas = self.cost.get_deltabuf()
def __str__(self):
return ('{} {}: {} nodes, {} cost_fn'. format(
self.__class__.__name__, self.name, self.nin,
self.cost.__class__.__name__))
def set_reference(self):
if self.ref_layer is not None:
refs = getattr(self.ref_layer, self.ref_label)
if isinstance(refs, dict):
self.reference = refs[self.category_label]
else:
self.reference = refs
def fprop(self, inputs):
pass
def bprop(self, error):
        # Since self.deltas already points to the destination of the activation
        # gradient, we just have to scale by the mini-batch size
self.set_reference()
self.cost.apply_derivative(self.reference)
self.backend.divide(self.deltas, self.backend.actual_batch_size,
out=self.deltas)
def get_cost(self):
self.set_reference()
        scale_cost = (self.backend.__module__ == 'neon.backends.gpu')
result = self.cost.apply_function(self.reference,
scale_by_batchsize=scale_cost)
if not scale_cost: # Check for fp16 backend and use scaling
self.backend.divide(result, self.batch_size, result)
return result
def get_reference(self):
self.set_reference()
return self.reference
class DataLayer(Layer):
"""
Typically the first layer of a neural network. Connects a Dataset to the
network.
"""
def __init__(self, **kwargs):
self.is_data = True
opt_param(self, ['has_labels'], False)
super(DataLayer, self).__init__(**kwargs)
# req_param(self, ['dataset'])
def initialize(self, kwargs):
super(DataLayer, self).initialize(kwargs)
self.reset_counter()
if self.is_local is True:
req_param(self, ['nofm', 'ofmshape'])
self.nout = self.nofm * np.prod(self.ofmshape)
else:
req_param(self, ['nout'])
def init_dataset(self, dataset):
"""
Must be called prior to consuming data.
Allows us to switch to a new dataset (useful for changing sets after
training). No checking is done for input size, so should match the
dimensions of datasets between changes
"""
self.dataset = dataset
def __str__(self):
if self.is_local:
ionumstr = '{} x {} nodes'.format(self.nofm,
self.format_tuple(self.ofmshape))
else:
ionumstr = "{} nodes".format(self.nout)
return ("{} {}: {}".format(self.__class__.__name__,
self.name, ionumstr))
def set_previous_layer(self, pl):
pass
def has_more_data(self):
        return self.batch_idx < self.num_batches
def reset_counter(self):
self.batch_idx = 0
def fprop(self, inputs):
self.output, self.targets = self.dataset.get_mini_batch(self.batch_idx)
self.batch_idx += 1
def bprop(self, error):
pass
def has_set(self, setname):
return self.dataset.has_set(setname)
def use_set(self, setname, predict=False):
self.num_batches = self.dataset.init_mini_batch_producer(
batch_size=self.batch_size,
setname=setname,
predict=predict)
self.reset_counter()
def cleanup(self):
# delete helper queues if any
self.dataset.del_mini_batch_producer()
class ImageDataLayer(DataLayer):
def __init__(self, **kwargs):
super(ImageDataLayer, self).__init__(**kwargs)
def fprop(self, inputs):
self.output, self.targets, self.labels = self.dataset.get_mini_batch(
self.batch_idx)
self.batch_idx += 1
class ActivationLayer(Layer):
"""
Just applies an activation to the inputs.
"""
def set_previous_layer(self, pl):
if pl.is_local:
self.is_local = True
self.ifmshape = pl.ofmshape
self.nifm = pl.nofm
self.nin = pl.nofm * np.prod(pl.ofmshape)
else:
self.nin = pl.nout
self.prev_layer = pl
def initialize(self, kwargs):
super(ActivationLayer, self).initialize(kwargs)
req_param(self, ['activation'])
self.nout = self.nin
self.allocate_output_bufs()
def fprop(self, inputs):
self.pre_act[:] = inputs
self.activation.fprop_func(self.backend, self.pre_act, self.output)
def bprop(self, error):
self.activation.bprop_func(self.backend, self.pre_act, error,
self.skip_act)
if self.deltas is not None:
self.deltas[:] = error
class SliceLayer(ActivationLayer):
"""
    Takes a portion of the inputs and passes it on.
    Useful for working around limitations of the GPU convolutional layer:
    for a local layer, it takes the 0:end_idx feature maps;
    for a flat layer, it takes the 0:end_idx inputs.
"""
def __init__(self, **kwargs):
super(SliceLayer, self).__init__(**kwargs)
req_param(self, ['end_idx'])
def set_previous_layer(self, pl):
if pl.is_local:
self.is_local = True
self.ifmshape = pl.ofmshape
self.ofmshape = pl.ofmshape
self.nifm = pl.nofm
self.nin = pl.nofm * np.prod(pl.ofmshape)
self.nofm = self.end_idx
else:
self.nin = pl.nout
self.prev_layer = pl
def initialize(self, kwargs):
self.__dict__.update(kwargs)
req_param(self, ['backend', 'batch_size'])
self.output = None
self.deltas = None
if self.is_local:
self.nofm = self.end_idx
self.end_idx = np.prod(self.ifmshape) * self.end_idx
self.nout = self.end_idx
self.allocate_output_bufs()
def fprop(self, inputs):
self.output[:] = inputs[:self.end_idx]
def bprop(self, error):
self.deltas.fill(0.0)
self.deltas[:self.end_idx] = error
class WeightLayer(Layer):
"""
Typical hidden layer with weight parameters to be learned.
"""
def __init__(self, **kwargs):
super(WeightLayer, self).__init__(**kwargs)
self.distributable = True
self.has_params = True
self.params_initialized = False
def initialize(self, kwargs):
super(WeightLayer, self).initialize(kwargs)
req_param(self, ['weight_init', 'lrule_init', 'nin', 'nout'])
opt_param(self, ['accumulate'], False)
opt_param(self, ['batch_norm'], False)
self.weight_init.initialize(self.backend)
self.params = []
self.updates = []
if self.batch_norm:
self.bn = BatchNorm()
kwargs['layer'] = self
self.bn.initialize(kwargs)
def get_params(self):
np_params = dict()
for p in ['weights', 'biases']:
if hasattr(self, p):
p_tensor = getattr(self, p)
np_params[p] = np.array(p_tensor.asnumpyarray(),
dtype=p_tensor.dtype).reshape(
p_tensor.shape)
if self.batch_norm:
np_params.update(self.bn.get_params())
np_params.update(self.learning_rule.get_params())
return np_params
def set_params(self, params_dict):
for p in ['weights', 'biases']:
if p in params_dict:
getattr(self, p)[:] = params_dict[p]
if self.batch_norm:
self.bn.set_params(params_dict)
self.learning_rule.set_params(params_dict)
def allocate_param_bufs(self):
if self.params_initialized:
return
make_ebuf = self.backend.empty
self.weights = self.weight_init.generate(self.weight_shape,
self.weight_dtype)
self.weights.name = self.name # naming weights for timing diagnostics
self.weight_updates = make_ebuf(self.weight_shape, self.updates_dtype)
self.use_biases = 'bias_init' in self.weight_init.__dict__
opt_param(self, ['brule_init'], None)
if self.use_biases is True:
self.biases = make_ebuf(self.bias_shape, self.weight_dtype)
self.biases.fill(self.weight_init.bias_init)
self.bias_updates = make_ebuf(self.bias_shape, self.updates_dtype)
self.params.extend([self.weights, self.biases])
self.updates.extend([self.weight_updates, self.bias_updates])
else:
self.params.extend([self.weights])
self.updates.extend([self.weight_updates])
if self.accumulate:
            # materialize as a list so it can be iterated and indexed repeatedly
            self.utemp = [make_ebuf(x.shape, self.updates_dtype)
                          for x in self.updates]
for upm in self.updates:
upm.fill(0.0)
self.learning_rule = self.init_learning_rule(self.lrule_init)
self.bias_rule = None
if self.brule_init is not None and self.use_biases:
self.bias_rule = self.init_learning_rule(self.brule_init)
self.bias_rule.allocate_state([self.updates[-1]])
self.learning_rule.allocate_state(self.updates[:-1])
else:
self.learning_rule.allocate_state(self.updates)
self.params_initialized = True
def update(self, epoch):
if self.bias_rule is None:
self.learning_rule.apply_rule(self.params, self.updates, epoch)
else:
self.learning_rule.apply_rule(self.params[:-1],
self.updates[:-1], epoch)
self.bias_rule.apply_rule([self.params[-1]],
[self.updates[-1]], epoch)
if self.accumulate:
for upm in self.updates:
upm.fill(0.0)
def normalize_weights(self, wts):
norms = self.backend.norm(wts, order=2, axis=1)
self.backend.divide(wts, norms.reshape((norms.shape[0], 1)), out=wts)
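    # Worked example (added for illustration): backend.norm with order=2, axis=1
    # computes the row-wise L2 norm, so a weight row [3.0, 4.0] has norm 5.0 and
    # is rescaled in place to [0.6, 0.8]; every row ends up with unit L2 norm.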
def set_train_mode(self, mode):
if self.batch_norm and mode is False:
self.bn.set_inference_mode()
def init_learning_rule(self, lrule_init):
dtype = self.weight_dtype # TODO: Cool to reuse this here?
lrname = self.name + '_lr'
if lrule_init['type'] == 'gradient_descent':
lr = GradientDescent(name=lrname,
lr_params=lrule_init['lr_params'])
elif lrule_init['type'] == 'gradient_descent_pretrain':
lr = GradientDescentPretrain(
name=lrname, lr_params=lrule_init['lr_params'])
elif lrule_init['type'] == 'gradient_descent_momentum':
lr = GradientDescentMomentum(
name=lrname, lr_params=lrule_init['lr_params'],
param_dtype=dtype, gradient_dtype=dtype)
elif lrule_init['type'] == 'gradient_descent_momentum_weight_decay':
lr = GradientDescentMomentumWeightDecay(
name=lrname, lr_params=lrule_init['lr_params'],
param_dtype=dtype, gradient_dtype=dtype)
elif lrule_init['type'] == 'adadelta':
lr = AdaDelta(name=lrname, lr_params=lrule_init['lr_params'])
else:
raise AttributeError("invalid learning rule params specified")
lr.initialize(self.backend)
return lr
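    # Illustrative sketch (added for clarity; the inner lr_params are deliberately
    # left unspecified): a learning-rule spec is a dict with 'type' and 'lr_params'
    # keys, e.g.
    #   {'type': 'adadelta', 'lr_params': {...}}
    # which init_learning_rule maps onto an AdaDelta instance initialized against
    # this layer's backend.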
|
|
from __future__ import print_function
import sys
import glob
import serial
import time
import datetime
import struct
import os
MAX_ALARM_DID = chr(0x3F)
DID_ALARM_LEN = 12
SER_TIMEOUT = .1 # Serial timeout, seconds
eeprom = None
class EEPROMError(Exception):
pass
class MsgDef:
def __init__(self, v, const):
v = v.strip()
v = v[1:-1]
val, msg_size, callback = v.split(',')
msg_size = msg_size.strip()
if 'x' in val:
self.val = int(val, 16)
else:
if val not in const:
raise Exception('Unknown constant %s' % val)
self.val = const[val]
        try:
            self.n_byte = int(msg_size)
        except ValueError:
            # not a literal integer; look it up among previously parsed constants
            self.n_byte = const[msg_size]
self.callback = callback
def __str__(self):
return chr(self.val)
def read_constants(fn):
f = open(fn)
out = {}
for line in f.readlines():
if line.startswith('const'):
line = line.split(';')[0]
try:
d, v = line.split('=')
c, t, n = d.split()
if 'int' in t:
if 'X' in v.upper():
base = 16
else:
base = 10
out[n] = int(v, base)
elif t == 'MsgDef':
try:
out[n] = MsgDef(v, out)
                    except Exception as e:
if '*' not in line:
print('problem with ', line, e)
else:
out[n] = v
except:
pass
return out
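# Illustrative sketch (added for clarity): a firmware line such as
#   const int MAX_EEPROM_ADDR = 1023;
# is parsed by read_constants() into out['MAX_EEPROM_ADDR'] == 1023, while
# 'const MsgDef ...' declarations are turned into MsgDef instances keyed by name.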
class Struct:
    def __init__(self, **entries):
        self.d = entries
def __getattr__(self, name):
return self.d[name]
def __add__(self, other):
out = {}
for key in self.d:
out[key] = self.d[key]
for key in other.d:
out[key] = other.d[key]
return Struct(**out)
const = {'MAX_EEPROM_ADDR':1023}
c_files = glob.glob("*.ino")
c_files.extend(glob.glob("*.h"))
for c_file in c_files:
    file_consts = read_constants(c_file)
    for key in file_consts:
        # print('%s = %s' % (key, file_consts[key]))
        const[key] = file_consts[key]
const = Struct(**const)
def set_gmt_offset(offset):
global gmt_offset
gmt_offset = offset
def getSerialports():
if sys.platform.startswith('win'): # windows
out = []
import scanwin32
for order, port, desc, hwid in sorted(scanwin32.comports()):
print( "%-10s: %s (%s) ->" % (port, desc, hwid),)
try:
s = serial.Serial(port) # test open
s.close()
except serial.serialutil.SerialException:
print( "can't be opened")
else:
print("Ready")
out.append(port)
elif sys.platform.startswith('darwin'): # mac
out = glob.glob('/dev/tty.usb*')
out.sort()
else: # assume linux
out = glob.glob('/dev/ttyUSB*')
out.sort()
return out
# def connect(serialport='/dev/ttyUSB0', _gmt_offset=None):
def connect(serialport='/dev/ttyUSB0', _gmt_offset=None):
if _gmt_offset is None:
local_time = time.localtime()
_gmt_offset = (local_time.tm_isdst * 3600 - time.timezone)
global ser
set_gmt_offset(_gmt_offset)
try:
ser.close() # need to close serial port on windows.
except:
pass
# raw_input('...')
print( 'serialport', serialport)
ser = serial.Serial(serialport,
baudrate=112500,
timeout=SER_TIMEOUT)
return ser
gmt_offset = -7 * 3600
def flush():
dat = ser.read(1000)
while len(dat) > 0:
dat = ser.read(1000)
def time_req():
# flush serial data
flush()
ser.write(str(const.ABS_TIME_REQ))
id = ser.read(1)
assert id == str(const.ABS_TIME_SET)
dat = ser.read(4)
if len(dat) < 4:
out = 0
else:
out = c3_to_wall_clock(dat)
return out
def time_set(now=None):
flush()
if now is None:
now = int(round(time.time()) + gmt_offset)
# now = time.mktime(time.localtime())
dat = wall_clock_to_c3(now)
ser.write(str(const.ABS_TIME_SET))
ser.write(dat)
def c3_to_wall_clock(bytes):
return struct.unpack('<I', bytes)[0]
def wall_clock_to_c3(t):
return struct.pack('<I', int(round(t)))
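# Hedged round-trip sketch (the sample timestamp is arbitrary; not called
# anywhere): the device clock is exchanged as a 4-byte little-endian unsigned
# int, so packing and unpacking must be exact inverses.
def _wall_clock_roundtrip_demo(sample=1590000000):
    packed = wall_clock_to_c3(sample)
    assert len(packed) == 4
    assert c3_to_wall_clock(packed) == sample
    return packed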
def to_gmt(t):
return t + gmt_offset
def from_gmt(t):
return t - gmt_offset
def fmt_time(when):
return '%02d/%02d/%04d %d:%02d:%02d' % (when.tm_mday, when.tm_mon, when.tm_year,
when.tm_hour, when.tm_min, when.tm_sec)
def main():
ser.flush()
    now = time.gmtime(time_req())
year = now.tm_year
print('year', year)
time_set()
print (time_req())
if __name__ == '__main__':
connect(getSerialports()[0])
if len(sys.argv) > 1:
for arg in sys.argv[1:]:
if arg == 'set':
time_set()
print (' P2 time', fmt_time(time.gmtime(time_req())))
elif arg == 'time':
print (' P2 time', fmt_time(time.gmtime(time_req())))
elif arg == 'pc_time':
print (' PC TIME:', fmt_time(time.gmtime(to_gmt(time.time()))))
else:
print ('huh?', arg)
else:
# read_write_test()
main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
from abc import ABCMeta
from itertools import chain
from typing import Any, Optional, Union
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark.sql import functions as F
from pyspark.sql.types import (
ArrayType,
BinaryType,
BooleanType,
DataType,
DateType,
DecimalType,
FractionalType,
IntegralType,
MapType,
NullType,
NumericType,
StringType,
StructType,
TimestampType,
UserDefinedType,
)
from pyspark.pandas._typing import Dtype, IndexOpsLike, SeriesOrIndex
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import extension_dtypes
from pyspark.pandas.typedef.typehints import (
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
if extension_dtypes_available:
from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
if extension_float_dtypes_available:
from pandas import Float32Dtype, Float64Dtype
if extension_object_dtypes_available:
from pandas import BooleanDtype, StringDtype
def is_valid_operand_for_numeric_arithmetic(operand: Any, *, allow_bool: bool = True) -> bool:
"""Check whether the `operand` is valid for arithmetic operations against numerics."""
from pyspark.pandas.base import IndexOpsMixin
if isinstance(operand, numbers.Number):
return not isinstance(operand, bool) or allow_bool
elif isinstance(operand, IndexOpsMixin):
if isinstance(operand.dtype, CategoricalDtype):
return False
else:
return isinstance(operand.spark.data_type, NumericType) or (
allow_bool and isinstance(operand.spark.data_type, BooleanType)
)
else:
return False
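# Hedged sketch using plain Python literals only (no Spark columns involved;
# not called anywhere): numbers are valid operands, and bools are accepted
# only while allow_bool is True.
def _numeric_operand_demo():
    assert is_valid_operand_for_numeric_arithmetic(1)
    assert is_valid_operand_for_numeric_arithmetic(1.5)
    assert is_valid_operand_for_numeric_arithmetic(True)
    assert not is_valid_operand_for_numeric_arithmetic(True, allow_bool=False)
    assert not is_valid_operand_for_numeric_arithmetic("a string")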
def transform_boolean_operand_to_numeric(
operand: Any, spark_type: Optional[DataType] = None
) -> Any:
"""Transform boolean operand to numeric.
If the `operand` is:
- a boolean IndexOpsMixin, transform the `operand` to the `spark_type`.
- a boolean literal, transform to the int value.
Otherwise, return the operand as it is.
"""
from pyspark.pandas.base import IndexOpsMixin
if isinstance(operand, IndexOpsMixin) and isinstance(operand.spark.data_type, BooleanType):
assert spark_type, "spark_type must be provided if the operand is a boolean IndexOpsMixin"
return operand.spark.transform(lambda scol: scol.cast(spark_type))
elif isinstance(operand, bool):
return int(operand)
else:
return operand
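# Hedged sketch with Python literals only (not called anywhere): boolean
# literals are lowered to ints, while non-boolean operands pass through; the
# IndexOpsMixin branch needs a real pandas-on-Spark column and is not shown.
def _boolean_operand_demo():
    assert transform_boolean_operand_to_numeric(True) == 1
    assert transform_boolean_operand_to_numeric(False) == 0
    assert transform_boolean_operand_to_numeric(2.5) == 2.5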
def _as_categorical_type(
index_ops: IndexOpsLike, dtype: CategoricalDtype, spark_type: DataType
) -> IndexOpsLike:
"""Cast `index_ops` to categorical dtype, given `dtype` and `spark_type`."""
assert isinstance(dtype, CategoricalDtype)
if dtype.categories is None:
codes, uniques = index_ops.factorize()
return codes._with_new_scol(
codes.spark.column,
field=codes._internal.data_fields[0].copy(dtype=CategoricalDtype(categories=uniques)),
)
else:
categories = dtype.categories
if len(categories) == 0:
scol = SF.lit(-1)
else:
kvs = chain(
*[(SF.lit(category), SF.lit(code)) for code, category in enumerate(categories)]
)
map_scol = F.create_map(*kvs)
scol = F.coalesce(map_scol.getItem(index_ops.spark.column), SF.lit(-1))
return index_ops._with_new_scol(
scol.cast(spark_type).alias(index_ops._internal.data_fields[0].name),
field=index_ops._internal.data_fields[0].copy(
dtype=dtype, spark_type=spark_type, nullable=False
),
)
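# Hedged usage sketch (assumes a live Spark session and a pandas-on-Spark
# Series `psser`; the category list is made up and the helper is not called
# anywhere): casting with astype(CategoricalDtype(...)) is what ultimately
# routes through _as_categorical_type above.
def _categorical_cast_sketch(psser):
    return psser.astype(CategoricalDtype(categories=["a", "b", "c"]))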
def _as_bool_type(index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
"""Cast `index_ops` to BooleanType Spark type, given `dtype`."""
from pyspark.pandas.internal import InternalField
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(BooleanType())
else:
scol = F.when(index_ops.spark.column.isNull(), SF.lit(False)).otherwise(
index_ops.spark.column.cast(BooleanType())
)
return index_ops._with_new_scol(
scol.alias(index_ops._internal.data_spark_column_names[0]),
field=InternalField(dtype=dtype),
)
def _as_string_type(
index_ops: IndexOpsLike, dtype: Union[str, type, Dtype], *, null_str: str = str(None)
) -> IndexOpsLike:
"""Cast `index_ops` to StringType Spark type, given `dtype` and `null_str`,
representing null Spark column.
"""
from pyspark.pandas.internal import InternalField
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(StringType())
else:
casted = index_ops.spark.column.cast(StringType())
scol = F.when(index_ops.spark.column.isNull(), null_str).otherwise(casted)
return index_ops._with_new_scol(
scol.alias(index_ops._internal.data_spark_column_names[0]),
field=InternalField(dtype=dtype),
)
def _as_other_type(
index_ops: IndexOpsLike, dtype: Union[str, type, Dtype], spark_type: DataType
) -> IndexOpsLike:
"""Cast `index_ops` to a `dtype` (`spark_type`) that needs no pre-processing.
Destination types that need pre-processing: CategoricalDtype, BooleanType, and StringType.
"""
from pyspark.pandas.internal import InternalField
need_pre_process = (
isinstance(dtype, CategoricalDtype)
or isinstance(spark_type, BooleanType)
or isinstance(spark_type, StringType)
)
assert not need_pre_process, "Pre-processing is needed before the type casting."
scol = index_ops.spark.column.cast(spark_type)
return index_ops._with_new_scol(
scol.alias(index_ops._internal.data_spark_column_names[0]),
field=InternalField(dtype=dtype),
)
class DataTypeOps(object, metaclass=ABCMeta):
"""The base class for binary operations of pandas-on-Spark objects (of different data types)."""
def __new__(cls, dtype: Dtype, spark_type: DataType) -> "DataTypeOps":
from pyspark.pandas.data_type_ops.binary_ops import BinaryOps
from pyspark.pandas.data_type_ops.boolean_ops import BooleanOps, BooleanExtensionOps
from pyspark.pandas.data_type_ops.categorical_ops import CategoricalOps
from pyspark.pandas.data_type_ops.complex_ops import ArrayOps, MapOps, StructOps
from pyspark.pandas.data_type_ops.date_ops import DateOps
from pyspark.pandas.data_type_ops.datetime_ops import DatetimeOps
from pyspark.pandas.data_type_ops.null_ops import NullOps
from pyspark.pandas.data_type_ops.num_ops import (
DecimalOps,
FractionalExtensionOps,
FractionalOps,
IntegralExtensionOps,
IntegralOps,
)
from pyspark.pandas.data_type_ops.string_ops import StringOps, StringExtensionOps
from pyspark.pandas.data_type_ops.udt_ops import UDTOps
if isinstance(dtype, CategoricalDtype):
return object.__new__(CategoricalOps)
elif isinstance(spark_type, DecimalType):
return object.__new__(DecimalOps)
elif isinstance(spark_type, FractionalType):
if extension_float_dtypes_available and type(dtype) in [Float32Dtype, Float64Dtype]:
return object.__new__(FractionalExtensionOps)
else:
return object.__new__(FractionalOps)
elif isinstance(spark_type, IntegralType):
if extension_dtypes_available and type(dtype) in [
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
]:
return object.__new__(IntegralExtensionOps)
else:
return object.__new__(IntegralOps)
elif isinstance(spark_type, StringType):
if extension_object_dtypes_available and isinstance(dtype, StringDtype):
return object.__new__(StringExtensionOps)
else:
return object.__new__(StringOps)
elif isinstance(spark_type, BooleanType):
if extension_object_dtypes_available and isinstance(dtype, BooleanDtype):
return object.__new__(BooleanExtensionOps)
else:
return object.__new__(BooleanOps)
elif isinstance(spark_type, TimestampType):
return object.__new__(DatetimeOps)
elif isinstance(spark_type, DateType):
return object.__new__(DateOps)
elif isinstance(spark_type, BinaryType):
return object.__new__(BinaryOps)
elif isinstance(spark_type, ArrayType):
return object.__new__(ArrayOps)
elif isinstance(spark_type, MapType):
return object.__new__(MapOps)
elif isinstance(spark_type, StructType):
return object.__new__(StructOps)
elif isinstance(spark_type, NullType):
return object.__new__(NullOps)
elif isinstance(spark_type, UserDefinedType):
return object.__new__(UDTOps)
else:
raise TypeError("Type %s was not understood." % dtype)
def __init__(self, dtype: Dtype, spark_type: DataType):
self.dtype = dtype
self.spark_type = spark_type
@property
def pretty_name(self) -> str:
raise NotImplementedError()
def add(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Addition can not be applied to %s." % self.pretty_name)
def sub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)
def mul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)
def truediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("True division can not be applied to %s." % self.pretty_name)
def floordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Floor division can not be applied to %s." % self.pretty_name)
def mod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Modulo can not be applied to %s." % self.pretty_name)
def pow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)
def radd(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Addition can not be applied to %s." % self.pretty_name)
def rsub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)
def rmul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)
def rtruediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("True division can not be applied to %s." % self.pretty_name)
def rfloordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Floor division can not be applied to %s." % self.pretty_name)
def rmod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Modulo can not be applied to %s." % self.pretty_name)
def rpow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)
def __and__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Bitwise and can not be applied to %s." % self.pretty_name)
def __or__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Bitwise or can not be applied to %s." % self.pretty_name)
def rand(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
return left.__and__(right)
def ror(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
return left.__or__(right)
def restore(self, col: pd.Series) -> pd.Series:
"""Restore column when to_pandas."""
return col
def prepare(self, col: pd.Series) -> pd.Series:
"""Prepare column when from_pandas."""
return col.replace({np.nan: None})
def isnull(self, index_ops: IndexOpsLike) -> IndexOpsLike:
return index_ops._with_new_scol(
index_ops.spark.column.isNull(),
field=index_ops._internal.data_fields[0].copy(
dtype=np.dtype("bool"), spark_type=BooleanType(), nullable=False
),
)
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
raise TypeError("astype can not be applied to %s." % self.pretty_name)
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Training a deep NN on MovieLens with differentially private Adam optimizer."""
from absl import app
from absl import flags
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow_privacy.privacy.analysis.gdp_accountant import compute_eps_poisson
from tensorflow_privacy.privacy.analysis.gdp_accountant import compute_mu_poisson
from tensorflow_privacy.privacy.optimizers import dp_optimizer
#### FLAGS
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
'dpsgd', True, 'If True, train with DP-SGD. If False, '
'train with vanilla SGD.')
flags.DEFINE_float('learning_rate', .01, 'Learning rate for training')
flags.DEFINE_float('noise_multiplier', 0.55,
'Ratio of the standard deviation to the clipping norm')
flags.DEFINE_float('l2_norm_clip', 5, 'Clipping norm')
flags.DEFINE_integer('epochs', 25, 'Number of epochs')
flags.DEFINE_integer('max_mu', 2, 'GDP upper limit')
flags.DEFINE_string('model_dir', None, 'Model directory')
sampling_batch = 10000
microbatches = 10000
num_examples = 800167
def nn_model_fn(features, labels, mode):
"""NN adapted from github.com/hexiangnan/neural_collaborative_filtering."""
n_latent_factors_user = 10
n_latent_factors_movie = 10
n_latent_factors_mf = 5
user_input = tf.reshape(features['user'], [-1, 1])
item_input = tf.reshape(features['movie'], [-1, 1])
# number of users: 6040; number of movies: 3706
mf_embedding_user = tf.keras.layers.Embedding(
6040, n_latent_factors_mf, input_length=1)
mf_embedding_item = tf.keras.layers.Embedding(
3706, n_latent_factors_mf, input_length=1)
mlp_embedding_user = tf.keras.layers.Embedding(
6040, n_latent_factors_user, input_length=1)
mlp_embedding_item = tf.keras.layers.Embedding(
3706, n_latent_factors_movie, input_length=1)
# GMF part
# Flatten the embedding vector as latent features in GMF
mf_user_latent = tf.keras.layers.Flatten()(mf_embedding_user(user_input))
mf_item_latent = tf.keras.layers.Flatten()(mf_embedding_item(item_input))
# Element-wise multiply
mf_vector = tf.keras.layers.multiply([mf_user_latent, mf_item_latent])
# MLP part
# Flatten the embedding vector as latent features in MLP
mlp_user_latent = tf.keras.layers.Flatten()(mlp_embedding_user(user_input))
mlp_item_latent = tf.keras.layers.Flatten()(mlp_embedding_item(item_input))
# Concatenation of two latent features
mlp_vector = tf.keras.layers.concatenate([mlp_user_latent, mlp_item_latent])
predict_vector = tf.keras.layers.concatenate([mf_vector, mlp_vector])
logits = tf.keras.layers.Dense(5)(predict_vector)
# Calculate loss as a vector (to support microbatches in DP-SGD).
vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
# Define mean of loss across minibatch (for reporting through tf.Estimator).
scalar_loss = tf.reduce_mean(vector_loss)
# Configure the training op (for TRAIN mode).
if mode == tf.estimator.ModeKeys.TRAIN:
if FLAGS.dpsgd:
# Use DP version of GradientDescentOptimizer. Other optimizers are
# available in dp_optimizer. Most optimizers inheriting from
# tf.compat.v1.train.Optimizer should be wrappable in differentially
# private counterparts by calling dp_optimizer.optimizer_from_args().
optimizer = dp_optimizer.DPAdamGaussianOptimizer(
l2_norm_clip=FLAGS.l2_norm_clip,
noise_multiplier=FLAGS.noise_multiplier,
num_microbatches=microbatches,
learning_rate=FLAGS.learning_rate)
opt_loss = vector_loss
else:
optimizer = tf.compat.v1.train.AdamOptimizer(
learning_rate=FLAGS.learning_rate)
opt_loss = scalar_loss
global_step = tf.compat.v1.train.get_global_step()
train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
# In the following, we pass the mean of the loss (scalar_loss) rather than
# the vector_loss because tf.estimator requires a scalar loss. This is only
# used for evaluation and debugging by tf.estimator. The actual loss being
# minimized is opt_loss defined above and passed to optimizer.minimize().
return tf.estimator.EstimatorSpec(
mode=mode, loss=scalar_loss, train_op=train_op)
# Add evaluation metrics (for EVAL mode).
if mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {
'rmse':
tf.compat.v1.metrics.root_mean_squared_error(
labels=tf.cast(labels, tf.float32),
predictions=tf.tensordot(
a=tf.nn.softmax(logits, axis=1),
b=tf.constant(np.array([0, 1, 2, 3, 4]), dtype=tf.float32),
axes=1))
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=scalar_loss, eval_metric_ops=eval_metric_ops)
return None
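# Hedged sketch mirroring the accounting calls made in main() below (the
# argument order is taken from those calls and the numbers are the module
# defaults; not called anywhere).
def _gdp_accounting_sketch(epoch=1):
  eps = compute_eps_poisson(epoch, 0.55, num_examples, sampling_batch, 1e-6)
  mu = compute_mu_poisson(epoch, 0.55, num_examples, sampling_batch)
  return eps, mu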
def load_movielens():
"""Loads MovieLens 1M as from https://grouplens.org/datasets/movielens/1m."""
data = pd.read_csv(
'ratings.dat',
sep='::',
header=None,
names=['userId', 'movieId', 'rating', 'timestamp'])
n_users = len(set(data['userId']))
n_movies = len(set(data['movieId']))
  print('number of movies: ', n_movies)
  print('number of users: ', n_users)
# give unique dense movie index to movieId
data['movieIndex'] = stats.rankdata(data['movieId'], method='dense')
# minus one to reduce the minimum value to 0, which is the start of col index
print('number of ratings:', data.shape[0])
print('percentage of sparsity:',
(1 - data.shape[0] / n_users / n_movies) * 100, '%')
train, test = train_test_split(data, test_size=0.2, random_state=100)
return train.values - 1, test.values - 1, np.mean(train['rating'])
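# Hedged sketch of the Poisson subsampling trick used in the training loop in
# main(): each example is kept independently with probability
# q = sampling_batch / num_examples, so the expected lot size is
# sampling_batch. The small n and q here are made-up values; not called.
def _poisson_subsample_sketch(n=1000, q=0.1):
  keep = np.random.random_sample(n) > (1 - q)
  return [i for i in np.arange(n) if keep[i]]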
def main(unused_argv):
tf.compat.v1.logging.set_verbosity(3)
# Load training and test data.
train_data, test_data, _ = load_movielens()
# Instantiate the tf.Estimator.
ml_classifier = tf.estimator.Estimator(
model_fn=nn_model_fn, model_dir=FLAGS.model_dir)
# Create tf.Estimator input functions for the training and test data.
eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
x={
'user': test_data[:, 0],
'movie': test_data[:, 4]
},
y=test_data[:, 2],
num_epochs=1,
shuffle=False)
# Training loop.
steps_per_epoch = num_examples // sampling_batch
test_accuracy_list = []
for epoch in range(1, FLAGS.epochs + 1):
for _ in range(steps_per_epoch):
whether = np.random.random_sample(num_examples) > (
1 - sampling_batch / num_examples)
subsampling = [i for i in np.arange(num_examples) if whether[i]]
global microbatches
microbatches = len(subsampling)
train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
x={
'user': train_data[subsampling, 0],
'movie': train_data[subsampling, 4]
},
y=train_data[subsampling, 2],
batch_size=len(subsampling),
num_epochs=1,
shuffle=True)
# Train the model for one step.
ml_classifier.train(input_fn=train_input_fn, steps=1)
# Evaluate the model and print results
eval_results = ml_classifier.evaluate(input_fn=eval_input_fn)
test_accuracy = eval_results['rmse']
test_accuracy_list.append(test_accuracy)
print('Test RMSE after %d epochs is: %.3f' % (epoch, test_accuracy))
# Compute the privacy budget expended so far.
if FLAGS.dpsgd:
eps = compute_eps_poisson(epoch, FLAGS.noise_multiplier, num_examples,
sampling_batch, 1e-6)
mu = compute_mu_poisson(epoch, FLAGS.noise_multiplier, num_examples,
sampling_batch)
print('For delta=1e-6, the current epsilon is: %.2f' % eps)
print('For delta=1e-6, the current mu is: %.2f' % mu)
if mu > FLAGS.max_mu:
break
else:
print('Trained with vanilla non-private SGD optimizer')
if __name__ == '__main__':
app.run(main)
|
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import copy
import logging
from operator import attrgetter
from typing import Dict, List, Optional, Tuple, Set
import mxnet as mx
import numpy as np
logger = logging.getLogger(__name__)
# Represents a list of raw constraints for a sentence. Each constraint is a list of target-word IDs.
RawConstraintList = List[List[int]]
class AvoidTrie:
"""
Represents a set of phrasal constraints for an input sentence.
These are organized into a trie.
"""
def __init__(self,
raw_phrases: Optional[RawConstraintList] = None) -> None:
self.final_ids = set() # type: Set[int]
self.children = {} # type: Dict[int,'AvoidTrie']
if raw_phrases:
for phrase in raw_phrases:
self.add_phrase(phrase)
def __str__(self) -> str:
s = '({}'.format(list(self.final_ids))
for child_id in self.children.keys():
s += ' -> {} {}'.format(child_id, self.children[child_id])
s += ')'
return s
def __len__(self) -> int:
"""
Returns the number of avoid phrases represented in the trie.
"""
phrase_count = len(self.final_ids)
for child in self.children.values():
phrase_count += len(child)
return phrase_count
def add_trie(self,
trie: 'AvoidTrie',
phrase: Optional[List[int]] = None) -> None:
self.final_ids |= trie.final()
for child_id, child in trie.children.items():
if child_id not in self.children:
self.children[child_id] = AvoidTrie()
self.children[child_id].add_trie(child)
def add_phrase(self,
phrase: List[int]) -> None:
"""
Recursively adds a phrase to this trie node.
:param phrase: A list of word IDs to add to this trie node.
"""
if len(phrase) == 1:
self.final_ids.add(phrase[0])
else:
next_word = phrase[0]
if next_word not in self.children:
self.children[next_word] = AvoidTrie()
self.step(next_word).add_phrase(phrase[1:])
def step(self, word_id: int) -> Optional['AvoidTrie']:
"""
Returns the child node along the requested arc.
        :param word_id: The word ID of the arc to follow.
:return: The child node along the requested arc, or None if no such arc exists.
"""
return self.children.get(word_id, None)
def final(self) -> Set[int]:
"""
Returns the set of final ids at this node.
:return: The set of word IDs that end a constraint at this state.
"""
return self.final_ids
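# Hedged usage sketch (word IDs are arbitrary, not from a real vocabulary;
# not called anywhere): a single-word phrase becomes a final id at the root,
# while a two-word phrase becomes an arc to a child holding the last word.
def _avoid_trie_sketch():
    trie = AvoidTrie([[14], [19, 35]])
    assert len(trie) == 2
    assert 14 in trie.final()
    assert 35 in trie.step(19).final()
    assert trie.step(99) is None
    return trie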
class AvoidState:
"""
Represents the state of a hypothesis in the AvoidTrie.
The offset is used to return actual positions in the one-dimensionally-resized array that
get set to infinity.
:param avoid_trie: The trie containing the phrases to avoid.
:param state: The current state (defaults to root).
"""
def __init__(self,
avoid_trie: AvoidTrie,
state: AvoidTrie = None) -> None:
self.root = avoid_trie
self.state = state if state else self.root
def consume(self, word_id: int) -> 'AvoidState':
"""
Consumes a word, and updates the state based on it. Returns new objects on a state change.
The next state for a word can be tricky. Here are the cases:
(1) If the word is found in our set of outgoing child arcs, we take that transition.
(2) If the word is not found, and we are not in the root state, we need to reset.
This means we pretend we were in the root state, and see if we can take a step
(3) Otherwise, if we are not already in the root state (i.e., we were partially through
the trie), we need to create a new object whose state is the root state
(4) Finally, if we couldn't advance and were already in the root state, we can reuse
this object.
:param word_id: The word that was just generated.
"""
if word_id in self.state.children:
return AvoidState(self.root, self.state.step(word_id))
elif word_id in self.root.children:
return AvoidState(self.root, self.root.step(word_id))
elif self.state != self.root:
return AvoidState(self.root, self.root)
else:
return self
def avoid(self) -> Set[int]:
"""
Returns a set of word IDs that should be avoided. This includes the set of final states from the
root node, which are single tokens that must never be generated.
:return: A set of integers representing words that must not be generated next by this hypothesis.
"""
return self.root.final() | self.state.final()
def __str__(self) -> str:
return str(self.state)
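# Hedged walk-through of consume() with made-up IDs (not called anywhere):
# entering a phrase makes its final word blocked, and an unrelated word
# resets the state to the root.
def _avoid_state_sketch():
    state = AvoidState(AvoidTrie([[19, 35]]))
    assert 35 not in state.avoid()
    state = state.consume(19)
    assert 35 in state.avoid()
    state = state.consume(7)
    assert 35 not in state.avoid()
    return state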
class AvoidBatch:
"""
Represents a set of phrasal constraints for all items in the batch.
    For each hypothesis, there is an AvoidState tracking its progress through the trie.
:param batch_size: The batch size.
:param beam_size: The beam size.
:param avoid_list: The list of lists (raw phrasal constraints as IDs, one for each item in the batch).
:param global_avoid_trie: A translator-level vocabulary of items to avoid.
"""
def __init__(self,
batch_size: int,
beam_size: int,
avoid_list: Optional[List[RawConstraintList]] = None,
global_avoid_trie: Optional[AvoidTrie] = None) -> None:
self.global_avoid_states = [] # type: List[AvoidState]
self.local_avoid_states = [] # type: List[AvoidState]
# Store the global trie for each hypothesis
if global_avoid_trie is not None:
self.global_avoid_states = [AvoidState(global_avoid_trie)] * batch_size * beam_size
# Store the sentence-level tries for each item in their portions of the beam
if avoid_list is not None:
for raw_phrases in avoid_list:
self.local_avoid_states += [AvoidState(AvoidTrie(raw_phrases))] * beam_size
def reorder(self, indices: mx.nd.NDArray) -> None:
"""
Reorders the avoid list according to the selected row indices.
This can produce duplicates, but this is fixed if state changes occur in consume().
:param indices: An mx.nd.NDArray containing indices of hypotheses to select.
"""
if self.global_avoid_states:
self.global_avoid_states = [self.global_avoid_states[x] for x in indices.asnumpy()]
if self.local_avoid_states:
self.local_avoid_states = [self.local_avoid_states[x] for x in indices.asnumpy()]
def consume(self, word_ids: mx.nd.NDArray) -> None:
"""
Consumes a word for each trie, updating respective states.
:param word_ids: The set of word IDs.
"""
word_ids = word_ids.asnumpy().tolist()
for i, word_id in enumerate(word_ids):
if self.global_avoid_states:
self.global_avoid_states[i] = self.global_avoid_states[i].consume(word_id)
if self.local_avoid_states:
self.local_avoid_states[i] = self.local_avoid_states[i].consume(word_id)
def avoid(self) -> Tuple[Tuple[int], Tuple[int]]:
"""
Assembles a list of per-hypothesis words to avoid. The indices are (x, y) pairs into the scores
array, which has dimensions (beam_size, target_vocab_size). These values are then used by the caller
to set these items to np.inf so they won't be selected. Words to be avoided are selected by
consulting both the global trie of phrases and the sentence-specific one.
:return: Two lists of indices: the x coordinates and y coordinates.
"""
to_avoid = set() # type: Set[Tuple[int, int]]
for i, state in enumerate(self.global_avoid_states):
for word_id in state.avoid():
if word_id > 0:
to_avoid.add((i, word_id))
for i, state in enumerate(self.local_avoid_states):
for word_id in state.avoid():
if word_id > 0:
to_avoid.add((i, word_id))
return tuple(zip(*to_avoid)) # type: ignore
class ConstrainedHypothesis:
"""
Represents a set of words and phrases that must appear in the output.
A constraint is of two types: sequence or non-sequence.
A non-sequence constraint is a single word and can therefore be followed by anything,
whereas a sequence constraint must be followed by a particular word (the next word in the sequence).
This class also records which constraints have been met.
A list of raw constraints is maintained internally as two parallel arrays. The following raw constraint
represents two phrases that must appear in the output: 14 and 19 35 14.
raw constraint: [[14], [19, 35, 14]]
This is represented internally as:
constraints: [14 19 35 14]
is_sequence: [False False True True]
That is, the constraints are simply concatenated, and we maintain a parallel array indicating whether each
token ID must be followed by the next token ID. The same token ID can be present any number of times.
    :param constraint_list: A list of zero or more raw constraints (each represented as a list of integers).
:param eos_id: The end-of-sentence ID.
"""
def __init__(self,
constraint_list: RawConstraintList,
eos_id: int) -> None:
# `constraints` records the words of the constraints, as a list (duplicates allowed).
# `is_sequence` is a parallel array that records, for each corresponding constraint,
# whether the current word is the non-final word of a phrasal constraint.
self.constraints = [] # type: List[int]
self.is_sequence = [] # type: List[bool]
for phrase in constraint_list:
self.constraints += phrase
self.is_sequence += [True] * len(phrase)
self.is_sequence[-1] = False
self.eos_id = eos_id
# no constraints have been met
self.met = [False for x in self.constraints]
self.last_met = -1
def __len__(self) -> int:
"""
:return: The number of constraints.
"""
return len(self.constraints)
def __str__(self) -> str:
s = []
for i, word_id in enumerate(self.constraints):
s.append(str(word_id) if self.met[i] is False else 'X')
if self.is_sequence[i]:
s.append('->')
return ' '.join(s)
def size(self) -> int:
"""
:return: the number of constraints
"""
return len(self.constraints)
def num_met(self) -> int:
"""
:return: the number of constraints that have been met.
"""
return sum(self.met)
def num_needed(self) -> int:
"""
:return: the number of un-met constraints.
"""
return self.size() - self.num_met()
def allowed(self) -> Set[int]:
"""
Returns the set of constrained words that could follow this one.
For unfinished phrasal constraints, it is the next word in the phrase.
In other cases, it is the list of all unmet constraints.
If all constraints are met, an empty set is returned.
        :return: The set of constrained word IDs that could follow; empty if all constraints are met.
"""
items = set() # type: Set[int]
# Add extensions of a started-but-incomplete sequential constraint
if self.last_met != -1 and self.is_sequence[self.last_met] == 1:
word_id = self.constraints[self.last_met + 1]
if word_id != self.eos_id or self.num_needed() == 1:
items.add(word_id)
# Add all constraints that aren't non-initial sequences
else:
for i, word_id in enumerate(self.constraints):
if not self.met[i] and (i == 0 or not self.is_sequence[i - 1]):
if word_id != self.eos_id or self.num_needed() == 1:
items.add(word_id)
return items
def finished(self) -> bool:
"""
Return true if all the constraints have been met.
:return: True if all the constraints are met.
"""
return self.num_needed() == 0
def is_valid(self, wordid) -> bool:
"""
Ensures </s> is only generated when the hypothesis is completed.
:param wordid: The wordid to validate.
:return: True if all constraints are already met or the word ID is not the EOS id.
"""
return self.finished() or wordid != self.eos_id or (self.num_needed() == 1 and self.eos_id in self.allowed())
def advance(self, word_id: int) -> 'ConstrainedHypothesis':
"""
Updates the constraints object based on advancing on word_id.
There is a complication, in that we may have started but not
yet completed a multi-word constraint. We need to allow constraints
to be added as unconstrained words, so if the next word is
invalid, we must "back out" of the current (incomplete) phrase,
re-setting all of its words as unmet.
:param word_id: The word ID to advance on.
:return: A deep copy of the object, advanced on word_id.
"""
obj = copy.deepcopy(self)
# First, check if we're updating a sequential constraint.
if obj.last_met != -1 and obj.is_sequence[obj.last_met] == 1:
if word_id == obj.constraints[obj.last_met + 1]:
# Here, the word matches what we expect next in the constraint, so we update everything
obj.met[obj.last_met + 1] = True
obj.last_met += 1
else:
# Here, the word is not the expected next word of the constraint, so we back out of the constraint.
index = obj.last_met
while obj.is_sequence[index]:
obj.met[index] = False
index -= 1
obj.last_met = -1
# If not, check whether we're meeting a single-word constraint
else:
# Build a list from all constraints of tuples of the
# form (constraint, whether it's a non-initial sequential, whether it's been met)
constraint_tuples = list(zip(obj.constraints, [False] + obj.is_sequence[:-1], obj.met))
# We are searching for an unmet constraint (word_id) that is not the middle of a phrase and is not met
query = (word_id, False, False)
try:
pos = constraint_tuples.index(query)
obj.met[pos] = True
obj.last_met = pos
except ValueError:
# query not found; identical but duplicated object will be returned
pass
return obj
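# Hedged sketch of the positive-constraint bookkeeping (word IDs and the EOS
# id are arbitrary; not called anywhere): the single-word constraint 14 and
# the phrase 19 35 give three tokens to meet, and once 19 is consumed only 35
# may extend the phrase.
def _constrained_hypothesis_sketch(eos_id=0):
    hyp = ConstrainedHypothesis([[14], [19, 35]], eos_id)
    assert hyp.num_needed() == 3
    assert hyp.allowed() == {14, 19}
    hyp = hyp.advance(19)
    assert hyp.allowed() == {35}
    hyp = hyp.advance(35).advance(14)
    assert hyp.finished()
    return hyp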
def init_batch(raw_constraints: List[Optional[RawConstraintList]],
beam_size: int,
start_id: int,
eos_id: int) -> List[Optional[ConstrainedHypothesis]]:
"""
:param raw_constraints: The list of raw constraints (list of list of IDs).
:param beam_size: The beam size.
:param start_id: The target-language vocabulary ID of the SOS symbol.
:param eos_id: The target-language vocabulary ID of the EOS symbol.
:return: A list of ConstrainedHypothesis objects (shape: (batch_size * beam_size,)).
"""
constraints = [None] * (len(raw_constraints) * beam_size) # type: List[Optional[ConstrainedHypothesis]]
if any(raw_constraints):
for i, raw_list in enumerate(raw_constraints):
num_constraints = sum([len(phrase) for phrase in raw_list]) if raw_list is not None else 0
if num_constraints > 0:
hyp = ConstrainedHypothesis(raw_list, eos_id)
idx = i * beam_size
constraints[idx:idx + beam_size] = [hyp.advance(start_id) for x in range(beam_size)]
return constraints
def get_bank_sizes(num_constraints: int,
beam_size: int,
candidate_counts: List[int]) -> List[int]:
"""
Evenly distributes the beam across the banks, where each bank is a portion of the beam devoted
to hypotheses having met the same number of constraints, 0..num_constraints.
After the assignment, banks with more slots than candidates are adjusted.
:param num_constraints: The number of constraints.
:param beam_size: The beam size.
:param candidate_counts: The empirical counts of number of candidates in each bank.
:return: A distribution over banks.
"""
num_banks = num_constraints + 1
bank_size = beam_size // num_banks
remainder = beam_size - bank_size * num_banks
# Distribute any remainder to the end
assigned = [bank_size for x in range(num_banks)]
assigned[-1] += remainder
# Now, moving right to left, push extra allocation to earlier buckets.
# This encodes a bias for higher buckets, but if no candidates are found, space
# will be made in lower buckets. This may not be the best strategy, but it is important
# that you start pushing from the bucket that is assigned the remainder, for cases where
# num_constraints >= beam_size.
for i in reversed(range(num_banks)):
overfill = assigned[i] - candidate_counts[i]
if overfill > 0:
assigned[i] -= overfill
assigned[(i - 1) % num_banks] += overfill
return assigned
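# Hedged worked example with made-up counts (not called anywhere): two
# constraints give three banks over a beam of 5. The remainder initially
# lands in the top bank, but that bank only has one candidate, so its surplus
# is pushed down to the next bank.
def _bank_sizes_sketch():
    sizes = get_bank_sizes(num_constraints=2, beam_size=5, candidate_counts=[10, 10, 1])
    assert sizes == [1, 3, 1]
    return sizes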
class ConstrainedCandidate:
"""
Object used to hold candidates for the beam in topk().
:param row: The row in the scores matrix.
:param col: The column (word ID) in the scores matrix.
:param score: the associated accumulated score.
:param hypothesis: The ConstrainedHypothesis containing information about met constraints.
"""
__slots__ = ('row', 'col', 'score', 'hypothesis')
def __init__(self,
row: int,
col: int,
score: float,
hypothesis: ConstrainedHypothesis) -> None:
self.row = row
self.col = col
self.score = score
self.hypothesis = hypothesis
def __hash__(self):
return hash((self.row, self.col))
def __eq__(self, other):
return self.row == other.row and self.col == other.col
def __str__(self):
return '({}, {}, {}, {})'.format(self.row, self.col, self.score, self.hypothesis.num_met())
def topk(timestep: int,
batch_size: int,
beam_size: int,
inactive: mx.nd.NDArray,
scores: mx.nd.NDArray,
hypotheses: List[ConstrainedHypothesis],
best_ids: mx.nd.NDArray,
best_word_ids: mx.nd.NDArray,
seq_scores: mx.nd.NDArray) -> Tuple[np.array, np.array, np.array, List[ConstrainedHypothesis], mx.nd.NDArray]:
"""
Builds a new topk list such that the beam contains hypotheses having completed different numbers of constraints.
These items are built from three different types: (1) the best items across the whole
scores matrix, (2) the set of words that must follow existing constraints, and (3) k-best items from each row.
:param timestep: The current decoder timestep.
:param batch_size: The number of segments in the batch.
:param beam_size: The length of the beam for each segment.
:param inactive: Array listing inactive rows (shape: (beam_size,)).
:param scores: The scores array (shape: (batch_size if t==1 else beam_size, target_vocab_size)).
:param hypotheses: The list of hypothesis objects.
:param best_ids: The current list of best hypotheses (shape: (beam_size,)).
:param best_word_ids: The parallel list of best word IDs (shape: (beam_size,)).
:param seq_scores: (shape: (beam_size, 1)).
:return: A tuple containing the best hypothesis rows, the best hypothesis words, the scores,
the updated constrained hypotheses, and the updated set of inactive hypotheses.
"""
if timestep == 1:
beam_size = 1
for sentno in range(batch_size):
rows = slice(sentno * beam_size, (sentno + 1) * beam_size)
if hypotheses[rows.start] is not None and hypotheses[rows.start].size() > 0:
best_ids[rows], best_word_ids[rows], seq_scores[rows], \
hypotheses[rows], inactive[rows] = _topk(timestep,
beam_size,
inactive[rows],
scores[rows],
hypotheses[rows],
best_ids[rows] - rows.start,
best_word_ids[rows],
seq_scores[rows])
# offsetting since the returned smallest_k() indices were slice-relative
best_ids[rows] += rows.start
else:
# If there are no constraints for this sentence in the batch, everything stays
# the same, except we need to mark all hypotheses as active
inactive[rows] = 0
return best_ids, best_word_ids, seq_scores, hypotheses, inactive
def _topk(timestep: int,
beam_size: int,
inactive: mx.nd.NDArray,
scores: mx.nd.NDArray,
hypotheses: List[ConstrainedHypothesis],
best_ids: mx.nd.NDArray,
best_word_ids: mx.nd.NDArray,
sequence_scores: mx.nd.NDArray) -> Tuple[np.array, np.array, np.array,
List[ConstrainedHypothesis], mx.nd.NDArray]:
"""
Builds a new topk list such that the beam contains hypotheses having completed different numbers of constraints.
These items are built from three different types: (1) the best items across the whole
scores matrix, (2) the set of words that must follow existing constraints, and (3) k-best items from each row.
:param timestep: The current decoder timestep.
:param beam_size: The length of the beam for each segment.
:param inactive: Array listing inactive rows (shape: (beam_size,)).
:param scores: The scores array (shape: (beam_size, target_vocab_size)).
:param hypotheses: The list of hypothesis objects.
:param best_ids: The current list of best hypotheses (shape: (beam_size,)).
:param best_word_ids: The parallel list of best word IDs (shape: (beam_size,)).
:param sequence_scores: (shape: (beam_size, 1)).
:return: A tuple containing the best hypothesis rows, the best hypothesis words, the scores,
the updated constrained hypotheses, and the updated set of inactive hypotheses.
"""
num_constraints = hypotheses[0].size()
candidates = set()
# (1) Add all of the top-k items (which were passed) in as long as they pass the constraints
for row, col, seq_score in zip(best_ids, best_word_ids, sequence_scores):
row = int(row.asscalar())
col = int(col.asscalar())
if hypotheses[row].is_valid(col):
seq_score = float(seq_score.asscalar())
new_item = hypotheses[row].advance(col)
cand = ConstrainedCandidate(row, col, seq_score, new_item)
candidates.add(cand)
# For each hypothesis, we add (2) all the constraints that could follow it and
# (3) the best item (constrained or not) in that row
best_next = mx.nd.argmin(scores, axis=1)
for row in range(beam_size):
if inactive[row] or (timestep == 1 and row > 0):
continue
hyp = hypotheses[row]
# (2) add all the constraints that could extend this
nextones = hyp.allowed()
# (3) add the single-best item after this (if it's valid)
col = int(best_next[row].asscalar())
if hyp.is_valid(col):
nextones.add(col)
# Now, create new candidates for each of these items
for col in nextones:
new_item = hyp.advance(col)
score = scores[row, col].asscalar()
cand = ConstrainedCandidate(row, col, score, new_item)
candidates.add(cand)
# Sort the candidates. After allocating the beam across the banks, we will pick the top items
# for each bank from this list
sorted_candidates = sorted(candidates, key=attrgetter('score'))
# The number of hypotheses in each bank
counts = [0 for _ in range(num_constraints + 1)]
for cand in sorted_candidates:
counts[cand.hypothesis.num_met()] += 1
# Adjust allocated bank sizes if there are too few candidates in any of them
bank_sizes = get_bank_sizes(num_constraints, beam_size, counts)
# Sort the candidates into the allocated banks
pruned_candidates = [] # type: List[ConstrainedCandidate]
for i, cand in enumerate(sorted_candidates):
bank = cand.hypothesis.num_met()
if bank_sizes[bank] > 0:
pruned_candidates.append(cand)
bank_sizes[bank] -= 1
num_pruned_candidates = len(pruned_candidates)
inactive[:num_pruned_candidates] = 0
# Pad the beam so array assignment still works
if num_pruned_candidates < beam_size:
inactive[num_pruned_candidates:] = 1
pruned_candidates += [pruned_candidates[num_pruned_candidates - 1]] * (beam_size - num_pruned_candidates)
return (np.array([x.row for x in pruned_candidates]),
np.array([x.col for x in pruned_candidates]),
np.array([[x.score] for x in pruned_candidates]),
[x.hypothesis for x in pruned_candidates],
inactive)
def main(args):
"""
Usage: python3 -m sockeye.lexical_constraints [--bpe BPE_MODEL]
Reads sentences and constraints on STDIN (tab-delimited) and generates the JSON format
that can be used when passing `--json-input` to sockeye.translate. It supports both positive
constraints (phrases that must appear in the output) and negative constraints (phrases that
must *not* appear in the output).
e.g.,
echo -e "Das ist ein Test .\tThis is\ttest" | python3 -m sockeye.lexical_constraints
will produce the following JSON object:
{ "text": "Das ist ein Test .", "constraints": ["This is", "test"] }
If you pass `--avoid` to the script, the constraints will be generated as negative constraints, instead:
echo -e "Das ist ein Test .\tThis is\ttest" | python3 -m sockeye.lexical_constraints --avoid
will produce the following JSON object (note the new keyword):
{ "text": "Das ist ein Test .", "avoid": ["This is", "test"] }
Make sure you apply all preprocessing (tokenization, BPE, etc.) to both the source and the target-side constraints.
You can then translate this object by passing it to Sockeye on STDIN as follows:
python3 -m sockeye.translate -m /path/to/model --json-input --beam-size 20 --beam-prune 20
Note the recommended Sockeye parameters. Beam pruning isn't needed for negative constraints.
"""
import sys
import json
for line in sys.stdin:
line = line.rstrip()
# Constraints are in fields 2+
source, *restrictions = line.split('\t')
obj = {'text': source}
constraints = []
avoid_list = []
for item in restrictions:
if args.avoid:
avoid_list.append(item)
else:
constraints.append(item)
if constraints:
obj['constraints'] = constraints
if avoid_list:
obj['avoid'] = avoid_list
print(json.dumps(obj, ensure_ascii=False), flush=True)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--avoid', action='store_true', help='Constraints are negative constraints')
args = parser.parse_args()
main(args)
|
|
#!/usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Generate stats of CQ usage.'''
import argparse
import calendar
import collections
import datetime
import dateutil.parser
import dateutil.tz
from xml.etree import ElementTree
import infra_libs.logs
import json
import logging
from multiprocessing.pool import ThreadPool
import numbers
import numpy
import re
import simplejson
import subprocess
import sys
import time
import urllib
import urllib2
import urlparse
import requests
from requests.packages import urllib3
STATS_URL = 'http://chromium-cq-status.appspot.com'
# Expects % project.
TREE_STATUS_URL = 'http://%s-status.appspot.com'
PROJECTS = {
'chromium': {
'tree-status': TREE_STATUS_URL % 'chromium',
'type': 'git',
'repo': 'https://chromium.googlesource.com/chromium/src',
},
'blink': {
'tree-status': TREE_STATUS_URL % 'blink',
'type': 'svn',
'repo': 'svn://svn.chromium.org/blink/trunk/',
},
'skia': {
'tree-status': TREE_STATUS_URL % 'skia-tree',
'type': 'git',
'repo': 'https://skia.googlesource.com/skia',
},
}
# Map of intervals to minutes.
INTERVALS = {
'week': 60 * 24 * 7,
'day': 60 * 24,
'hour': 60,
'15min': 15,
}
VALID_REASONS = collections.OrderedDict([
('manual-cancel', {
'item': 'stopped manually',
'message': 'stopped manually (CQ box was unchecked)',
}),
('missing-lgtm', {
'item': 'missed LGTM',
'message': 'are missing LGTM',
}),
('not-lgtm', {
'item': 'NOT LGTMs',
'message': 'have been disapproved (NOT LGTM)',
}),
('failed-patch', {
'item': 'failures',
'message': 'failed to apply patch',
}),
('invalid-delimiter', {
'item': 'errors',
'message': 'have incorrect CQ_EXTRA_TRYBOTS flag',
}),
('failed-presubmit-bot', {
'item': 'failures',
'message': 'failed presubmit bot (often due to missing OWNERS LGTM)',
}),
('failed-remote-ref-presubmit', {
'item': 'failures',
'message': 'did not contain NOTRY & NOPRESUBMIT for non master remote '
'ref',
}),
])
FLAKY_REASONS = collections.OrderedDict([
('failed-commit', {
'item': 'failures',
'message': 'failed to commit',
}),
('failed-jobs', {
'item': 'failures',
'message': 'failed jobs (excluding presubmit)',
}),
('failed-to-trigger', {
'item': 'failures',
'message': 'failed to trigger jobs',
}),
('failed-presubmit-check', {
'item': 'failures',
'message': 'failed presubmit check',
}),
])
KNOWN_REASONS = collections.OrderedDict()
KNOWN_REASONS.update(VALID_REASONS)
KNOWN_REASONS.update(FLAKY_REASONS)
REASONS = collections.OrderedDict()
REASONS.update(KNOWN_REASONS)
REASONS['failed-unknown'] = {
'item': 'failures',
'message': 'failed for any other reason',
}
# These values are buildbot constants used for Build and BuildStep.
# This line was copied from master/buildbot/status/builder.py.
SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY, TRY_PENDING = range(7)
def parse_args():
parser = argparse.ArgumentParser(description=sys.modules['__main__'].__doc__)
parser.add_argument(
'--project',
required=True,
choices=PROJECTS.keys(),
help='Collect stats about this project.')
parser.add_argument(
'--bot', type=str, dest='bots',
action='append',
default=['[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
'[email protected]',
],
help=('Add an author to be treated as a bot. '
'Repeat to add several bots. Default: %(default)s.'))
parser.add_argument(
'--seq', action='store_true',
help='Run everything sequentially for debugging.')
parser.add_argument(
'--thread-pool', type=int, default=200,
help='Fetch data using this many parallel threads. Default=%(default)s.')
parser.add_argument(
'--list-rejections', action='store_true',
help='List rejected CLs and reasons for rejection.')
parser.add_argument(
'--list-false-rejections', action='store_true',
help='List CLs that were committed in more than one attempt.')
parser.add_argument(
'--list-uncategorized-flakes', action='store_true',
help='List CLs which have not been assigned to existing flake '
'categories.')
parser.add_argument(
'--use-logs', action='store_true',
default=True,
help=('On by default. '
'Fetch the detailed logs and recompute the stats in this script, '
'instead of fetching precomputed stats. '
'Slower, but more accurate, and helps validate the cached stats.'))
parser.add_argument(
'--use-cache',
dest='use_logs',
action='store_false',
help=('Fetch the cached stats from the app. Opposite to --use-logs.'))
parser.add_argument(
'--date',
help='Start date of stats YYYY-MM-DD[ HH[:MM]]. Default: --range ago.')
parser.add_argument('--range',
choices=INTERVALS.keys(),
default='week',
help='Time range to print stats for.')
infra_libs.logs.add_argparse_options(parser, default_level=logging.ERROR)
args = parser.parse_args()
if args.date:
args.date = date_from_string(args.date)
else:
args.date = (datetime.datetime.now() -
datetime.timedelta(minutes=INTERVALS[args.range]))
return args
def date_from_string(iso_str):
try:
return dateutil.parser.parse(iso_str)
except ValueError:
pass
raise ValueError('Unrecognized date/time format: %s' % iso_str)
def date_from_timestamp(timestamp):
return datetime.datetime.utcfromtimestamp(float(timestamp))
def date_from_git(date_str):
"""If date_str is not valid or None, return None."""
if not date_str:
return None
date = None
try:
date = dateutil.parser.parse(date_str)
if date.tzinfo:
# Convert date to UTC timezone.
date = date.astimezone(dateutil.tz.tzutc())
# Make date offset-naive like the other date objects in this module.
date = date.replace(tzinfo=None)
except ValueError:
pass
return date
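# Hedged sketch (the timestamp string is made up; not called anywhere):
# timezone-aware dates are converted to UTC and returned offset-naive, to
# match the other date helpers in this module.
def _date_from_git_sketch():
  date = date_from_git('2015-06-01T12:00:00+02:00')
  assert date == datetime.datetime(2015, 6, 1, 10, 0, 0)
  assert date_from_git(None) is None
  return date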
def local_date_to_timestamp(date):
return time.mktime(date.timetuple())
def local_to_utc(local_time):
timestamp = local_date_to_timestamp(local_time)
utcTime = datetime.datetime.utcfromtimestamp(timestamp)
return utcTime
session = requests.Session()
http_adapter = requests.adapters.HTTPAdapter(
max_retries=urllib3.util.Retry(total=4, backoff_factor=0.5),
pool_block=True)
session.mount('http://', http_adapter)
session.mount('https://', http_adapter)
def fetch_json(url):
return session.get(url).json()
def fetch_tree_status(project, end_date, start_date=None, limit=1000):
"""Fetch all tree events in the given interval.
Args:
project (str): e.g. 'chromium' or 'blink'.
end_date (datetime):
start_date (datetime): define the time interval in local timezone.
limit (int): max. number of events.
Returns:
List of events {'open': bool, 'date': datetime} sorted by date.
"""
end_timestamp = int(time.mktime(end_date.timetuple()))
params = {
'format': 'json',
'limit': limit,
# Confusingly, chromium-status app defines the range as
# endTime <= t < startTime.
'startTime': end_timestamp,
}
if start_date:
params['endTime'] = int(time.mktime(start_date.timetuple()))
query = 'allstatus?' + urllib.urlencode(params)
url = urlparse.urljoin(PROJECTS[project]['tree-status'], query)
logging.debug('Fetching %s', url)
status = fetch_json(url)
# Bug in AE app: it may return events outside of time range.
def entry(event):
date_utc = date_from_string(event['date'])
date_local = date_from_timestamp(calendar.timegm(date_utc.utctimetuple()))
return {'date': date_local,
'open': event['general_state'] == 'open'}
def in_range(entry):
logging.debug('in_range(%r)', entry)
if entry['date'] >= end_date:
return False
if not start_date:
return True
return start_date <= entry['date']
if not status or type(status) is not list:
status = []
status = [entry(event) for event in status]
status = sorted([e for e in status if in_range(e)], key=lambda e: e['date'])
return status
def fetch_git_page(repo_url, cursor=None, page_size=2000):
"""Fetch one page worth of logs from gitiles."""
params = {
'pretty': 'full',
'format': 'JSON',
'n': page_size,
}
if cursor:
params.update({'s': cursor})
url = '%s/%s?%s' % (repo_url, '/+log/master', urllib.urlencode(params))
logging.debug('fetch_git_page: url = %s', url)
try:
# Strip off the anti-XSS string from the response.
response = urllib2.urlopen(url)
lines = [l.rstrip() for l in response if l.rstrip() != ")]}'"]
raw_data = ''.join(lines)
page = json.loads(raw_data)
except (IOError, ValueError) as e:
page = {}
logging.error('Failed to fetch a page: %s', e)
return page
def fetch_git_logs(repo, from_date, to_date):
"""Fetch all logs from Gitiles for the given date range.
Gitiles does not natively support time ranges, so we just fetch
everything until the range is covered. Assume that logs are ordered
in reverse chronological order.
"""
cursor = ''
commit_date = to_date
data = []
while cursor is not None:
page = fetch_git_page(repo, cursor)
logs = page.get('log', [])
cursor = page.get('next')
for log in logs:
committer = log.get('committer', {})
commit_date = date_from_git(committer.get('time'))
if not commit_date:
continue
if commit_date > to_date:
continue
if commit_date < from_date:
break
data.append({
'author': log.get('author', {}).get('email'),
'date': commit_date,
'commit-bot': bool('commit-bot' in committer.get('email', '')),
'revision': log.get('commit'),
})
if commit_date < from_date:
break
return data
def fetch_svn_logs(repo, from_date, to_date):
from_date = local_to_utc(from_date)
to_date = local_to_utc(to_date)
range_str = (
'{%s +0000}:{%s +0000}' % (from_date, to_date))
out = subprocess.check_output(
['svn', 'log', '--with-all-revprops', '--xml', repo, '-r', range_str])
data = []
for logentry in ElementTree.XML(out).findall('logentry'):
date_str = logentry.find('date').text
date = date_from_string(date_str)
entry = {
'author': logentry.find('author').text,
'date': date,
'revprops': {},
'commit-bot': False,
}
revprops = logentry.find('revprops')
if revprops is not None:
for revprop in revprops.findall('property'):
entry['revprops'][revprop.attrib['name']] = revprop.text
if revprop.attrib['name'] == 'commit-bot':
entry['commit-bot'] = True
data.append(entry)
return data
def fetch_stats(args, begin_date=None, stats_range=None):
if not begin_date:
begin_date = args.date
if not stats_range:
stats_range = args.range
if begin_date:
timestamp = (int(local_date_to_timestamp(begin_date)) +
INTERVALS[stats_range] * 60)
else:
timestamp = int(time.time())
params = {
'project': args.project,
'interval_minutes': INTERVALS[stats_range],
'end': timestamp,
'count': 2, # Fetch requested and previous set, for comparison.
}
query = 'stats/query?' + urllib.urlencode(params)
url = urlparse.urljoin(STATS_URL, query)
logging.debug('Fetching %s', url)
return fetch_json(url)
# "Dangerous default value []": pylint: disable=W0102
def fetch_cq_logs(start_date=None, end_date=None, filters=[]):
begin_time = None
end_time = None
if start_date:
begin_time = int(time.mktime(start_date.timetuple()))
if end_date:
end_time = int(time.mktime(end_date.timetuple()))
results = []
cursor = None
while True:
params = {}
if begin_time:
params['begin'] = begin_time
if end_time:
params['end'] = end_time
if cursor:
params['cursor'] = cursor
query = 'query/%s?%s' % ('/'.join(filters), urllib.urlencode(params))
url = urlparse.urljoin(STATS_URL, query)
logging.debug('Fetching %s', url)
data = fetch_json(url)
results.extend(data.get('results', []))
logging.info('fetch_cq_logs: Got %d results', len(results))
logging.debug(' %s', '\n '.join(['%s %s' % (
patch_url((r.get('fields', {}).get('issue', 0),
r.get('fields', {}).get('patchset', 0))),
r.get('fields', {}).get('action', '')) for r in results]))
cursor = data.get('cursor', None)
if not data.get('more', False) or not cursor:
break
return results
def default_stats():
"""Generate all the required stats fields with default values."""
stats = {
'begin': datetime.datetime.now(),
'end': datetime.datetime(1, 1, 1),
'issue-count': 0,
'patchset-count': 0,
'attempt-count': 0,
'patch_stats': {},
'patchset-false-reject-count': 0, # Deprecated stats?
'attempt-reject-count': 0, # Num. of rejected attempts
'attempt-false-reject-count': 0, # Num. of falsely rejected attempts
'false-rejections': [], # patches with falsely rejected attempts
'rejections': [], # patches with rejected attempts
'rejected-patches': set(), # Patches that never committed
'patchset-commit-count': 0,
'patchset-total-commit-queue-durations': derive_list_stats([0]),
'patchset-durations': derive_list_stats([0]),
'patchset-committed-durations': derive_list_stats([0]),
'patchset-attempts': derive_list_stats([0]),
'patchset-committed-attempts': derive_list_stats([0]),
'patchset-committed-tryjob-retries': derive_list_stats([0]),
'patchset-committed-global-retry-quota': derive_list_stats([0]),
'tree': {'open': 0.0, 'total': 0.0},
'usage': {},
}
for reason in REASONS:
stats[reason] = []
return stats
def organize_stats(stats, init=None):
"""Changes cached lists of stats into dictionaries.
  Args:
    stats (dict): set of stats as returned by chromium-cq-status.
    init (dict): optional; if given, add to these stats instead of computing
      them from scratch.
  Returns:
    result (dict): mapping stat.name -> <stats json>.
"""
if 'results' not in stats:
return None
result = init if init else default_stats()
for dataset in stats['results']:
result['begin'] = min(
date_from_timestamp(dataset['begin']),
result.get('begin', datetime.datetime.now()))
result['end'] = max(date_from_timestamp(dataset['end']), result['end'])
for data in dataset['stats']:
if data['type'] == 'count':
result[data['name']] = data['count']
else:
assert data['type'] == 'list'
result[data['name']] = {
'10': data['percentile_10'],
'25': data['percentile_25'],
'50': data['percentile_50'],
'75': data['percentile_75'],
'90': data['percentile_90'],
'95': data['percentile_95'],
'99': data['percentile_99'],
'min': data['min'],
'max': data['max'],
'mean': data['mean'],
'size': data['sample_size'],
}
return result
def derive_list_stats(series):
if not series:
series = [0]
return {
'10': numpy.percentile(series, 10),
'25': numpy.percentile(series, 25),
'50': numpy.percentile(series, 50),
'75': numpy.percentile(series, 75),
'90': numpy.percentile(series, 90),
'95': numpy.percentile(series, 95),
'99': numpy.percentile(series, 99),
'min': min(series),
'max': max(series),
'mean': numpy.mean(series),
'size': len(series),
'raw': series,
}
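# A minimal usage sketch of the percentile dictionary returned above (numpy's
# default linear interpolation gives the median of an even-sized list):
#
#   s = derive_list_stats([1, 2, 3, 4])
#   # s['50'] == 2.5, s['mean'] == 2.5, s['min'] == 1, s['max'] == 4,
#   # s['size'] == 4, s['raw'] == [1, 2, 3, 4]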
def sort_by_count(elements):
return sorted(elements, key=lambda p: p['count'], reverse=True)
def stats_by_count_entry(patch_stats, name, patch, reasons):
entry = {
'count': patch_stats[name],
'patch_id': patch,
'failed-jobs-details': patch_stats['failed-jobs-details']
}
for n in reasons:
if n in patch_stats:
entry[n] = patch_stats[n]
assert type(entry[n]) is int, 'Bad type in %s[%s]: %r\nEntry=%r' % (
patch, n, entry[n], entry)
return entry
# "Dangerous default value []": pylint: disable=W0102
def stats_by_count(patch_stats, name, reasons=[]):
return sort_by_count([
stats_by_count_entry(patch_stats[p], name, p, reasons)
for p in patch_stats if patch_stats[p].get(name)])
def _derive_stats_from_patch_stats(stats):
patch_stats = stats['patch_stats']
stats['attempt-count'] = sum(
patch_stats[p]['attempts'] for p in patch_stats)
stats['patchset-false-reject-count'] = sum(
patch_stats[p]['false-rejections'] for p in patch_stats)
stats['attempt-reject-count'] = sum(
patch_stats[p]['rejections'] for p in patch_stats)
stats['rejected-patches'] = set(
p for p in patch_stats if not patch_stats[p]['committed'])
stats['false-rejections'] = stats_by_count(
patch_stats, 'false-rejections', REASONS)
stats['rejections'] = stats_by_count(patch_stats, 'rejections', REASONS)
for r in REASONS:
stats[r] = stats_by_count(patch_stats, r, set(REASONS) - set([r]))
stats['patchset-commit-count'] = len([
p for p in patch_stats if patch_stats[p]['committed']])
stats['patchset-total-commit-queue-durations'] = derive_list_stats([
patch_stats[p]['patchset-duration-wallclock'] for p in patch_stats])
stats['patchset-durations'] = derive_list_stats([
patch_stats[p]['patchset-duration'] for p in patch_stats])
stats['patchset-committed-durations'] = derive_list_stats([
patch_stats[p]['patchset-duration'] for p in patch_stats
if patch_stats[p]['committed']])
stats['patchset-attempts'] = derive_list_stats([
patch_stats[p]['attempts'] for p in patch_stats])
stats['patchset-committed-attempts'] = derive_list_stats([
patch_stats[p]['attempts'] for p in patch_stats
if patch_stats[p]['committed']])
stats['patchset-committed-tryjob-retries'] = derive_list_stats([
patch_stats[p]['tryjob-retries'] for p in patch_stats
if patch_stats[p]['committed']])
stats['patchset-committed-global-retry-quota'] = derive_list_stats([
patch_stats[p]['global-retry-quota'] for p in patch_stats
if patch_stats[p]['committed']])
def derive_stats(args, begin_date, init_stats=None):
"""Process raw CQ updates log and derive stats.
Fetches raw CQ events and returns the same format as organize_stats().
  If ``init_stats`` is given, preserve the job stats and replace the
  other stats.
"""
stats = init_stats or default_stats()
filters = ['project=%s' % args.project, 'action=patch_stop']
end_date = begin_date + datetime.timedelta(minutes=INTERVALS[args.range])
results = fetch_cq_logs(begin_date, end_date, filters=filters)
if not results:
return stats
stats['begin'] = date_from_timestamp(results[-1]['timestamp'])
stats['end'] = date_from_timestamp(results[0]['timestamp'])
  # Collect the set of issues and the set of (issue, patchset) pairs seen.
patches, issues = set(), set()
for reason in results:
issues.add(reason['fields']['issue'])
patches.add((reason['fields']['issue'], reason['fields']['patchset']))
stats['issue-count'] = len(issues)
stats['patchset-count'] = len(patches)
patch_stats = {}
# Fetch and process each patchset log
def get_patch_stats(patch_id):
return derive_patch_stats(begin_date, end_date, patch_id)
if args.seq or not args.thread_pool:
iterable = map(get_patch_stats, patches)
else:
pool = ThreadPool(min(args.thread_pool, len(patches)))
iterable = pool.imap_unordered(get_patch_stats, patches)
for patch_id, pstats in iterable:
if not pstats['supported']:
continue
patch_stats[patch_id] = pstats
stats['patch_stats'] = patch_stats
_derive_stats_from_patch_stats(stats)
return stats
def patch_url(patch_id):
return '%s/patch-status/%s/%s' % ((STATS_URL,) + patch_id)
def parse_json(obj, return_type=None):
"""Attempt to interpret a string as JSON.
Guarantee the return type if given, pass through otherwise.
"""
result = obj
if type(obj) in [str, unicode]:
try:
result = json.loads(obj)
except ValueError as e:
logging.error('Could not decode json in "%s": %s', obj, e)
# If the type is wrong, return an empty object of the correct type.
# In most cases, casting to the required type will not work anyway
# (e.g. list to dict).
if return_type and type(result) is not return_type:
result = return_type()
return result
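# A few illustrative calls, consistent with the logic above:
#
#   parse_json('{"a": 1}')                  # -> {'a': 1}
#   parse_json('[1, 2]', return_type=dict)  # -> {} (wrong type, so an empty
#                                           #    object of the right type)
#   parse_json({'already': 'decoded'})      # -> passed through unchanged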
def parse_failing_tryjobs(message):
"""Parse the message to extract failing try jobs."""
builders = []
msg_lines = message.splitlines()
for line in msg_lines[1:]:
words = line.split(None, 1)
if not words:
continue
builder = words[0]
builders.append(builder)
return builders
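# The message format is assumed to be one builder per line after a header
# line; the first word of each line is taken as the builder name (the names
# below are hypothetical):
#
#   msg = ('Try jobs failed on following builders:\n'
#          '  win_rel http://build.example.com/...\n'
#          '  mac_rel http://build.example.com/...')
#   parse_failing_tryjobs(msg)  # -> ['win_rel', 'mac_rel']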
def derive_patch_stats(begin_date, end_date, patch_id):
"""``patch_id`` is a tuple (issue, patchset)."""
results = fetch_cq_logs(start_date=begin_date, end_date=end_date, filters=[
'issue=%s' % patch_id[0], 'patchset=%s' % patch_id[1]])
  # The results should already be ordered, but sort them again just to be sure.
results = sorted(results, key=lambda r: r['timestamp'], reverse=True)
logging.debug('derive_patch_stats(%r): fetched %d entries.',
patch_id, len(results))
# Group by attempts
attempts = []
def new_attempt():
attempt_empty = {
'id': 0,
'begin': 0.0,
'end': 0.0,
'duration': 0.0,
'actions': [],
'committed': False,
'reason': {},
'tryjob-retries': 0,
'global-retry-quota': 0,
'supported': True,
}
for reason in REASONS:
attempt_empty[reason] = False
return attempt_empty
def add_attempt(attempt, counter):
"""Create a new attempt from accumulated actions."""
assert attempt['actions']
attempt['id'] = counter
attempt['duration'] = attempt['end'] - attempt['begin']
known_reasons = [r for r in KNOWN_REASONS if attempt[r]]
if not attempt['committed'] and not known_reasons:
attempt['failed-unknown'] = True
logging.debug(
'add_attempt: #%d (%s)',
attempt['id'],
', '.join([r for r in REASONS if attempt[r]]))
attempts.append(attempt)
# An attempt is a set of actions between patch_start and patch_stop
# actions. Repeated patch_start / patch_stop actions are ignored.
attempt = new_attempt()
failing_builders = {}
state = 'stop'
attempt_counter = 0
for result in reversed(results):
action = result['fields'].get('action')
verifier = result['fields'].get('verifier')
dry_run = result['fields'].get('dry_run')
if state == 'stop':
if action == 'patch_start' and not dry_run:
state = 'start'
attempt['begin'] = result['timestamp']
if state != 'start':
continue
attempt['actions'].append(result)
if action == 'patch_stop':
attempt['end'] = result['timestamp']
message = result['fields'].get('message', '')
if 'CQ bit was unchecked on CL' in message:
attempt['manual-cancel'] = True
if 'No LGTM' in message:
attempt['missing-lgtm'] = True
if 'A disapproval has been posted' in message:
attempt['not-lgtm'] = True
if 'Transient error: Invalid delimiter' in message:
attempt['invalid-delimiter'] = True
if 'Failed to commit' in message:
attempt['failed-commit'] = True
if('Failed to apply patch' in message or
'Failed to apply the patch' in message):
attempt['failed-patch'] = True
if 'Presubmit check' in message:
attempt['failed-presubmit-check'] = True
if 'CLs for remote refs other than refs/heads/master' in message:
attempt['failed-remote-ref-presubmit'] = True
if 'Try jobs failed' in message:
if 'presubmit' in message:
attempt['failed-presubmit-bot'] = True
else:
attempt['failed-jobs'] = message
builders = parse_failing_tryjobs(message)
for b in builders:
failing_builders.setdefault(b, 0)
failing_builders[b] += 1
if 'Exceeded time limit waiting for builds to trigger' in message:
attempt['failed-to-trigger'] = True
attempt_counter += 1
add_attempt(attempt, attempt_counter)
attempt = new_attempt()
state = 'stop'
continue
if action == 'patch_committed':
attempt['committed'] = True
# TODO(sergeyberezin): enable this after this action is stable in CQ.
if action == 'patch_failed':
attempt['reason'] = parse_json(
result['fields'].get('reason', {}), return_type=dict)
logging.info('Attempt reason: %r', attempt['reason'])
if attempt['reason'].get('fail_type') == 'reviewer_lgtm':
attempt['missing-lgtm'] = True
if attempt['reason'].get('fail_type') == 'commit':
attempt['failed-commit'] = True
if attempt['reason'].get('fail_type') == 'simple try job':
failed_jobs = parse_json(attempt['reason'].get(
'fail_details', [('unknown_master', 'unknown_bot')]))
# Remove presubmit bot - it's accounted separately.
        failed_jobs = [j for j in failed_jobs if 'presubmit' not in j[1]]
attempt['failed-jobs'] = failed_jobs
if action == 'verifier_custom_trybots':
attempt['supported'] = False
if action == 'verifier_retry':
attempt['tryjob-retries'] += 1
if verifier == 'try job' and action in ('verifier_pass', 'verifier_fail'):
# There should be only one pass or fail per attempt. In case there are
# more (e.g. due to CQ being stateless), just take the maximum seen value.
attempt['global-retry-quota'] = max(
attempt['global-retry-quota'],
result['fields'].get('global_retry_quota', 0))
stats = {}
committed_set = set(a['id'] for a in attempts if a['committed'])
stats['committed'] = len(committed_set)
stats['attempts'] = len(attempts)
stats['rejections'] = stats['attempts'] - stats['committed']
stats['supported'] = all(a['supported'] for a in attempts)
logging.info('derive_patch_stats: %s has %d attempts, committed=%d',
patch_url(patch_id), len(attempts), stats['committed'])
valid_reasons_set = set()
for reason in VALID_REASONS:
s = set(a['id'] for a in attempts if a[reason])
stats[reason] = len(s)
valid_reasons_set.update(s)
for reason in set(REASONS) - set(VALID_REASONS):
stats[reason] = len(set(a['id'] for a in attempts if a[reason]))
# Populate failed builders.
stats['failed-jobs-details'] = failing_builders
stats['false-rejections'] = 0
if stats['committed']:
stats['false-rejections'] = len(
set(a['id'] for a in attempts) - committed_set - valid_reasons_set)
# Sum of attempt duration.
stats['patchset-duration'] = sum(a['duration'] for a in attempts)
if attempts:
stats['patchset-duration-wallclock'] = (
attempts[-1]['end'] - attempts[0]['begin'])
else:
stats['patchset-duration-wallclock'] = 0.0
stats['tryjob-retries'] = sum(a['tryjob-retries'] for a in attempts)
stats['global-retry-quota'] = sum(a['global-retry-quota'] for a in attempts)
return patch_id, stats
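# A hedged usage sketch (the issue/patchset numbers are hypothetical). The
# returned dict carries the per-patch counters consumed by
# _derive_stats_from_patch_stats(), e.g. 'attempts', 'committed', 'rejections',
# 'false-rejections', 'patchset-duration', 'patchset-duration-wallclock',
# 'tryjob-retries' and 'global-retry-quota'.
#
#   patch_id, pstats = derive_patch_stats(begin_date, end_date, (123456, 1))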
def derive_tree_stats(project, start_date, end_date):
"""Given a list of tree status events, derive tree closure stats."""
# Fetch one more event right before the range, so we know the
# initial tree status.
status = (fetch_tree_status(project, end_date=start_date, limit=1) +
fetch_tree_status(project, end_date, start_date))
stats = {'open': 0.0, 'total': (end_date - start_date).total_seconds()}
if not status:
return stats
logging.debug('Tree status:\n%s', '\n'.join([' %r' % e for e in status]))
is_open = status[0]['open']
curr_date = start_date
for event in status[1:]:
delta = event['date'] - curr_date
if is_open and not event['open']:
stats['open'] += delta.total_seconds()
logging.debug('Tree was open from %s to %s for %s (total of %s)',
curr_date, event['date'],
delta, datetime.timedelta(seconds=stats['open']))
if not is_open:
curr_date = event['date']
is_open = event['open']
# Account for the remaining time after the last event.
if is_open:
delta = end_date - curr_date
stats['open'] += delta.total_seconds()
logging.debug('Tree was open from %s to %s for %s (total of %s)',
curr_date, end_date,
delta, datetime.timedelta(seconds=stats['open']))
return stats
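# A small worked example (times are hypothetical): if the tree is open at
# start_date, closes 2 hours in, reopens 3 hours in, and the window is 8 hours
# long, the loop above counts 2 open hours before the closure plus 5 open
# hours after the reopen, so stats['open'] == 7 * 3600 seconds.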
def derive_log_stats(log_data, bots):
# Calculate stats.
cq_commits = [v for v in log_data if v['commit-bot']]
users = {}
for commit in cq_commits:
users[commit['author']] = users.get(commit['author'], 0) + 1
committers = {}
manual_committers = {}
bot_committers = {}
bot_manual_committers = {}
for commit in log_data:
committers[commit['author']] = committers.get(commit['author'], 0) + 1
if not commit['commit-bot']:
manual_committers[commit['author']] = manual_committers.get(
commit['author'], 0) + 1
if commit['author'] in bots:
bot_committers[commit['author']] = bot_committers.get(
commit['author'], 0) + 1
if not commit['commit-bot']:
bot_manual_committers[commit['author']] = bot_manual_committers.get(
commit['author'], 0) + 1
stats = {}
stats['cq_commits'] = len(cq_commits)
stats['total_commits'] = len(log_data)
stats['users'] = len(users)
stats['committers'] = len(committers)
stats['manual_committers'] = len(manual_committers)
stats['manual_commits'] = sum(x for x in manual_committers.itervalues())
stats['bot_committers'] = len(bot_committers)
stats['bot_commits'] = sum(x for x in bot_committers.itervalues())
stats['bot_manual_commits'] = sum(
x for x in bot_manual_committers.itervalues())
stats['manual_only_committers'] = {
a: c for a, c in committers.iteritems()
if c == manual_committers.get(a, 0)}
return stats
def derive_git_stats(project, start_date, end_date, bots):
log_data = fetch_git_logs(PROJECTS[project]['repo'], start_date, end_date)
return derive_log_stats(log_data, bots)
def derive_svn_stats(project, start_date, end_date, bots):
log_data = fetch_svn_logs(PROJECTS[project]['repo'], start_date, end_date)
return derive_log_stats(log_data, bots)
def percentage_tuple(data, total):
num_data = data if isinstance(data, numbers.Number) else len(data)
num_total = total if isinstance(total, numbers.Number) else len(total)
percent = 100. * num_data / num_total if num_total else 0.
return num_data, num_total, percent
def percentage(data, total):
return percentage_tuple(data, total)[2]
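# Both helpers accept raw counts or sized collections:
#
#   percentage_tuple([1, 2, 3], 4)  # -> (3, 4, 75.0)
#   percentage(50, 200)             # -> 25.0
#   percentage(1, 0)                # -> 0.0 (guards against division by zero)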
def round_timedelta(seconds):
# We never care about the milliseconds when printing timedeltas:
return datetime.timedelta(seconds=round(seconds))
def output(fmt='', *args):
"""An equivalent of print to mock out in testing."""
print fmt % args
def print_attempt_counts(stats, name, message, item_name='',
details=False, committed=None, indent=0,
print_zero=False):
"""Print a summary of a ``name`` slice of attempts.
|committed|: None=print all, True=only committed patches, False=only
rejected patches.
|print_zero|: print stats even if no attempts match."""
if not item_name:
item_name = message
patches = [
p for p in stats[name]
if committed is None or
bool(stats['patch_stats'][p['patch_id']]['committed']) is committed]
count = sum(p['count'] for p in patches)
if not print_zero and not count:
return
failing_builders = {}
for p in patches:
for b, cnt in p['failed-jobs-details'].iteritems():
failing_builders.setdefault(b, {})
failing_builders[b][p['patch_id']] = cnt
  indent_str = ' ' * indent
if message.startswith('failed jobs'):
output(
'%s%4d attempt%s (%.1f%% of %d attempts) %s: %d in %d%s patches',
indent_str, count, ' ' if count == 1 else 's',
percentage(count, stats['attempt-count']),
stats['attempt-count'],
message,
sum(sum(d.values()) for d in failing_builders.values()),
len(patches),
{True: ' committed', False: ' uncommitted'}.get(committed, ''))
else:
output(
'%s%4d attempt%s (%.1f%% of %d attempts) %s in %d%s patches',
indent_str, count, ' ' if count == 1 else 's',
percentage(count, stats['attempt-count']),
stats['attempt-count'],
message,
len(patches),
{True: ' committed', False: ' uncommitted'}.get(committed, ''))
if details:
lines = []
for p in patches:
line = ' %d %s %s' % (
p['count'], item_name, patch_url(p['patch_id']))
causes = ['%d %s' % (p['failed-jobs-details'][k], k)
for k in p['failed-jobs-details']]
line += ' (%s)' % ', '.join(causes)
lines.append(line)
output('\n'.join(lines))
output()
def print_duration(name, stats, print_name=None):
if not print_name:
print_name = name.capitalize()
cq_only = round_timedelta(stats['patchset-durations'][name])
wallclock = round_timedelta(
stats['patchset-total-commit-queue-durations'][name])
output('\n%s duration in CQ trying a patch:', print_name)
output(
' wallclock: %8s (%3d min).',
wallclock, round(wallclock.total_seconds() / 60.0))
output(
' sum of attempts: %8s (%3d min).',
cq_only, round(cq_only.total_seconds() / 60.0))
def print_usage(stats):
if not stats['usage']:
return
output()
output(
'CQ users: %6d out of %6d total committers %6.2f%%',
stats['usage']['users'], stats['usage']['committers'],
percentage(stats['usage']['users'], stats['usage']['committers']))
fmt_str = (
' Committed %6d out of %6d commits %6.2f%%. ')
data = percentage_tuple(stats['usage']['cq_commits'],
stats['usage']['total_commits'])
if stats['usage']['bot_manual_commits']:
fmt_str += ' (%6.2f%% by humans)'
data += (percentage(stats['usage']['cq_commits'],
stats['usage']['total_commits'] -
stats['usage']['bot_manual_commits']),)
output(fmt_str, *data)
output()
output('Bots: %6d out of %6d total committers %6.2f%%',
*percentage_tuple(stats['usage']['bot_committers'],
stats['usage']['committers']))
output(' Committed by CQ %6d out of %6d commits %6.2f%%',
*percentage_tuple(stats['usage']['bot_commits'],
stats['usage']['total_commits']))
output(' Committed directly %6d out of %6d commits %6.2f%%',
*percentage_tuple(stats['usage']['bot_manual_commits'],
stats['usage']['total_commits']))
output()
output('Manual committers: %6d out of all %6d users %6.2f%%',
*percentage_tuple(stats['usage']['manual_committers'],
stats['usage']['committers']))
output(' Committed %6d out of %6d commits %6.2f%%',
*percentage_tuple(stats['usage']['manual_commits'],
stats['usage']['total_commits']))
def print_tree_status(stats):
output()
output(
'Total time tree open: %.1f hours of %.1f hours (%.2f%%). ',
stats['tree']['open'] / 3600.0,
stats['tree']['total'] / 3600.0,
percentage(stats['tree']['open'], stats['tree']['total']))
def print_flakiness_stats(args, stats):
def get_flakiness_stats(issue_patchset):
issue, patchset = issue_patchset
try:
try_job_results = fetch_json(
'https://codereview.chromium.org/api/%d/%d/try_job_results' % (
issue, patchset))
except simplejson.JSONDecodeError as e:
# This can happen e.g. for private issues where we can't fetch the JSON
# without authentication.
logging.warn('%r (issue:%d, patchset:%d)', e, issue, patchset)
return {}
result_counts = {}
uncategorized_flakes = collections.defaultdict(list)
for result in try_job_results:
master, builder = result['master'], result['builder']
build_properties = json.loads(result.get('build_properties', '{}'))
result_counts.setdefault((master, builder), {
'successes': 0,
'failures': 0,
'infra_failures': 0,
'compile_failures': 0,
'test_failures': 0,
'invalid_results_failures': 0,
'patch_failures': 0,
'other_failures': 0,
})
if result['result'] in (SUCCESS, WARNINGS):
result_counts[(master, builder)]['successes'] += 1
elif result['result'] in (FAILURE, EXCEPTION):
result_counts[(master, builder)]['failures'] += 1
if result['result'] == EXCEPTION:
result_counts[(master, builder)]['infra_failures'] += 1
elif build_properties.get('failure_type') == 'COMPILE_FAILURE':
result_counts[(master, builder)]['compile_failures'] += 1
elif build_properties.get('failure_type') == 'TEST_FAILURE':
result_counts[(master, builder)]['test_failures'] += 1
elif build_properties.get('failure_type') == 'INVALID_TEST_RESULTS':
result_counts[(master, builder)]['invalid_results_failures'] += 1
elif build_properties.get('failure_type') == 'PATCH_FAILURE':
result_counts[(master, builder)]['patch_failures'] += 1
else:
result_counts[(master, builder)]['other_failures'] += 1
uncategorized_flakes[(master, builder)].append(result)
try_job_stats = {}
for master, builder in result_counts.iterkeys():
total = (result_counts[(master, builder)]['successes'] +
result_counts[(master, builder)]['failures'])
flakes = 0
if result_counts[(master, builder)]['successes'] > 0:
flakes = result_counts[(master, builder)]['failures']
try_job_stats[(master, builder)] = {
'total': total,
'flakes': flakes,
'infra_failures': result_counts[(master, builder)][
'infra_failures'] if flakes else 0,
'compile_failures': result_counts[(master, builder)][
'compile_failures'] if flakes else 0,
'test_failures': result_counts[(master, builder)][
'test_failures'] if flakes else 0,
'invalid_results_failures': result_counts[(master, builder)][
'invalid_results_failures'] if flakes else 0,
'patch_failures': result_counts[(master, builder)][
'patch_failures'] if flakes else 0,
'other_failures': result_counts[(master, builder)][
'other_failures'] if flakes else 0,
'uncategorized_flakes': uncategorized_flakes.get(
(master, builder), []) if flakes else [],
}
return try_job_stats
if args.seq or not args.thread_pool:
iterable = map(get_flakiness_stats, stats['patch_stats'].keys())
else:
pool = ThreadPool(min(args.thread_pool, len(stats['patch_stats'].keys())))
iterable = pool.imap_unordered(
get_flakiness_stats, stats['patch_stats'].keys())
try_job_stats = {}
for result in iterable:
for master, builder in result.iterkeys():
keys = (
'total',
'flakes',
'infra_failures',
'compile_failures',
'test_failures',
'invalid_results_failures',
'patch_failures',
'other_failures'
)
init_dict = {key: 0 for key in keys}
init_dict['uncategorized_flakes'] = []
try_job_stats.setdefault((master, builder), init_dict)
for key in keys:
try_job_stats[(master, builder)][key] += result[(master, builder)][key]
try_job_stats[(master, builder)]['uncategorized_flakes'].extend(
result[(master, builder)]['uncategorized_flakes'])
output()
output('Top flaky builders (which fail and succeed in the same patch):')
def flakiness(master_builder):
return percentage(try_job_stats[master_builder]['flakes'],
try_job_stats[master_builder]['total'])
builders = sorted(try_job_stats.iterkeys(), key=flakiness, reverse=True)
format_string = '%-15s %-55s %-16s|%-7s|%-7s|%-7s|%-7s|%-7s|%-7s'
output(format_string,
'Master', 'Builder', 'Flakes',
'Infra', 'Compile', 'Test', 'Invalid', 'Patch', 'Other')
for master_builder in builders:
master, builder = master_builder
output(format_string,
master.replace('tryserver.', ''), builder,
'%4d/%4d (%3.0f%%)' % (try_job_stats[master_builder]['flakes'],
try_job_stats[master_builder]['total'],
flakiness(master_builder)),
'%6.0f%%' % percentage(
try_job_stats[master_builder]['infra_failures'],
try_job_stats[master_builder]['flakes']),
'%6.0f%%' % percentage(
try_job_stats[master_builder]['compile_failures'],
try_job_stats[master_builder]['flakes']),
'%6.0f%%' % percentage(
try_job_stats[master_builder]['test_failures'],
try_job_stats[master_builder]['flakes']),
'%6.0f%%' % percentage(
try_job_stats[master_builder]['invalid_results_failures'],
try_job_stats[master_builder]['flakes']),
'%6.0f%%' % percentage(
try_job_stats[master_builder]['patch_failures'],
try_job_stats[master_builder]['flakes']),
'%6.0f%%' % percentage(
try_job_stats[master_builder]['other_failures'],
try_job_stats[master_builder]['flakes']))
if args.list_uncategorized_flakes:
uncategorized_flakes = try_job_stats[
master_builder]['uncategorized_flakes']
if uncategorized_flakes:
output(' Uncategorized flakes:')
for result in uncategorized_flakes:
output(' %s' % result['url'])
def print_stats(args, stats):
if not stats:
output('No stats to display.')
return
output('Statistics for project %s', args.project)
if stats['begin'] > stats['end']:
output(' No stats since %s', args.date)
return
output('from %s till %s (local time).',
stats['begin'], stats['end'])
print_usage(stats)
print_tree_status(stats)
output()
output(
'%4d issues (%d patches) were tried by CQ, '
'resulting in %d attempts.',
stats['issue-count'], stats['patchset-count'], stats['attempt-count'])
output(
'%4d patches (%.1f%% of tried patches, %.1f%% of attempts) '
'were committed by CQ,',
stats['patchset-commit-count'],
percentage(stats['patchset-commit-count'], stats['patchset-count']),
percentage(stats['patchset-commit-count'], stats['attempt-count']))
output()
output('Rejections:')
print_attempt_counts(stats, 'rejections', 'were unsuccessful',
item_name='failures',
committed=False)
output(' This includes:')
for reason in REASONS:
print_attempt_counts(stats, reason, REASONS[reason]['message'], indent=2,
details=args.list_rejections,
item_name=REASONS[reason]['item'], committed=False)
# TODO(sergeyberezin): add gave up count (committed manually after trying CQ).
# TODO(sergeyberezin): add count of NOTRY=true (if possible).
output()
output('False Rejections:')
if args.use_logs:
print_attempt_counts(stats, 'false-rejections', 'were false rejections',
item_name='flakes', committed=True)
else:
output(
' %4d attempts (%.1f%% of %d attempts) were false rejections',
stats['attempt-false-reject-count'],
percentage(stats['attempt-false-reject-count'],
stats['attempt-count']),
stats['attempt-count'])
output(' False rejections include:')
for reason in FLAKY_REASONS.keys() + ['failed-unknown']:
print_attempt_counts(stats, reason, REASONS[reason]['message'], indent=2,
item_name=REASONS[reason]['item'], committed=True,
details=args.list_false_rejections)
output(' Other rejections in committed patches for valid reasons:')
for reason in VALID_REASONS.keys():
print_attempt_counts(stats, reason, REASONS[reason]['message'], indent=2,
item_name=REASONS[reason]['item'], committed=True,
details=args.list_false_rejections)
print_duration('mean', stats)
print_duration('50', stats, 'Median')
output()
output('Patches which eventually land percentiles:')
for p in ['10', '25', '50', '75', '90', '95', '99', 'max']:
output('%3s: %4.1f hrs, %2d attempts, %2d tryjob retries, '
'%2d global retry quota',
p, stats['patchset-committed-durations'][p] / 3600.0,
stats['patchset-committed-attempts'][p],
stats['patchset-committed-tryjob-retries'][p],
stats['patchset-committed-global-retry-quota'][p])
output()
output('Slowest CLs:')
slowest_cls = sorted(
stats['patch_stats'],
key=lambda p: stats['patch_stats'][p]['patchset-duration'],
reverse=True)
for p in slowest_cls[:40]:
output('%s (%s hrs)' % (
patch_url(p),
round(stats['patch_stats'][p]['patchset-duration'] / 3600.0, 1)))
print_flakiness_stats(args, stats)
def acquire_stats(args, add_tree_stats=True):
stats = {}
logging.info('Acquiring stats for project %s for a %s of %s using %s',
args.project, args.range, args.date,
'logs' if args.use_logs else 'cache')
end_date = args.date + datetime.timedelta(minutes=INTERVALS[args.range])
if args.use_logs:
init_stats = default_stats()
assert args.date
# For weekly stats, collect job flakiness from daily cached stats.
if args.range == 'week':
for day in range(7):
d = args.date + datetime.timedelta(minutes=INTERVALS['day']*day)
raw_stats = fetch_stats(args, d, 'day')
init_stats = organize_stats(raw_stats, init=init_stats)
elif args.range == 'day':
for hour in range(24):
d = args.date + datetime.timedelta(minutes=INTERVALS['hour']*hour)
raw_stats = fetch_stats(args, d, 'hour')
init_stats = organize_stats(raw_stats, init=init_stats)
else:
init_stats = organize_stats(fetch_stats(args))
stats = derive_stats(
args, args.date, init_stats=init_stats)
else:
stats = organize_stats(fetch_stats(args))
if add_tree_stats:
stats['tree'] = derive_tree_stats(args.project, args.date, end_date)
if PROJECTS[args.project]['type'] == 'git':
stats['usage'] = derive_git_stats(
args.project, args.date, end_date, args.bots)
else:
stats['usage'] = derive_svn_stats(
args.project, args.date, end_date, args.bots)
return stats
def main():
args = parse_args()
logger = logging.getLogger()
# TODO(sergeyberezin): how do I derive local timezone string?
# Need to be able to pass dateutil.tz.tzlocal() directly.
infra_libs.logs.process_argparse_options(args, logger)
stats = acquire_stats(args)
print_stats(args, stats)
if __name__ == '__main__':
sys.exit(main())
|
|
#!/usr/bin/env python
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Dump the state of the world for post mortem."""
import argparse
import datetime
from distutils import spawn
import fnmatch
import os
import os.path
import subprocess
import sys
GMR_PROCESSES = (
'nova-compute',
'neutron-dhcp-agent',
'neutron-l3-agent',
'neutron-linuxbridge-agent',
'neutron-metadata-agent',
'neutron-openvswitch-agent',
'cinder-volume',
)
def get_options():
parser = argparse.ArgumentParser(
description='Dump world state for debugging')
parser.add_argument('-d', '--dir',
default='.',
help='Output directory for worlddump')
parser.add_argument('-n', '--name',
default='',
help='Additional name to tag into file')
return parser.parse_args()
def filename(dirname, name=""):
now = datetime.datetime.utcnow()
fmt = "worlddump-%Y-%m-%d-%H%M%S"
if name:
fmt += "-" + name
fmt += ".txt"
return os.path.join(dirname, now.strftime(fmt))
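# For example (the timestamp is illustrative), filename('/tmp', 'crash')
# returns something like '/tmp/worlddump-2016-03-01-142530-crash.txt', while
# filename('/tmp') omits the '-crash' suffix.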
def warn(msg):
print("WARN: %s" % msg)
def _dump_cmd(cmd):
print(cmd)
print("-" * len(cmd))
print()
try:
subprocess.check_call(cmd, shell=True)
print()
except subprocess.CalledProcessError as e:
print("*** Failed to run '%(cmd)s': %(err)s" % {'cmd': cmd, 'err': e})
def _find_cmd(cmd):
if not spawn.find_executable(cmd):
print("*** %s not found: skipping" % cmd)
return False
return True
def _header(name):
print()
print(name)
print("=" * len(name))
print()
def _bridge_list():
process = subprocess.Popen(['sudo', 'ovs-vsctl', 'list-br'],
stdout=subprocess.PIPE)
stdout, _ = process.communicate()
return stdout.split()
# This method gets the maximum OpenFlow version supported by Open vSwitch.
# For example, 'ovs-ofctl --version' displays the following:
#
# ovs-ofctl (Open vSwitch) 2.0.2
# Compiled Dec 9 2015 14:08:08
# OpenFlow versions 0x1:0x4
#
# The above shows that Open vSwitch supports OpenFlow10 through OpenFlow13.
# This method finds the maximum version by searching for the string
# 'OpenFlow versions 0x1:0x' and returns that value as an integer.
def _get_ofp_version():
process = subprocess.Popen(['ovs-ofctl', '--version'], stdout=subprocess.PIPE)
stdout, _ = process.communicate()
find_str = 'OpenFlow versions 0x1:0x'
offset = stdout.find(find_str)
return int(stdout[offset + len(find_str):-1]) - 1
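# Worked example: with the sample output above ('OpenFlow versions 0x1:0x4'),
# the text after the search string is '4', so this returns int('4') - 1 == 3,
# and ovs_dump() below expands that into the protocol list
# 'OpenFlow10,OpenFlow11,OpenFlow12,OpenFlow13'.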
def disk_space():
# the df output
_header("File System Summary")
dfraw = os.popen("df -Ph").read()
df = [s.split() for s in dfraw.splitlines()]
for fs in df:
try:
if int(fs[4][:-1]) > 95:
warn("Device %s (%s) is %s full, might be an issue" % (
fs[0], fs[5], fs[4]))
except ValueError:
# if it doesn't look like an int, that's fine
pass
print(dfraw)
def ebtables_dump():
tables = ['filter', 'nat', 'broute']
_header("EB Tables Dump")
if not _find_cmd('ebtables'):
return
for table in tables:
_dump_cmd("sudo ebtables -t %s -L" % table)
def iptables_dump():
tables = ['filter', 'nat', 'mangle']
_header("IP Tables Dump")
for table in tables:
_dump_cmd("sudo iptables --line-numbers -L -nv -t %s" % table)
def _netns_list():
process = subprocess.Popen(['ip', 'netns'], stdout=subprocess.PIPE)
stdout, _ = process.communicate()
return stdout.split()
def network_dump():
_header("Network Dump")
_dump_cmd("brctl show")
_dump_cmd("arp -n")
ip_cmds = ["addr", "link", "route"]
for cmd in ip_cmds + ['netns']:
_dump_cmd("ip %s" % cmd)
for netns_ in _netns_list():
for cmd in ip_cmds:
args = {'netns': netns_, 'cmd': cmd}
_dump_cmd('sudo ip netns exec %(netns)s ip %(cmd)s' % args)
def ovs_dump():
_header("Open vSwitch Dump")
  # NOTE(cdent): If we're not using neutron + ovs these commands
  # will not be present, so skip the dump entirely.
if not _find_cmd('ovs-vsctl'):
return
bridges = _bridge_list()
ofctl_cmds = ('show', 'dump-ports-desc', 'dump-ports', 'dump-flows')
ofp_max = _get_ofp_version()
vers = 'OpenFlow10'
for i in range(1, ofp_max + 1):
vers += ',OpenFlow1' + str(i)
_dump_cmd("sudo ovs-vsctl show")
for ofctl_cmd in ofctl_cmds:
for bridge in bridges:
args = {'vers': vers, 'cmd': ofctl_cmd, 'bridge': bridge}
_dump_cmd("sudo ovs-ofctl --protocols=%(vers)s %(cmd)s %(bridge)s" % args)
def process_list():
_header("Process Listing")
_dump_cmd("ps axo "
"user,ppid,pid,pcpu,pmem,vsz,rss,tty,stat,start,time,args")
def compute_consoles():
_header("Compute consoles")
for root, dirnames, filenames in os.walk('/opt/stack'):
for filename in fnmatch.filter(filenames, 'console.log'):
fullpath = os.path.join(root, filename)
_dump_cmd("sudo cat %s" % fullpath)
def guru_meditation_reports():
for service in GMR_PROCESSES:
_header("%s Guru Meditation Report" % service)
try:
subprocess.check_call(['pgrep', '-f', service])
except subprocess.CalledProcessError:
print("Skipping as %s does not appear to be running" % service)
continue
_dump_cmd("killall -e -USR2 %s" % service)
print("guru meditation report in %s log" % service)
def main():
opts = get_options()
fname = filename(opts.dir, opts.name)
print("World dumping... see %s for details" % fname)
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
with open(fname, 'w') as f:
os.dup2(f.fileno(), sys.stdout.fileno())
disk_space()
process_list()
network_dump()
ovs_dump()
iptables_dump()
ebtables_dump()
compute_consoles()
guru_meditation_reports()
if __name__ == '__main__':
try:
sys.exit(main())
except KeyboardInterrupt:
sys.exit(1)
|
|
"""
Copyright (c) 2015 SONATA-NFV
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
"""
Test suite to automatically test emulator REST API endpoints.
"""
import os
import unittest
import requests
import simplejson as json
import time
from emuvim.test.api_base_openstack import ApiBaseOpenStack
class testRestApi(ApiBaseOpenStack):
"""
Tests to check the REST API endpoints of the emulator.
"""
def setUp(self):
# create network
self.createNet(nswitches=3, ndatacenter=2, nhosts=2, ndockers=0, autolinkswitches=True)
# setup links
self.net.addLink(self.dc[0], self.h[0])
self.net.addLink(self.h[1], self.dc[1])
self.net.addLink(self.dc[0], self.dc[1])
# start api
self.startApi()
# start Mininet network
self.startNet()
def testNovaDummy(self):
print('->>>>>>> test Nova Dummy Class->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
print(" ")
headers = {'Content-type': 'application/json'}
test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(__file__), "test_heatapi_template_create_stack.json")).read()
url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
requests.post(url, data=json.dumps(json.loads(test_heatapi_template_create_stack)),
headers=headers)
print('->>>>>>> test Nova List Versions ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/"
listapiversionnovaresponse = requests.get(url, headers=headers)
self.assertEqual(listapiversionnovaresponse.status_code, 200)
self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["id"], "v2.1")
self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["status"], "CURRENT")
self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["version"], "2.38")
self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["min_version"], "2.1")
self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["updated"], "2013-07-23T11:33:21Z")
print(" ")
print('->>>>>>> test Nova Version Show ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla"
listapiversion21novaresponse = requests.get(url, headers=headers)
self.assertEqual(listapiversion21novaresponse.status_code, 200)
self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["id"], "v2.1")
self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["status"], "CURRENT")
self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["version"], "2.38")
self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["min_version"], "2.1")
self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["updated"], "2013-07-23T11:33:21Z")
print(" ")
print('->>>>>>> test Nova Version List Server APIs ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/servers"
listserverapisnovaresponse = requests.get(url, headers=headers)
self.assertEqual(listserverapisnovaresponse.status_code, 200)
self.assertNotEqual(json.loads(listserverapisnovaresponse.content)["servers"][0]["name"], "")
print(" ")
print('->>>>>>> test Nova Delete Server APIs ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/servers/%s" % (json.loads(listserverapisnovaresponse.content)["servers"][0]["id"])
deleteserverapisnovaresponse = requests.delete(url, headers=headers)
self.assertEqual(deleteserverapisnovaresponse.status_code, 204)
print(" ")
print('->>>>>>> test Nova Delete Non-Existing Server APIs ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/servers/non-existing-ix"
deleteserverapisnovaresponse = requests.delete(url, headers=headers)
self.assertEqual(deleteserverapisnovaresponse.status_code, 404)
print(" ")
print('->>>>>>> testNovaVersionListServerAPIs_withPortInformation ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/servers/andPorts"
listserverapisnovaresponse = requests.get(url, headers=headers)
self.assertEqual(listserverapisnovaresponse.status_code, 200)
self.assertNotEqual(json.loads(listserverapisnovaresponse.content)["servers"][0]["name"], "")
print(" ")
print('->>>>>>> test Nova List Flavors ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/flavors"
listflavorsresponse = requests.get(url, headers=headers)
self.assertEqual(listflavorsresponse.status_code, 200)
self.assertIn(json.loads(listflavorsresponse.content)["flavors"][0]["name"], ["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
self.assertIn(json.loads(listflavorsresponse.content)["flavors"][1]["name"], ["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
self.assertIn(json.loads(listflavorsresponse.content)["flavors"][2]["name"], ["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
print(" ")
print('->>>>>>> testNovaAddFlavors ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/flavors"
addflavorsresponse = requests.post(url,
data='{"flavor":{"name": "testFlavor", "vcpus": "test_vcpus", "ram": 1024, "disk": 10}}',
headers=headers)
self.assertEqual(addflavorsresponse.status_code, 200)
self.assertIsNotNone(json.loads(addflavorsresponse.content)["flavor"]["id"])
self.assertIsNotNone(json.loads(addflavorsresponse.content)["flavor"]["links"][0]['href'])
print(" ")
print('->>>>>>> test Nova List Flavors Detail ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/flavors/detail"
listflavorsdetailresponse = requests.get(url, headers=headers)
self.assertEqual(listflavorsdetailresponse.status_code, 200)
self.assertIn(json.loads(listflavorsdetailresponse.content)["flavors"][0]["name"],["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
self.assertIn(json.loads(listflavorsdetailresponse.content)["flavors"][1]["name"],["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
self.assertIn(json.loads(listflavorsdetailresponse.content)["flavors"][2]["name"],["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
print(" ")
print('->>>>>>> testNovaAddFlavors ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/flavors/detail"
addflavorsresponse = requests.post(url,
data='{"flavor":{"name": "testFlavor", "vcpus": "test_vcpus", "ram": 1024, "disk": 10}}',
headers=headers)
self.assertEqual(addflavorsresponse.status_code, 200)
self.assertIsNotNone(json.loads(addflavorsresponse.content)["flavor"]["id"])
self.assertIsNotNone(json.loads(addflavorsresponse.content)["flavor"]["links"][0]['href'])
print(" ")
print('->>>>>>> test Nova List Flavor By Id ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/flavors/%s" % (json.loads(listflavorsdetailresponse.content)["flavors"][0]["name"])
listflavorsbyidresponse = requests.get(url, headers=headers)
self.assertEqual(listflavorsbyidresponse.status_code, 200)
self.assertEqual(json.loads(listflavorsbyidresponse.content)["flavor"]["id"], json.loads(listflavorsdetailresponse.content)["flavors"][0]["id"])
print(" ")
print('->>>>>>> test Nova List Images ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/images"
listimagesresponse = requests.get(url, headers=headers)
self.assertEqual(listimagesresponse.status_code, 200)
print(listimagesresponse.content)
        # Deactivated: this highly depends on the environment in which the tests are executed; one cannot make such an assumption.
#self.assertIn(json.loads(listimagesresponse.content)["images"][0]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
#self.assertIn(json.loads(listimagesresponse.content)["images"][1]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
#self.assertIn(json.loads(listimagesresponse.content)["images"][2]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
print(" ")
print('->>>>>>> test Nova List Images Details ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/images/detail"
listimagesdetailsresponse = requests.get(url, headers=headers)
self.assertEqual(listimagesdetailsresponse.status_code, 200)
        # Deactivated: this highly depends on the environment in which the tests are executed; one cannot make such an assumption.
#self.assertIn(json.loads(listimagesdetailsresponse.content)["images"][0]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
#self.assertIn(json.loads(listimagesdetailsresponse.content)["images"][1]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
#self.assertIn(json.loads(listimagesdetailsresponse.content)["images"][2]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
self.assertEqual(json.loads(listimagesdetailsresponse.content)["images"][0]["metadata"]["architecture"],"x86_64")
print(" ")
print('->>>>>>> test Nova List Image By Id ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/images/%s" % (json.loads(listimagesdetailsresponse.content)["images"][0]["id"])
listimagebyidresponse = requests.get(url, headers=headers)
self.assertEqual(listimagebyidresponse.status_code, 200)
self.assertEqual(json.loads(listimagebyidresponse.content)["image"]["id"],json.loads(listimagesdetailsresponse.content)["images"][0]["id"])
print(" ")
        print('->>>>>>> test Nova List Image By Non-Existent Id ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/images/non_existing_id"
listimagebynonexistingidresponse = requests.get(url, headers=headers)
self.assertEqual(listimagebynonexistingidresponse.status_code, 404)
print(" ")
        # Find the id of the ubuntu image.
for image in json.loads(listimagesresponse.content)["images"]:
if image["name"] == "ubuntu:trusty":
ubuntu_image_id = image["id"]
print('->>>>>>> test Nova Create Server Instance ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/servers"
data = '{"server": {"name": "X", "flavorRef": "%s", "imageRef":"%s"}}' % (json.loads(listflavorsresponse.content)["flavors"][0]["id"], ubuntu_image_id)
createserverinstance = requests.post(url, data=data, headers=headers)
self.assertEqual(createserverinstance.status_code, 200)
self.assertEqual(json.loads(createserverinstance.content)["server"]["image"]["id"], ubuntu_image_id)
print(" ")
print('->>>>>>> test Nova Create Server Instance With Already Existing Name ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/servers"
data = '{"server": {"name": "X", "flavorRef": "%s", "imageRef":"%s"}}' % (json.loads(listflavorsresponse.content)["flavors"][0]["id"], ubuntu_image_id)
createserverinstance = requests.post(url, data=data, headers=headers)
self.assertEqual(createserverinstance.status_code, 409)
print(" ")
print('->>>>>>> test Nova Version List Server APIs Detailed ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/servers/detail"
listserverapisdetailedresponse = requests.get(url, headers=headers)
self.assertEqual(listserverapisdetailedresponse.status_code, 200)
self.assertEqual(json.loads(listserverapisdetailedresponse.content)["servers"][0]["status"], "ACTIVE")
print(" ")
print('->>>>>>> test Nova Show Server Details ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/servers/%s" % (json.loads(listserverapisdetailedresponse.content)["servers"][0]["id"])
listserverdetailsresponse = requests.get(url, headers=headers)
self.assertEqual(listserverdetailsresponse.status_code, 200)
self.assertEqual(json.loads(listserverdetailsresponse.content)["server"]["flavor"]["links"][0]["rel"], "bookmark")
print(" ")
print('->>>>>>> test Nova Show Non-Existing Server Details ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/servers/non_existing_server_id"
listnonexistingserverdetailsresponse = requests.get(url, headers=headers)
self.assertEqual(listnonexistingserverdetailsresponse.status_code, 404)
print(" ")
def testNeutronDummy(self):
print('->>>>>>> test Neutron Dummy Class->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
print(" ")
headers = {'Content-type': 'application/json'}
test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(__file__), "test_heatapi_template_create_stack.json")).read()
url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
requests.post(url, data=json.dumps(json.loads(test_heatapi_template_create_stack)), headers=headers)
# test_heatapi_keystone_get_token = open("test_heatapi_keystone_get_token.json").read()
print('->>>>>>> test Neutron List Versions ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/"
listapiversionstackresponse = requests.get(url, headers=headers)
self.assertEqual(listapiversionstackresponse.status_code, 200)
self.assertEqual(json.loads(listapiversionstackresponse.content)["versions"][0]["id"], "v2.0")
print(" ")
print('->>>>>>> test Neutron Show API v2.0 ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0"
listapiversionv20response = requests.get(url, headers=headers)
self.assertEqual(listapiversionv20response.status_code, 200)
self.assertEqual(json.loads(listapiversionv20response.content)["resources"][0]["name"], "subnet")
self.assertEqual(json.loads(listapiversionv20response.content)["resources"][1]["name"], "network")
self.assertEqual(json.loads(listapiversionv20response.content)["resources"][2]["name"], "ports")
print(" ")
print('->>>>>>> test Neutron List Networks ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/networks"
listnetworksesponse1 = requests.get(url, headers=headers)
self.assertEqual(listnetworksesponse1.status_code, 200)
self.assertEqual(json.loads(listnetworksesponse1.content)["networks"][0]["status"], "ACTIVE")
listNetworksId = json.loads(listnetworksesponse1.content)["networks"][0]["id"]
listNetworksName = json.loads(listnetworksesponse1.content)["networks"][0]["name"]
listNetworksId2 = json.loads(listnetworksesponse1.content)["networks"][1]["id"]
print(" ")
print('->>>>>>> test Neutron List Non-Existing Networks ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/networks?name=non_existent_network_name"
listnetworksesponse2 = requests.get(url,headers=headers)
self.assertEqual(listnetworksesponse2.status_code, 404)
print(" ")
print('->>>>>>> test Neutron List Networks By Name ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/networks?name=" + listNetworksName #tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest
listnetworksesponse3 = requests.get(url, headers=headers)
self.assertEqual(listnetworksesponse3.status_code, 200)
self.assertEqual(json.loads(listnetworksesponse3.content)["networks"][0]["name"], listNetworksName)
print(" ")
print('->>>>>>> test Neutron List Networks By Id ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/networks?id=" + listNetworksId # tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest
listnetworksesponse4 = requests.get(url, headers=headers)
self.assertEqual(listnetworksesponse4.status_code, 200)
self.assertEqual(json.loads(listnetworksesponse4.content)["networks"][0]["id"], listNetworksId)
print(" ")
print('->>>>>>> test Neutron List Networks By Multiple Ids ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/networks?id=" + listNetworksId + "&id="+ listNetworksId2 # tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest
listnetworksesponse5 = requests.get(url, headers=headers)
self.assertEqual(listnetworksesponse5.status_code, 200)
self.assertEqual(json.loads(listnetworksesponse5.content)["networks"][0]["id"], listNetworksId)
self.assertEqual(json.loads(listnetworksesponse5.content)["networks"][1]["id"], listNetworksId2)
print(" ")
print('->>>>>>> test Neutron Show Network ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/networks/"+listNetworksId
shownetworksesponse = requests.get(url, headers=headers)
self.assertEqual(shownetworksesponse.status_code, 200)
self.assertEqual(json.loads(shownetworksesponse.content)["network"]["status"], "ACTIVE")
print(" ")
        print('->>>>>>> test Neutron Show Non-Existent Network ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/networks/non_existent_network_id"
shownetworksesponse2 = requests.get(url, headers=headers)
self.assertEqual(shownetworksesponse2.status_code, 404)
print(" ")
print('->>>>>>> test Neutron Create Network ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/networks"
createnetworkresponse = requests.post(url, data='{"network": {"name": "sample_network","admin_state_up": true}}', headers=headers)
self.assertEqual(createnetworkresponse.status_code, 201)
self.assertEqual(json.loads(createnetworkresponse.content)["network"]["status"], "ACTIVE")
print(" ")
print('->>>>>>> test Neutron Create Network With Existing Name ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/networks"
createnetworkresponsefailure = requests.post(url,data='{"network": {"name": "sample_network","admin_state_up": true}}',headers=headers)
self.assertEqual(createnetworkresponsefailure.status_code, 400)
print(" ")
print('->>>>>>> test Neutron Update Network ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/networks/%s" % (json.loads(createnetworkresponse.content)["network"]["id"])
updatenetworkresponse = requests.put(url, data='{"network": {"status": "ACTIVE", "admin_state_up":true, "tenant_id":"abcd123", "name": "sample_network_new_name", "shared":false}}' , headers=headers)
self.assertEqual(updatenetworkresponse.status_code, 200)
self.assertEqual(json.loads(updatenetworkresponse.content)["network"]["name"], "sample_network_new_name")
self.assertEqual(json.loads(updatenetworkresponse.content)["network"]["tenant_id"], "abcd123")
print(" ")
print('->>>>>>> test Neutron Update Non-Existing Network ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/networks/non-existing-name123"
updatenetworkresponse = requests.put(url, data='{"network": {"name": "sample_network_new_name"}}', headers=headers)
self.assertEqual(updatenetworkresponse.status_code, 404)
print(" ")
print('->>>>>>> test Neutron List Subnets ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/subnets"
listsubnetsresponse = requests.get(url, headers=headers)
listSubnetName = json.loads(listsubnetsresponse.content)["subnets"][0]["name"]
listSubnetId = json.loads(listsubnetsresponse.content)["subnets"][0]["id"]
listSubnetId2 = json.loads(listsubnetsresponse.content)["subnets"][1]["id"]
self.assertEqual(listsubnetsresponse.status_code, 200)
self.assertNotIn('None', listSubnetName)
print(" ")
print('->>>>>>> test Neutron List Subnets By Name ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/subnets?name="+listSubnetName
listsubnetByNameresponse = requests.get(url, headers=headers)
self.assertEqual(listsubnetByNameresponse.status_code, 200)
self.assertNotIn('None', json.loads(listsubnetByNameresponse.content)["subnets"][0]["name"])
print(" ")
print('->>>>>>> test Neutron List Subnets By Id ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/subnets?id=" + listSubnetId
listsubnetsbyidresponse = requests.get(url, headers=headers)
self.assertEqual(listsubnetsbyidresponse.status_code, 200)
self.assertNotIn("None", json.loads(listsubnetsbyidresponse.content)["subnets"][0]["name"])
print(" ")
print('->>>>>>> test Neutron List Subnets By Multiple Id ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/subnets?id=" + listSubnetId +"&id="+listSubnetId2
listsubnetsbymultipleidsresponse = requests.get(url, headers=headers)
self.assertEqual(listsubnetsbymultipleidsresponse.status_code, 200)
self.assertNotIn("None", json.loads(listsubnetsbymultipleidsresponse.content)["subnets"][0]["name"])
print(" ")
print('->>>>>>> test Neutron Show Subnet->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/subnets/%s" % (json.loads(listsubnetsresponse.content)["subnets"][0]["id"])
showsubnetsresponse = requests.get(url, headers=headers)
self.assertEqual(showsubnetsresponse.status_code, 200)
self.assertNotIn("None", json.loads(showsubnetsresponse.content)["subnet"]["name"])
print(" ")
print('->>>>>>> test Neutron Show Non-Existing Subnet->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/subnets/non-existing-id123"
showsubnetsresponse = requests.get(url, headers=headers)
self.assertEqual(showsubnetsresponse.status_code, 404)
print(" ")
print('->>>>>>> test Neutron Create Subnet ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/subnets"
createsubnetdata = '{"subnet": {"name": "new_subnet", "network_id": "%s","ip_version": 4,"cidr": "10.0.0.1/24"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
createsubnetresponse = requests.post(url, data=createsubnetdata, headers=headers)
self.assertEqual(createsubnetresponse.status_code, 201)
self.assertEqual(json.loads(createsubnetresponse.content)["subnet"]["name"], "new_subnet")
print(" ")
print('->>>>>>> test Neutron Create Second Subnet ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/subnets"
createsubnetdata = '{"subnet": {"name": "new_subnet", "network_id": "%s","ip_version": 4,"cidr": "10.0.0.1/24"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
createsubnetfailureresponse = requests.post(url, data=createsubnetdata, headers=headers)
self.assertEqual(createsubnetfailureresponse.status_code, 409)
print(" ")
print('->>>>>>> test Neutron Update Subnet ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/subnets/%s" % (json.loads(createsubnetresponse.content)["subnet"]["id"])
updatesubnetdata = '{"subnet": {"name": "new_subnet_new_name", "network_id":"some_id", "tenant_id":"new_tenant_id", "allocation_pools":"change_me", "gateway_ip":"192.168.1.120", "ip_version":4, "cidr":"10.0.0.1/24", "id":"some_new_id", "enable_dhcp":true} }'
updatesubnetresponse = requests.put(url, data=updatesubnetdata, headers=headers)
self.assertEqual(updatesubnetresponse.status_code, 200)
self.assertEqual(json.loads(updatesubnetresponse.content)["subnet"]["name"], "new_subnet_new_name")
print(" ")
print('->>>>>>> test Neutron Update Non-Existing Subnet ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/subnets/non-existing-subnet-12345"
updatenonexistingsubnetdata = '{"subnet": {"name": "new_subnet_new_name"} }'
updatenonexistingsubnetresponse = requests.put(url, data=updatenonexistingsubnetdata, headers=headers)
self.assertEqual(updatenonexistingsubnetresponse.status_code, 404)
print(" ")
print('->>>>>>> test Neutron List Ports ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/ports"
listportsesponse = requests.get(url, headers=headers)
self.assertEqual(listportsesponse.status_code, 200)
self.assertEqual(json.loads(listportsesponse.content)["ports"][0]["status"], "ACTIVE")
listPortsName = json.loads(listportsesponse.content)["ports"][0]["name"]
listPortsId1 = json.loads(listportsesponse.content)["ports"][0]["id"]
listPortsId2 = json.loads(listportsesponse.content)["ports"][1]["id"]
print(" ")
print('->>>>>>> test Neutron List Ports By Name ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/ports?name=" + listPortsName
listportsbynameesponse = requests.get(url, headers=headers)
self.assertEqual(listportsbynameesponse.status_code, 200)
self.assertEqual(json.loads(listportsbynameesponse.content)["ports"][0]["name"], listPortsName)
print(" ")
print('->>>>>>> test Neutron List Ports By Id ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/ports?id=" + listPortsId1
listportsbyidesponse = requests.get(url, headers=headers)
self.assertEqual(listportsbyidesponse.status_code, 200)
self.assertEqual(json.loads(listportsbyidesponse.content)["ports"][0]["id"], listPortsId1)
print(" ")
print('->>>>>>> test Neutron List Ports By Multiple Ids ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/ports?id=" + listPortsId1 +"&id="+listPortsId2
listportsbymultipleidsesponse = requests.get(url, headers=headers)
self.assertEqual(listportsbymultipleidsesponse.status_code, 200)
self.assertEqual(json.loads(listportsbymultipleidsesponse.content)["ports"][0]["id"], listPortsId1)
print(" ")
print('->>>>>>> test Neutron List Non-Existing Ports ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/ports?id=non-existing-port-id"
listportsbynonexistingidsesponse = requests.get(url, headers=headers)
self.assertEqual(listportsbynonexistingidsesponse.status_code, 404)
print(" ")
print('->>>>>>> test Neutron Show Port ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/ports/%s" % (json.loads(listportsesponse.content)["ports"][0]["id"])
showportresponse = requests.get(url, headers=headers)
self.assertEqual(showportresponse.status_code, 200)
self.assertEqual(json.loads(showportresponse.content)["port"]["status"], "ACTIVE")
print(" ")
print('->>>>>>> test Neutron Show Non-Existing Port ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/ports/non-existing-portid123"
shownonexistingportresponse = requests.get(url, headers=headers)
self.assertEqual(shownonexistingportresponse.status_code, 404)
print(" ")
print('->>>>>>> test Neutron Create Port In Non-Existing Network ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/ports"
createnonexistingportdata = '{"port": {"name": "new_port", "network_id": "non-existing-id"} }'
createnonexistingnetworkportresponse = requests.post(url, data=createnonexistingportdata, headers=headers)
self.assertEqual(createnonexistingnetworkportresponse.status_code, 404)
print(" ")
print('->>>>>>> test Neutron Create Port ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/ports"
createportdata = '{"port": {"name": "new_port", "network_id": "%s", "admin_state_up":true, "device_id":"device_id123", "device_owner":"device_owner123", "fixed_ips":"change_me","id":"new_id1234", "mac_address":"12:34:56:78:90", "status":"change_me", "tenant_id":"tenant_id123"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
createportresponse = requests.post(url, data=createportdata, headers=headers)
self.assertEqual(createportresponse.status_code, 201)
        print(createportresponse.content)
self.assertEqual(json.loads(createportresponse.content)["port"]["name"], "new_port")
print(" ")
print('->>>>>>> test Neutron Create Port With Existing Name ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/ports"
createportwithexistingnamedata = '{"port": {"name": "new_port", "network_id": "%s"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
createportwithexistingnameresponse = requests.post(url, data=createportwithexistingnamedata, headers=headers)
self.assertEqual(createportwithexistingnameresponse.status_code, 500)
print(" ")
print('->>>>>>> test Neutron Create Port Without Name ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/ports"
createportdatawithoutname = '{"port": {"network_id": "%s"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
createportwithoutnameresponse = requests.post(url, data=createportdatawithoutname, headers=headers)
self.assertEqual(createportwithoutnameresponse.status_code, 201)
self.assertIn("port:cp", json.loads(createportwithoutnameresponse.content)["port"]["name"])
print(" ")
print('->>>>>>> test Neutron Update Port ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
print(json.loads(createportresponse.content)["port"]["name"])
url = "http://0.0.0.0:19696/v2.0/ports/%s" % (json.loads(createportresponse.content)["port"]["name"])
updateportdata = '{"port": {"name": "new_port_new_name", "admin_state_up":true, "device_id":"device_id123", "device_owner":"device_owner123", "fixed_ips":"change_me","mac_address":"12:34:56:78:90", "status":"change_me", "tenant_id":"tenant_id123", "network_id":"network_id123"} }'
updateportresponse = requests.put(url, data=updateportdata, headers=headers)
self.assertEqual(updateportresponse.status_code, 200)
self.assertEqual(json.loads(updateportresponse.content)["port"]["name"], "new_port_new_name")
print(" ")
print('->>>>>>> test Neutron Update Non-Existing Port ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/ports/non-existing-port-ip"
updatenonexistingportdata = '{"port": {"name": "new_port_new_name"} }'
updatenonexistingportresponse = requests.put(url, data=updatenonexistingportdata, headers=headers)
self.assertEqual(updatenonexistingportresponse.status_code, 404)
print(" ")
print('->>>>>>> test Neutron Delete Port ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
righturl = "http://0.0.0.0:19696/v2.0/ports/%s" % (json.loads(createportresponse.content)["port"]["id"])
deleterightportresponse = requests.delete(righturl, headers=headers)
self.assertEqual(deleterightportresponse.status_code, 204)
print(" ")
print('->>>>>>> test Neutron Delete Non-Existing Port ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
wrongurl = "http://0.0.0.0:19696/v2.0/ports/unknownid"
deletewrongportresponse = requests.delete(wrongurl, headers=headers)
self.assertEqual(deletewrongportresponse.status_code, 404)
print(" ")
print('->>>>>>> test Neutron Delete Subnet ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
wrongurl = "http://0.0.0.0:19696/v2.0/subnets/unknownid"
righturl = "http://0.0.0.0:19696/v2.0/subnets/%s" % (json.loads(updatesubnetresponse.content)["subnet"]["id"])
deletewrongsubnetresponse = requests.delete(wrongurl, headers=headers)
deleterightsubnetresponse = requests.delete(righturl, headers=headers)
self.assertEqual(deletewrongsubnetresponse.status_code, 404)
self.assertEqual(deleterightsubnetresponse.status_code, 204)
print(" ")
print('->>>>>>> test Neutron Delete Network ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
righturl = "http://0.0.0.0:19696/v2.0/networks/%s" % (json.loads(createnetworkresponse.content)["network"]["id"])
deleterightnetworkresponse = requests.delete(righturl, headers=headers)
self.assertEqual(deleterightnetworkresponse.status_code, 204)
print(" ")
print('->>>>>>> test Neutron Delete Non-Existing Network ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
wrongurl = "http://0.0.0.0:19696/v2.0/networks/unknownid"
deletewrongnetworkresponse = requests.delete(wrongurl, headers=headers)
self.assertEqual(deletewrongnetworkresponse.status_code, 404)
print(" ")
    def testKeystoneDummy(self):
print('->>>>>>> test Keystone Dummy Class->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
print(" ")
headers = {'Content-type': 'application/json'}
test_heatapi_keystone_get_token = open(os.path.join(os.path.dirname(__file__), "test_heatapi_keystone_get_token.json")).read()
print('->>>>>>> test Keystone List Versions ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:15000/"
listapiversionstackresponse = requests.get(url, headers=headers)
self.assertEqual(listapiversionstackresponse.status_code, 200)
self.assertEqual(json.loads(listapiversionstackresponse.content)["versions"]["values"][0]["id"], "v2.0")
print(" ")
print('->>>>>>> test Keystone Show ApiV2 ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:15000/v2.0"
showapiversionstackresponse = requests.get(url, headers=headers)
self.assertEqual(showapiversionstackresponse.status_code, 200)
self.assertEqual(json.loads(showapiversionstackresponse.content)["version"]["id"], "v2.0")
print(" ")
print('->>>>>>> test Keystone Get Token ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:15000/v2.0/tokens"
gettokenstackresponse = requests.post(url, data=json.dumps(json.loads(test_heatapi_keystone_get_token)), headers=headers)
self.assertEqual(gettokenstackresponse.status_code, 200)
self.assertEqual(json.loads(gettokenstackresponse.content)["access"]["user"]["name"], "tenantName")
print(" ")
def testHeatDummy(self):
print('->>>>>>> test Heat Dummy Class->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
print(" ")
headers = {'Content-type': 'application/json'}
test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(__file__), "test_heatapi_template_create_stack.json")).read()
test_heatapi_template_update_stack = open(os.path.join(os.path.dirname(__file__), "test_heatapi_template_update_stack.json")).read()
print('->>>>>>> test Heat List API Versions Stack ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18004/"
listapiversionstackresponse = requests.get(url, headers=headers)
self.assertEqual(listapiversionstackresponse.status_code, 200)
self.assertEqual(json.loads(listapiversionstackresponse.content)["versions"][0]["id"], "v1.0")
print(" ")
print('->>>>>>> test Create Stack ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
createstackresponse = requests.post(url, data=json.dumps(json.loads(test_heatapi_template_create_stack)), headers=headers)
self.assertEqual(createstackresponse.status_code, 201)
self.assertNotEqual(json.loads(createstackresponse.content)["stack"]["id"], "")
print(" ")
print('->>>>>>> test Create Stack With Existing Name ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
createstackwithexistingnameresponse = requests.post(url, data='{"stack_name" : "s1"}', headers=headers)
self.assertEqual(createstackwithexistingnameresponse.status_code, 409)
print(" ")
print('->>>>>>> test Create Stack With Unsupported Version ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
createstackwitheunsupportedversionresponse = requests.post(url, data='{"stack_name" : "stackname123", "template" : {"heat_template_version": "2015-04-29"}}', headers=headers)
self.assertEqual(createstackwitheunsupportedversionresponse.status_code, 400)
print(" ")
print('->>>>>>> test List Stack ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
liststackresponse = requests.get(url, headers=headers)
self.assertEqual(liststackresponse.status_code, 200)
self.assertEqual(json.loads(liststackresponse.content)["stacks"][0]["stack_status"], "CREATE_COMPLETE")
print(" ")
print('->>>>>>> test Show Stack ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18004/v1/tenantabc123showStack/stacks/%s"% json.loads(createstackresponse.content)['stack']['id']
liststackdetailsresponse = requests.get(url, headers=headers)
self.assertEqual(liststackdetailsresponse.status_code, 200)
self.assertEqual(json.loads(liststackdetailsresponse.content)["stack"]["stack_status"], "CREATE_COMPLETE")
print(" ")
        print('->>>>>>> test Show Non-Existing Stack ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18004/v1/tenantabc123showStack/stacks/non_exisitng_id123"
listnonexistingstackdetailsresponse = requests.get(url, headers=headers)
self.assertEqual(listnonexistingstackdetailsresponse.status_code, 404)
print(" ")
print('->>>>>>> test Update Stack ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18004/v1/tenantabc123updateStack/stacks/%s"% json.loads(createstackresponse.content)['stack']['id']
updatestackresponse = requests.put(url, data=json.dumps(json.loads(test_heatapi_template_update_stack)),
headers=headers)
self.assertEqual(updatestackresponse.status_code, 202)
liststackdetailsresponse = requests.get(url, headers=headers)
self.assertEqual(json.loads(liststackdetailsresponse.content)["stack"]["stack_status"], "UPDATE_COMPLETE")
print(" ")
print('->>>>>>> test Update Non-Existing Stack ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18004/v1/tenantabc123updateStack/stacks/non_existing_id_1234"
updatenonexistingstackresponse = requests.put(url, data={"non": "sense"}, headers=headers)
self.assertEqual(updatenonexistingstackresponse.status_code, 404)
print(" ")
print('->>>>>>> test Delete Stack ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18004/v1/tenantabc123showStack/stacks/%s" % \
json.loads(createstackresponse.content)['stack']['id']
deletestackdetailsresponse = requests.delete(url, headers=headers)
self.assertEqual(deletestackdetailsresponse.status_code, 204)
print(" ")
def testNeutronSFC(self):
"""
        Tests the Neutron Service Function Chaining implementation. Since some functions build on others, a
        complete test environment is created here:
Ports: p1, p2, p3, p4
Port Pairs: pp1(p1, p2), pp2(p3, p4)
Port Pair Groups: ppg1(pp1, pp2)
Flow Classifiers: fc1
Port Chain: pc1(ppg1, fc1)
"""
headers = {'Content-type': 'application/json'}
print('->>>>>>> Create ports p1 - p4 ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
# Get network id
network_resp = requests.get("http://0.0.0.0:19696/v2.0/networks?name=default", headers=headers)
self.assertEqual(network_resp.status_code, 200)
network_id = json.loads(network_resp.content)["networks"][0]["id"]
url = "http://0.0.0.0:19696/v2.0/ports"
port_request = '{"port": {"name": "%s", "network_id": "%s"}}'
p1_resp = requests.post(url, data=port_request % ("p1", network_id), headers=headers)
self.assertEqual(p1_resp.status_code, 201)
p2_resp = requests.post(url, data=port_request % ("p2", network_id), headers=headers)
self.assertEqual(p2_resp.status_code, 201)
p3_resp = requests.post(url, data=port_request % ("p3", network_id), headers=headers)
self.assertEqual(p3_resp.status_code, 201)
p4_resp = requests.post(url, data=port_request % ("p4", network_id), headers=headers)
self.assertEqual(p4_resp.status_code, 201)
p1_id = json.loads(p1_resp.content)["port"]["id"]
p2_id = json.loads(p2_resp.content)["port"]["id"]
p3_id = json.loads(p3_resp.content)["port"]["id"]
p4_id = json.loads(p4_resp.content)["port"]["id"]
print('->>>>>>> test Neutron SFC Port Pair Create ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_pairs"
pp1_resp = requests.post(url, data='{"port_pair": {"name": "pp1", "ingress": "%s", "egress": "%s"}}' % (p1_id, p2_id), headers=headers)
self.assertEqual(pp1_resp.status_code, 201)
pp2_resp = requests.post(url, data='{"port_pair": {"name": "pp2", "ingress": "%s", "egress": "%s"}}' % (p3_id, p4_id), headers=headers)
self.assertEqual(pp2_resp.status_code, 201)
pp3_resp = requests.post(url, data='{"port_pair": {"name": "pp3", "ingress": "%s", "egress": "%s"}}' % (p3_id, p4_id), headers=headers)
self.assertEqual(pp3_resp.status_code, 201)
pp1_id = json.loads(pp1_resp.content)["port_pair"]["id"]
pp2_id = json.loads(pp2_resp.content)["port_pair"]["id"]
pp3_id = json.loads(pp3_resp.content)["port_pair"]["id"]
print('->>>>>>> test Neutron SFC Port Pair Update ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_pairs/%s" % pp3_id
pp3_update_resp = requests.put(url, data='{"port_pair": {"description": "port_pair_update"}}', headers=headers)
self.assertEqual(pp3_update_resp.status_code, 200)
self.assertEqual(json.loads(pp3_update_resp.content)["port_pair"]["description"], "port_pair_update")
print('->>>>>>> test Neutron SFC Port Pair Delete ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_pairs/%s" % pp3_id
pp3_delete_resp = requests.delete(url, headers=headers)
self.assertEqual(pp3_delete_resp.status_code, 204)
print('->>>>>>> test Neutron SFC Port Pair List ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_pairs"
pp_list_resp = requests.get(url, headers=headers)
self.assertEqual(pp_list_resp.status_code, 200)
self.assertEqual(len(json.loads(pp_list_resp.content)["port_pairs"]), 2) # only pp1 and pp2 should be left
print('->>>>>>> test Neutron SFC Port Pair Show ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_pairs/%s" % pp2_id
pp2_show_resp = requests.get(url, headers=headers)
self.assertEqual(pp2_show_resp.status_code, 200)
self.assertEqual(json.loads(pp2_show_resp.content)["port_pair"]["name"], "pp2")
print('->>>>>>> test Neutron SFC Port Pair Group Create ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_pair_groups"
ppg1_resp = requests.post(url, data='{"port_pair_group": {"name": "ppg1", "port_pairs": ["%s"]}}' % (pp1_id), headers=headers)
self.assertEqual(ppg1_resp.status_code, 201)
ppg2_resp = requests.post(url, data='{"port_pair_group": {"name": "ppg2", "port_pairs": ["%s"]}}' % (pp2_id), headers=headers)
self.assertEqual(ppg2_resp.status_code, 201)
ppg3_resp = requests.post(url, data='{"port_pair_group": {"name": "ppg3", "port_pairs": ["%s"]}}' % (pp2_id), headers=headers)
self.assertEqual(ppg3_resp.status_code, 201)
ppg1_id = json.loads(ppg1_resp.content)["port_pair_group"]["id"]
ppg2_id = json.loads(ppg2_resp.content)["port_pair_group"]["id"]
ppg3_id = json.loads(ppg3_resp.content)["port_pair_group"]["id"]
print('->>>>>>> test Neutron SFC Port Pair Group Update ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_pair_groups/%s" % ppg3_id
ppg3_update_resp = requests.put(url, data='{"port_pair_group": {"description": "port_pair_group_update"}}', headers=headers)
self.assertEqual(ppg3_update_resp.status_code, 200)
self.assertEqual(json.loads(ppg3_update_resp.content)["port_pair_group"]["description"], "port_pair_group_update")
print('->>>>>>> test Neutron SFC Port Pair Group Delete ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_pair_groups/%s" % ppg3_id
ppg3_delete_resp = requests.delete(url, headers=headers)
self.assertEqual(ppg3_delete_resp.status_code, 204)
print('->>>>>>> test Neutron SFC Port Pair Group List ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_pair_groups"
ppg_list_resp = requests.get(url, headers=headers)
self.assertEqual(ppg_list_resp.status_code, 200)
self.assertEqual(len(json.loads(ppg_list_resp.content)["port_pair_groups"]), 2) # only ppg1 and ppg2 should be left
print('->>>>>>> test Neutron SFC Port Pair Group Show ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_pair_groups/%s" % ppg2_id
ppg2_show_resp = requests.get(url, headers=headers)
self.assertEqual(ppg2_show_resp.status_code, 200)
self.assertEqual(json.loads(ppg2_show_resp.content)["port_pair_group"]["name"], "ppg2")
print('->>>>>>> test Neutron SFC Flow Classifier Create ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/flow_classifiers"
fc1_resp = requests.post(url, data='{"flow_classifier": {"name": "fc1", "source_port_range_min": 22, "source_port_range_max": 4000}}', headers=headers)
self.assertEqual(fc1_resp.status_code, 201)
fc2_resp = requests.post(url, data='{"flow_classifier": {"name": "fc2", "source_port_range_min": 22, "source_port_range_max": 4000}}', headers=headers)
self.assertEqual(fc2_resp.status_code, 201)
fc1_id = json.loads(fc1_resp.content)["flow_classifier"]["id"]
fc2_id = json.loads(fc2_resp.content)["flow_classifier"]["id"]
print('->>>>>>> test Neutron SFC Flow Classifier Update ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/flow_classifiers/%s" % fc2_id
fc2_update_resp = requests.put(url, data='{"flow_classifier": {"description": "flow_classifier_update"}}', headers=headers)
self.assertEqual(fc2_update_resp.status_code, 200)
self.assertEqual(json.loads(fc2_update_resp.content)["flow_classifier"]["description"], "flow_classifier_update")
print('->>>>>>> test Neutron SFC Flow Classifier Delete ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/flow_classifiers/%s" % fc2_id
fc2_delete_resp = requests.delete(url, headers=headers)
self.assertEqual(fc2_delete_resp.status_code, 204)
print('->>>>>>> test Neutron SFC Flow Classifier List ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/flow_classifiers"
fc_list_resp = requests.get(url, headers=headers)
self.assertEqual(fc_list_resp.status_code, 200)
self.assertEqual(len(json.loads(fc_list_resp.content)["flow_classifiers"]), 1) # only fc1
print('->>>>>>> test Neutron SFC Flow Classifier Show ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/flow_classifiers/%s" % fc1_id
fc1_show_resp = requests.get(url, headers=headers)
self.assertEqual(fc1_show_resp.status_code, 200)
self.assertEqual(json.loads(fc1_show_resp.content)["flow_classifier"]["name"], "fc1")
print('->>>>>>> test Neutron SFC Port Chain Create ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_chains"
pc1_resp = requests.post(url, data='{"port_chain": {"name": "pc1", "port_pair_groups": ["%s"], "flow_classifiers": ["%s"]}}' % (ppg1_id, fc1_id), headers=headers)
self.assertEqual(pc1_resp.status_code, 201)
pc2_resp = requests.post(url, data='{"port_chain": {"name": "pc2", "port_pair_groups": ["%s"], "flow_classifiers": ["%s"]}}' % (ppg1_id, fc1_id), headers=headers)
self.assertEqual(pc2_resp.status_code, 201)
pc1_id = json.loads(pc1_resp.content)["port_chain"]["id"]
pc2_id = json.loads(pc2_resp.content)["port_chain"]["id"]
print('->>>>>>> test Neutron SFC Port Chain Update ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_chains/%s" % pc2_id
pc2_update_resp = requests.put(url, data='{"port_chain": {"description": "port_chain_update"}}', headers=headers)
self.assertEqual(pc2_update_resp.status_code, 200)
self.assertEqual(json.loads(pc2_update_resp.content)["port_chain"]["description"], "port_chain_update")
print('->>>>>>> test Neutron SFC Port Chain Delete ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_chains/%s" % pc2_id
pc2_delete_resp = requests.delete(url, headers=headers)
self.assertEqual(pc2_delete_resp.status_code, 204)
print('->>>>>>> test Neutron SFC Port Chain List ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_chains"
pc_list_resp = requests.get(url, headers=headers)
self.assertEqual(pc_list_resp.status_code, 200)
self.assertEqual(len(json.loads(pc_list_resp.content)["port_chains"]), 1) # only pc1
print('->>>>>>> test Neutron SFC Port Chain Show ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_chains/%s" % pc1_id
pc1_show_resp = requests.get(url, headers=headers)
self.assertEqual(pc1_show_resp.status_code, 200)
self.assertEqual(json.loads(pc1_show_resp.content)["port_chain"]["name"], "pc1")
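# --- Illustrative helper sketch (not part of the original test suite) --------
# The tests above repeat one pattern many times: issue a request against an
# emulated OpenStack endpoint, assert the status code, and decode the JSON
# body. A helper like the one below could factor that out; its name and
# defaults are assumptions made only for this sketch, and the tests do not use it.
def _get_json(url, expected_status=200, headers=None):
    """Fetch ``url``, check the HTTP status code and return the decoded JSON body."""
    import json
    import requests

    resp = requests.get(url, headers=headers or {'Content-type': 'application/json'})
    assert resp.status_code == expected_status, resp.status_code
    return json.loads(resp.content)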
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import numbers
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict, defaultdict
import torch
import pyro.poutine as poutine
from pyro.distributions import Categorical, Empirical
from pyro.ops.stats import waic
from pyro.poutine.util import site_is_subsample
class EmpiricalMarginal(Empirical):
"""
Marginal distribution over a single site (or multiple, provided they have the same
shape) from the ``TracePosterior``'s model.
.. note:: If multiple sites are specified, they must have the same tensor shape.
Samples from each site will be stacked and stored within a single tensor. See
:class:`~pyro.distributions.Empirical`. To hold the marginal distribution of sites
having different shapes, use :class:`~pyro.infer.abstract_infer.Marginals` instead.
:param TracePosterior trace_posterior: a ``TracePosterior`` instance representing
a Monte Carlo posterior.
:param list sites: optional list of sites for which we need to generate
the marginal distribution.
"""
def __init__(self, trace_posterior, sites=None, validate_args=None):
assert isinstance(
trace_posterior, TracePosterior
), "trace_dist must be trace posterior distribution object"
if sites is None:
sites = "_RETURN"
self._num_chains = 1
self._samples_buffer = defaultdict(list)
self._weights_buffer = defaultdict(list)
self._populate_traces(trace_posterior, sites)
samples, weights = self._get_samples_and_weights()
super().__init__(samples, weights, validate_args=validate_args)
def _get_samples_and_weights(self):
"""
Appends values collected in the samples/weights buffers to their
corresponding tensors.
"""
num_chains = len(self._samples_buffer)
samples_by_chain = []
weights_by_chain = []
for i in range(num_chains):
samples = torch.stack(self._samples_buffer[i], dim=0)
samples_by_chain.append(samples)
weights_dtype = (
samples.dtype if samples.dtype.is_floating_point else torch.float32
)
weights = torch.as_tensor(
self._weights_buffer[i], device=samples.device, dtype=weights_dtype
)
weights_by_chain.append(weights)
if len(samples_by_chain) == 1:
return samples_by_chain[0], weights_by_chain[0]
else:
return torch.stack(samples_by_chain, dim=0), torch.stack(
weights_by_chain, dim=0
)
def _add_sample(self, value, log_weight=None, chain_id=0):
"""
Adds a new data point to the sample. The values in successive calls to
``add`` must have the same tensor shape and size. Optionally, an
importance weight can be specified via ``log_weight`` or ``weight``
(default value of `1` is used if not specified).
:param torch.Tensor value: tensor to add to the sample.
:param torch.Tensor log_weight: log weight (optional) corresponding
to the sample.
:param int chain_id: chain id that generated the sample (optional).
Note that if this argument is provided, ``chain_id`` must lie
in ``[0, num_chains - 1]``, and there must be equal number
of samples per chain.
"""
# Apply default weight of 1.0.
if log_weight is None:
log_weight = 0.0
if (
self._validate_args
and not isinstance(log_weight, numbers.Number)
and log_weight.dim() > 0
):
raise ValueError("``weight.dim() > 0``, but weight should be a scalar.")
# Append to the buffer list
self._samples_buffer[chain_id].append(value)
self._weights_buffer[chain_id].append(log_weight)
self._num_chains = max(self._num_chains, chain_id + 1)
def _populate_traces(self, trace_posterior, sites):
assert isinstance(sites, (list, str))
for tr, log_weight, chain_id in zip(
trace_posterior.exec_traces,
trace_posterior.log_weights,
trace_posterior.chain_ids,
):
value = (
tr.nodes[sites]["value"]
if isinstance(sites, str)
else torch.stack([tr.nodes[site]["value"] for site in sites], 0)
)
self._add_sample(value, log_weight=log_weight, chain_id=chain_id)
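# Illustrative usage sketch (not part of the original module): how an
# ``EmpiricalMarginal`` is typically built from a ``TracePosterior`` such as
# ``pyro.infer.Importance``. The toy model, site name and sample count below
# are assumptions made only for this sketch.
def _empirical_marginal_example():
    import pyro
    import pyro.distributions as dist
    from pyro.infer import Importance

    def model():
        return pyro.sample("x", dist.Normal(0.0, 1.0))

    # Run importance sampling (with the prior as the default proposal) and
    # collect the marginal over site "x".
    posterior = Importance(model, num_samples=100).run()
    marginal = EmpiricalMarginal(posterior, sites="x")
    return marginal.mean  # Empirical also exposes .variance and .sample()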
class Marginals:
"""
Holds the marginal distribution over one or more sites from the ``TracePosterior``'s
    model. This is a convenience container class that can be extended by ``TracePosterior``
    subclasses, e.g. for implementing diagnostics.
:param TracePosterior trace_posterior: a TracePosterior instance representing
a Monte Carlo posterior.
:param list sites: optional list of sites for which we need to generate
the marginal distribution.
"""
def __init__(self, trace_posterior, sites=None, validate_args=None):
assert isinstance(
trace_posterior, TracePosterior
), "trace_dist must be trace posterior distribution object"
if sites is None:
sites = ["_RETURN"]
elif isinstance(sites, str):
sites = [sites]
else:
assert isinstance(sites, list)
self.sites = sites
self._marginals = OrderedDict()
self._diagnostics = OrderedDict()
self._trace_posterior = trace_posterior
self._populate_traces(trace_posterior, validate_args)
def _populate_traces(self, trace_posterior, validate):
self._marginals = {
site: EmpiricalMarginal(trace_posterior, site, validate)
for site in self.sites
}
def support(self, flatten=False):
"""
Gets support of this marginal distribution.
:param bool flatten: A flag to decide if we want to flatten `batch_shape`
when the marginal distribution is collected from the posterior with
``num_chains > 1``. Defaults to False.
        :returns: a dict mapping each site's name to that site's support.
:rtype: :class:`OrderedDict`
"""
support = OrderedDict(
[
(site, value.enumerate_support())
for site, value in self._marginals.items()
]
)
if self._trace_posterior.num_chains > 1 and flatten:
for site, samples in support.items():
shape = samples.size()
flattened_shape = torch.Size((shape[0] * shape[1],)) + shape[2:]
support[site] = samples.reshape(flattened_shape)
return support
@property
def empirical(self):
"""
A dictionary of sites' names and their corresponding :class:`EmpiricalMarginal`
distribution.
:type: :class:`OrderedDict`
"""
return self._marginals
class TracePosterior(object, metaclass=ABCMeta):
"""
Abstract TracePosterior object from which posterior inference algorithms inherit.
When run, collects a bag of execution traces from the approximate posterior.
This is designed to be used by other utility classes like `EmpiricalMarginal`,
that need access to the collected execution traces.
"""
def __init__(self, num_chains=1):
self.num_chains = num_chains
self._reset()
def _reset(self):
self.log_weights = []
self.exec_traces = []
self.chain_ids = [] # chain id corresponding to the sample
self._idx_by_chain = [
[] for _ in range(self.num_chains)
] # indexes of samples by chain id
self._categorical = None
def marginal(self, sites=None):
"""
Generates the marginal distribution of this posterior.
:param list sites: optional list of sites for which we need to generate
the marginal distribution.
:returns: A :class:`Marginals` class instance.
:rtype: :class:`Marginals`
"""
return Marginals(self, sites)
@abstractmethod
def _traces(self, *args, **kwargs):
"""
Abstract method implemented by classes that inherit from `TracePosterior`.
:return: Generator over ``(exec_trace, weight)`` or
``(exec_trace, weight, chain_id)``.
"""
raise NotImplementedError("Inference algorithm must implement ``_traces``.")
def __call__(self, *args, **kwargs):
# To ensure deterministic sampling in the presence of multiple chains,
        # we get the index from ``_idx_by_chain`` instead of sampling from
# the marginal directly.
random_idx = self._categorical.sample().item()
chain_idx, sample_idx = (
random_idx % self.num_chains,
random_idx // self.num_chains,
)
sample_idx = self._idx_by_chain[chain_idx][sample_idx]
trace = self.exec_traces[sample_idx].copy()
for name in trace.observation_nodes:
trace.remove_node(name)
return trace
def run(self, *args, **kwargs):
"""
Calls `self._traces` to populate execution traces from a stochastic
Pyro model.
:param args: optional args taken by `self._traces`.
:param kwargs: optional keywords args taken by `self._traces`.
"""
self._reset()
with poutine.block():
for i, vals in enumerate(self._traces(*args, **kwargs)):
if len(vals) == 2:
chain_id = 0
tr, logit = vals
else:
tr, logit, chain_id = vals
assert chain_id < self.num_chains
self.exec_traces.append(tr)
self.log_weights.append(logit)
self.chain_ids.append(chain_id)
self._idx_by_chain[chain_id].append(i)
self._categorical = Categorical(logits=torch.tensor(self.log_weights))
return self
def information_criterion(self, pointwise=False):
"""
Computes information criterion of the model. Currently, returns only "Widely
Applicable/Watanabe-Akaike Information Criterion" (WAIC) and the corresponding
effective number of parameters.
Reference:
[1] `Practical Bayesian model evaluation using leave-one-out cross-validation and WAIC`,
Aki Vehtari, Andrew Gelman, and Jonah Gabry
:param bool pointwise: a flag to decide if we want to get a vectorized WAIC or not. When
``pointwise=False``, returns the sum.
:returns: a dictionary containing values of WAIC and its effective number of
parameters.
:rtype: :class:`OrderedDict`
"""
if not self.exec_traces:
return {}
obs_node = None
log_likelihoods = []
for trace in self.exec_traces:
obs_nodes = trace.observation_nodes
if len(obs_nodes) > 1:
raise ValueError(
"Infomation criterion calculation only works for models "
"with one observation node."
)
if obs_node is None:
obs_node = obs_nodes[0]
elif obs_node != obs_nodes[0]:
raise ValueError(
"Observation node has been changed, expected {} but got {}".format(
obs_node, obs_nodes[0]
)
)
log_likelihoods.append(
trace.nodes[obs_node]["fn"].log_prob(trace.nodes[obs_node]["value"])
)
ll = torch.stack(log_likelihoods, dim=0)
waic_value, p_waic = waic(
ll, torch.tensor(self.log_weights, device=ll.device), pointwise
)
return OrderedDict([("waic", waic_value), ("p_waic", p_waic)])
class TracePredictive(TracePosterior):
"""
.. warning::
This class is deprecated and will be removed in a future release.
Use the :class:`~pyro.infer.predictive.Predictive` class instead.
Generates and holds traces from the posterior predictive distribution,
given model execution traces from the approximate posterior. This is
achieved by constraining latent sites to randomly sampled parameter
values from the model execution traces and running the model forward
to generate traces with new response ("_RETURN") sites.
:param model: arbitrary Python callable containing Pyro primitives.
:param TracePosterior posterior: trace posterior instance holding samples from the model's approximate posterior.
:param int num_samples: number of samples to generate.
:param keep_sites: The sites which should be sampled from posterior distribution (default: all)
"""
def __init__(self, model, posterior, num_samples, keep_sites=None):
self.model = model
self.posterior = posterior
self.num_samples = num_samples
self.keep_sites = keep_sites
super().__init__()
warnings.warn(
"The `TracePredictive` class is deprecated and will be removed "
"in a future release. Use the `pyro.infer.Predictive` class instead.",
FutureWarning,
)
def _traces(self, *args, **kwargs):
if not self.posterior.exec_traces:
self.posterior.run(*args, **kwargs)
data_trace = poutine.trace(self.model).get_trace(*args, **kwargs)
for _ in range(self.num_samples):
model_trace = self.posterior().copy()
self._remove_dropped_nodes(model_trace)
self._adjust_to_data(model_trace, data_trace)
resampled_trace = poutine.trace(
poutine.replay(self.model, model_trace)
).get_trace(*args, **kwargs)
yield (resampled_trace, 0.0, 0)
def _remove_dropped_nodes(self, trace):
if self.keep_sites is None:
return
for name, site in list(trace.nodes.items()):
if name not in self.keep_sites:
trace.remove_node(name)
continue
def _adjust_to_data(self, trace, data_trace):
subsampled_idxs = dict()
for name, site in trace.iter_stochastic_nodes():
# Adjust subsample sites
if site_is_subsample(site):
site["fn"] = data_trace.nodes[name]["fn"]
site["value"] = data_trace.nodes[name]["value"]
# Adjust sites under conditionally independent stacks
orig_cis_stack = site["cond_indep_stack"]
site["cond_indep_stack"] = data_trace.nodes[name]["cond_indep_stack"]
assert len(orig_cis_stack) == len(site["cond_indep_stack"])
site["fn"] = data_trace.nodes[name]["fn"]
for ocis, cis in zip(orig_cis_stack, site["cond_indep_stack"]):
# Select random sub-indices to replay values under conditionally independent stacks.
                # Otherwise, we would be assuming a dependence of indices between training data
# and prediction data.
assert ocis.name == cis.name
assert not site_is_subsample(site)
batch_dim = cis.dim - site["fn"].event_dim
subsampled_idxs[cis.name] = subsampled_idxs.get(
cis.name,
torch.randint(
0, ocis.size, (cis.size,), device=site["value"].device
),
)
site["value"] = site["value"].index_select(
batch_dim, subsampled_idxs[cis.name]
)
def marginal(self, sites=None):
"""
Gets marginal distribution for this predictive posterior distribution.
"""
return Marginals(self, sites)
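# Illustrative sketch (not part of the original module): since ``TracePredictive``
# is deprecated, new code is expected to use ``pyro.infer.Predictive`` instead.
# The toy model and the fabricated posterior samples below are assumptions made
# only for this sketch.
def _predictive_example():
    import torch
    import pyro
    import pyro.distributions as dist
    from pyro.infer import Predictive

    def model():
        loc = pyro.sample("loc", dist.Normal(0.0, 1.0))
        return pyro.sample("obs", dist.Normal(loc, 1.0))

    # Pretend these came from an inference run; the leading dim is num_samples.
    posterior_samples = {"loc": torch.zeros(10)}
    predictive = Predictive(model, posterior_samples=posterior_samples)
    return predictive()  # dict mapping the remaining sites to sampled tensors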
|
|
#!/usr/bin/env python
import sys
import bx.pwm.position_weight_matrix as pwmx
from bx.align import maf as align_maf
def isnan(x):
# return ieeespecial.isnan(x)
if x == x:
return False
return True
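# Note: the hand-rolled check above relies on the fact that NaN != NaN; for
# float inputs it is equivalent to ``math.isnan``.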
NaN = float('nan')
def main():
pwm_file = sys.argv[1]
splist = sys.argv[2]
if len(sys.argv) == 4:
inmaf = open(sys.argv[3])
else:
inmaf = sys.stdin
# read alignment species
species = []
for sp in splist.split(','):
species.append(sp)
# read weight matrices
pwm = {}
for wm in pwmx.Reader(open(pwm_file), format='basic'):
pwm[wm.id] = wm
fbunch = {}
for scoremax, index, headers in MafScorer(pwm, species, inmaf):
for k, matrix in scoremax.items():
fname = k + '.mx'
if fname not in fbunch:
fbunch[fname] = open(fname, 'w')
print("Writing", fname, file=sys.stderr)
for i in range(len(matrix)):
for j in range(len(matrix[i])):
print("%.2f" % matrix[i][j], end=' ', file=fbunch[fname])
print(file=fbunch[fname])
for file in fbunch.values():
file.close()
def MafScorer(pwm, species, inmaf):
index = 0
scoremax, width = None, None
for maf in align_maf.Reader(inmaf):
        try:
            val = MafBlockScorer(pwm, species, maf)
            for scoremax, width, headers in val:
                yield scoremax, index, headers
        except Exception:
            # Report the offending MAF block before bailing out.
            print("Failed on:", file=sys.stderr)
            syserr = align_maf.Writer(sys.stderr)
            syserr.write(maf)
            if width:
                print(width, file=sys.stderr)
            if scoremax:
                print(len(scoremax), file=sys.stderr)
            syserr.close()
            sys.exit(1)
        index += width
yield scoremax, index, headers
def MafMotifSelect(mafblock, pwm, motif=None, threshold=0):
if motif is not None and len(motif) != len(pwm):
raise Exception("pwm and motif must be the same length")
# generic alignment
alignlist = [c.text for c in mafblock.components]
align = pwmx.Align(alignlist)
nrows, ncols = align.dims
# required sequence length
minSeqLen = len(motif)
# record the text sizes from the alignment rows
for start in range(ncols - minSeqLen):
if align.rows[0][start] == '-':
continue
subseq = ""
pwm_score_vec = []
motif_score_vec = []
max_cols = 0
for ir in range(nrows):
expanded = align.rows[ir].count('-', start, minSeqLen)
subtext = align.rows[ir][start: minSeqLen+expanded]
max_cols = max(len(subtext), max_cols)
subseq = subtext.replace('-', '')
revseq = pwmx.reverse_complement(subseq)
# pwm score
nill, f_score = pwm.score_seq(subseq)[0]
r_score, nill = pwm.score_seq(revseq)[0]
pwm_score_vec.append(max(f_score, r_score))
# consensus score
if motif is not None:
for_score = int(pwmx.match_consensus(subseq, motif))
rev_score = int(pwmx.match_consensus(revseq, motif))
motif_score_vec.append(max(for_score, rev_score))
# check threshold
try:
assert not isnan(max(pwm_score_vec))
assert not isnan(max(motif_score_vec))
except AssertionError:
print(pwm_score_vec, motif_score_vec, file=sys.stderr)
print(len(subseq), len(pwm), file=sys.stderr)
if max(pwm_score_vec) < threshold:
continue
if max(motif_score_vec) < threshold:
continue
# chop block
col_start = start
col_end = max_cols + 1
motifmaf = mafblock.slice(col_start, col_end)
yield motifmaf, pwm_score_vec, motif_score_vec
"""
for ir in range(nrows):
# scan alignment row for motif subsequences
for start in range(ncols):
if align.rows[ir][start] == '-': continue
elif align.rows[ir][start] == 'n': continue
elif align.rows[ir][start] == 'N': continue
# gather enough subseq for motif
for ic in range(start,ncols):
char = align.rows[ir][ic].upper()
if char == '-' or char == 'N': continue
else: subseq += char
if len(subseq) == minSeqLen:
revseq = pwmx.reverse_complement( subseq )
align_match_lens.append( ic )
# pwm score
nill,f_score = pwm.score_seq( subseq )[0]
r_score, nill = pwm.score_seq( revseq )[0]
pwm_score_vec.append( max(f_score, r_score) )
# consensus score
if motif is not None:
for_score = int( pwmx.match_consensus(subseq,motif) )
rev_score = int( pwmx.match_consensus(revseq,motif) )
motif_score_vec.append( max(for_score, rev_score) )
#check threshold
try:
assert not isnan(max(pwm_score_vec) )
assert not isnan(max(motif_score_vec) )
except:
print >>sys.stderr, pwm_score_vec, motif_score_vec
print >>sys.stderr, len(subseq), len(pwm)
if max(pwm_score_vec) < threshold: continue
if max(motif_score_vec) < threshold: continue
# chop block
col_start = start
col_end = max( align_match_lens ) + 1
motifmaf = mafblock.slice( col_start, col_end )
print subseq,revseq,ic
print align_match_lens
yield motifmaf, pwm_score_vec, motif_score_vec
"""
def MafBlockScorer(pwm, species, maf):
width = len(maf.components[0].text)
headers = [(c.src, c.start, c.end) for c in maf.components]
# expand block rows to full
mafBlockSpecies = [specName.src.split('.')[0] for specName in maf.components]
alignlist = []
for sp in species:
try:
i = mafBlockSpecies.index(sp)
alignlist.append(maf.components[i].text)
except ValueError:
alignlist.append([NaN for n in range(width)])
alignrows = pwmx.Align(alignlist)
scoremax = {}
# record gap positions
filter = pwmx.score_align_gaps(alignrows)
# score pwm models
for model in pwm.keys():
scoremax[model] = pwm[model].score_align(alignrows, filter)
yield scoremax, width, headers
def MafMotifScorer(species, maf, motifs):
width = len(maf.components[0].text)
headers = [(c.src, c.start, c.end) for c in maf.components]
# expand block rows to full
mafBlockSpecies = [specName.src.split('.')[0] for specName in maf.components]
alignlist = []
for sp in species:
try:
i = mafBlockSpecies.index(sp)
alignlist.append(maf.components[i].text)
except ValueError:
alignlist.append([NaN for n in range(width)])
alignrows = pwmx.Align(alignlist, headers)
# record gap positions
filter = pwmx.score_align_gaps(alignrows)
# score motif
if isinstance(motifs, list):
scoremax = {}
for string in motifs:
scoremax[string] = pwmx.score_align_motif(alignrows, string, filter)
else:
scoremax = pwmx.score_align_motif(alignrows, motifs, filter)
yield scoremax, width, headers
if __name__ == '__main__':
main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for py_func op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import queue
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
class PyOpTest(test.TestCase):
def testBasic(self):
def my_func(x, y):
return np.sinh(x) + np.cosh(y)
# single type
with self.test_session():
x = constant_op.constant(1.0, dtypes.float32)
y = constant_op.constant(2.0, dtypes.float32)
z = script_ops.py_func(my_func, [x, y], dtypes.float32)
self.assertEqual(z.eval(), my_func(1.0, 2.0).astype(np.float32))
# scalar
with self.test_session():
x = constant_op.constant(1.0, dtypes.float32)
y = constant_op.constant(2.0, dtypes.float32)
z = script_ops.py_func(my_func, [x, y], [dtypes.float32])
self.assertEqual(z[0].eval(), my_func(1.0, 2.0).astype(np.float32))
# array
with self.test_session():
x = constant_op.constant([1.0, 2.0], dtypes.float64)
y = constant_op.constant([2.0, 3.0], dtypes.float64)
z = script_ops.py_func(my_func, [x, y], [dtypes.float64])
self.assertAllEqual(z[0].eval(),
my_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64))
# a bit exotic type (complex64)
with self.test_session():
x = constant_op.constant(1 + 2j, dtypes.complex64)
y = constant_op.constant(3 + 4j, dtypes.complex64)
z, = script_ops.py_func(my_func, [x, y], [dtypes.complex64])
self.assertAllClose(z.eval(), my_func(1 + 2j, 3 + 4j))
    # a bit exotic function (rfft)
with self.test_session():
x = constant_op.constant([1., 2., 3., 4.], dtypes.float32)
def rfft(x):
return np.fft.rfft(x).astype(np.complex64)
y, = script_ops.py_func(rfft, [x], [dtypes.complex64])
self.assertAllClose(y.eval(), np.fft.rfft([1., 2., 3., 4.]))
# returns a python literal.
with self.test_session():
def literal(x):
return 1.0 if x == 0.0 else 0.0
x = constant_op.constant(0.0, dtypes.float64)
y, = script_ops.py_func(literal, [x], [dtypes.float64])
self.assertAllClose(y.eval(), 1.0)
# returns a list
with self.test_session():
def list_func(x):
return [x, x + 1]
x = constant_op.constant(0.0, dtypes.float64)
y, z = script_ops.py_func(list_func, [x], [dtypes.float64] * 2)
self.assertAllClose(y.eval(), 0.0)
self.assertAllClose(z.eval(), 1.0)
# returns a tuple
with self.test_session():
def tuple_func(x):
return x, x + 1
x = constant_op.constant(0.0, dtypes.float64)
y, z = script_ops.py_func(tuple_func, [x], [dtypes.float64] * 2)
self.assertAllClose(y.eval(), 0.0)
self.assertAllClose(z.eval(), 1.0)
# returns a tuple, Tout and inp a tuple
with self.test_session():
x = constant_op.constant(0.0, dtypes.float64)
y, z = script_ops.py_func(tuple_func, (x,),
(dtypes.float64, dtypes.float64))
self.assertAllClose(y.eval(), 0.0)
self.assertAllClose(z.eval(), 1.0)
def testStrings(self):
def read_fixed_length_numpy_strings():
return np.array([b" there"])
def read_and_return_strings(x, y):
return x + y
with self.test_session():
x = constant_op.constant([b"hello", b"hi"], dtypes.string)
y, = script_ops.py_func(read_fixed_length_numpy_strings, [],
[dtypes.string])
z, = script_ops.py_func(read_and_return_strings, [x, y], [dtypes.string])
self.assertListEqual(list(z.eval()), [b"hello there", b"hi there"])
def testStringPadding(self):
correct = [b"this", b"is", b"a", b"test"]
with self.test_session():
s, = script_ops.py_func(lambda: [correct], [], [dtypes.string])
self.assertAllEqual(s.eval(), correct)
def testLarge(self):
with self.test_session() as sess:
x = array_ops.zeros([1000000], dtype=np.float32)
y = script_ops.py_func(lambda x: x + 1, [x], [dtypes.float32])
z = script_ops.py_func(lambda x: x * 2, [x], [dtypes.float32])
for _ in xrange(100):
sess.run([y[0].op, z[0].op])
def testNoInput(self):
with self.test_session():
x, = script_ops.py_func(lambda: 42.0, [], [dtypes.float64])
self.assertAllClose(x.eval(), 42.0)
def testCleanup(self):
for _ in xrange(1000):
g = ops.Graph()
with g.as_default():
c = constant_op.constant([1.], dtypes.float32)
_ = script_ops.py_func(lambda x: x + 1, [c], [dtypes.float32])
self.assertTrue(script_ops._py_funcs.size() < 100)
def testAlias(self):
with self.test_session():
np_array = np.array([1.0, 2.0], dtype=np.float32)
tf_array = script_ops.py_func(lambda: np_array, [], [dtypes.float32])
value = tf_array + constant_op.constant([2.0, 3.0], dtype=dtypes.float32)
value.op.run()
self.assertAllEqual(np_array, [1.0, 2.0])
def testBadNumpyReturnType(self):
with self.test_session():
def bad():
# Structured numpy arrays aren't supported.
return np.array([], dtype=[("foo", np.float32)])
y, = script_ops.py_func(bad, [], [dtypes.float32])
with self.assertRaisesRegexp(errors.UnimplementedError,
"Unsupported numpy type"):
y.eval()
def testBadReturnType(self):
with self.test_session():
def bad():
# Non-string python objects aren't supported.
return {"foo": dtypes.float32}
z, = script_ops.py_func(bad, [], [dtypes.int64])
with self.assertRaisesRegexp(errors.UnimplementedError,
"Unsupported object type"):
z.eval()
def testStateful(self):
# Not using self.test_session(), which disables optimization.
with session_lib.Session() as sess:
producer = iter(range(3))
x, = script_ops.py_func(lambda: next(producer), [], [dtypes.int64])
self.assertEqual(sess.run(x), 0)
self.assertEqual(sess.run(x), 1)
self.assertEqual(sess.run(x), 2)
def testStateless(self):
# Not using self.test_session(), which disables optimization.
with session_lib.Session() as sess:
producer = iter(range(3))
x, = script_ops.py_func(
lambda: next(producer), [], [dtypes.int64], stateful=False)
self.assertEqual(sess.run(x), 0)
self.assertEqual(sess.run(x), 0)
self.assertEqual(sess.run(x), 0)
def testGradientFunction(self):
# Input to tf.py_func is necessary, otherwise get_gradient_function()
    # returns None by default.
a = constant_op.constant(0)
x, = script_ops.py_func(lambda a: 0, [a], [dtypes.int64])
y, = script_ops.py_func(lambda a: 0, [a], [dtypes.int64], stateful=False)
self.assertEqual(None, ops.get_gradient_function(x.op))
self.assertEqual(None, ops.get_gradient_function(y.op))
def testCOrder(self):
with self.test_session():
val = [[1, 2], [3, 4]]
x, = script_ops.py_func(lambda: np.array(val, order="F"), [], [dtypes.int64])
self.assertAllEqual(val, x.eval())
def testParallel(self):
# Tests that tf.py_func's can run in parallel if they release the GIL.
with self.test_session() as session:
q = queue.Queue(1)
def blocking_put():
q.put(42)
q.join() # Wait for task_done().
return 42
def blocking_get():
v = q.get(block=True) # Wait for put().
q.task_done()
return v
x, = script_ops.py_func(blocking_put, [], [dtypes.int64])
y, = script_ops.py_func(blocking_get, [], [dtypes.int64])
# This will result in a deadlock if the py_func ops don't run in parallel.
session.run([x, y])
def testNoReturnValueStateful(self):
class State(object):
def __init__(self):
self._value = np.array([1], np.int64)
def _increment(self, diff):
self._value += diff
def increment(self, diff):
return script_ops.py_func(self._increment, [diff], [], stateful=True)
@property
def value(self):
return self._value
with self.test_session() as sess:
s = State()
op = s.increment(constant_op.constant(2, dtypes.int64))
ret = sess.run(op)
self.assertIsNone(ret)
self.assertAllEqual([3], s.value)
def testNoReturnValueStateless(self):
def do_nothing(unused_x):
pass
f = script_ops.py_func(
do_nothing, [constant_op.constant(3, dtypes.int64)], [], stateful=False)
with self.test_session() as sess:
self.assertEqual(sess.run(f), [])
def _testExceptionHandling(self, py_exp, tf_exp):
def raise_exception():
raise py_exp("blah") # pylint: disable=not-callable
f = script_ops.py_func(raise_exception, [], [])
with self.test_session() as sess:
with self.assertRaisesRegexp(tf_exp, "blah"):
sess.run(f)
def testExceptionHandling(self):
self._testExceptionHandling(ValueError, errors.InvalidArgumentError)
self._testExceptionHandling(TypeError, errors.InvalidArgumentError)
self._testExceptionHandling(StopIteration, errors.OutOfRangeError)
self._testExceptionHandling(MemoryError, errors.ResourceExhaustedError)
self._testExceptionHandling(NotImplementedError, errors.UnimplementedError)
class WeirdError(Exception):
pass
self._testExceptionHandling(WeirdError, errors.UnknownError)
if __name__ == "__main__":
test.main()
|
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import division
import os
from io import BytesIO
import math
import threading
from cement.utils.misc import minimal_logger
from . import aws
from ..objects.exceptions import NotFoundError, FileTooLargeError, UploadError
from ..core import io
from .utils import static_var
LOG = minimal_logger(__name__)
CHUNK_SIZE = 5252880 # Slightly above the 5 MB minimum part size S3 allows for multipart uploads
THREAD_COUNT = 8 # Number of threads to use for multithreaded mode
def _make_api_call(operation_name, **operation_options):
return aws.make_api_call('s3', operation_name, **operation_options)
def upload_file(bucket, key, file_path):
with open(file_path, 'rb') as fp:
return _make_api_call('put_object',
Bucket=bucket,
Key=key,
Body=fp)
def get_object_info(bucket, object_key):
result = _make_api_call('list_objects',
Bucket=bucket,
Prefix=object_key)
if 'Contents' not in result or len(result['Contents']) < 1:
raise NotFoundError('Object not found.')
objects = result['Contents']
if len(objects) == 1:
return objects[0]
else:
# There is more than one result, search for correct one
object_key = next((o for o in objects if o['Key'] == object_key), None)
if object_key is None:
raise NotFoundError('Object not found.')
else:
return object_key
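# Example (illustrative only; bucket and key names are hypothetical):
# list_objects is a prefix search, so asking for 'app/v1.zip' may also
# return 'app/v1.zip.old'. get_object_info() therefore filters the
# listing for an exact key match before returning:
#
#     info = get_object_info('my-bucket', 'app/v1.zip')
#     print(info['Key'], info['Size'])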
def get_object(bucket, key):
result = _make_api_call('get_object',
Bucket=bucket,
Key=key)
return result['Body'].read()
def delete_objects(bucket, keys):
objects = [dict(Key=k) for k in keys]
result = _make_api_call('delete_objects',
Bucket=bucket,
Delete={'Objects': objects})
return result
def upload_workspace_version(bucket, key, file_path, workspace_type='Application'):
try:
size = os.path.getsize(file_path)
except OSError as err:
if err.errno == 2:
raise NotFoundError('{0} Version does not exist locally ({1}).'
' Try uploading the {0} Version again.'.format(workspace_type, err.filename))
raise err
LOG.debug('Upload {0} Version. File size = {1}'.format(workspace_type, str(size)))
if size > 536870912:
raise FileTooLargeError('Archive cannot be any larger than 512MB')
if size < 7340032:
result = simple_upload(bucket, key, file_path)
else:
result = multithreaded_upload(bucket, key, file_path)
return result
def upload_application_version(bucket, key, file_path):
upload_workspace_version(bucket, key, file_path, 'Application')
def upload_platform_version(bucket, key, file_path):
upload_workspace_version(bucket, key, file_path, 'Platform')
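# Usage sketch (bucket, key and path below are hypothetical; real AWS
# credentials are required). upload_workspace_version() rejects archives
# over 512 MB, uses a single put_object call for anything under ~7 MB,
# and falls back to the multithreaded multipart path otherwise:
#
#     upload_application_version('my-bucket',
#                                'my-app/app-v1.zip',
#                                '/tmp/app-v1.zip')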
def simple_upload(bucket, key, file_path):
io.echo('Uploading', key, 'to S3. This may take a while.')
result = upload_file(bucket, key, file_path)
io.echo('Upload Complete.')
return result
def multithreaded_upload(bucket, key, file_path):
"""
Upload a file in multiple parts using multiple threads.
Takes advantage of S3's multipart upload.
:param bucket: S3 bucket name
:param key: keyname of file to be uploaded
:param file_path: full path location of file to be uploaded
:param region: region to use for S3
:return: Result dictionary
"""
size = os.path.getsize(file_path)
total_parts = math.ceil(size / CHUNK_SIZE) # Number of parts needed
LOG.debug('Doing multi-threaded upload. Parts Needed=' + str(total_parts))
# Begin multi-part upload
upload_id = _get_multipart_upload_id(bucket, key)
io.update_upload_progress(0)
# Upload parts
try:
etaglist = [] # list of part ETags
with open(file_path, 'rb') as f:
# Create threads to handle parts of upload
lock = threading.Lock()
jobs = []
for i in range(THREAD_COUNT):
p = threading.Thread(
target=_upload_chunk,
args=(f, lock, etaglist, total_parts,
bucket, key, upload_id),
)
p.daemon = True
jobs.append(p)
p.start()
_wait_for_threads(jobs)
# S3 requires the etag list to be sorted
etaglist = sorted(etaglist, key=lambda k: k['PartNumber'])
# Validate we uploaded all parts
if len(etaglist) != total_parts:
LOG.debug('Uploaded {0} parts, but should have uploaded {1} parts.'
.format(len(etaglist), total_parts))
raise UploadError('An error occurred while uploading the Application Version. '
'Use the --debug option for more information if the problem persists.')
result = _make_api_call('complete_multipart_upload',
Bucket=bucket,
Key=key,
UploadId=upload_id,
MultipartUpload=dict(Parts=etaglist))
return result
except (Exception, KeyboardInterrupt):
# We don't want to clean up the multipart upload in case the user
# decides to continue it later.
raise
def _wait_for_threads(jobs):
alive = True
while alive:
alive = False
for j in jobs:
"""
We want to wait forever for the thread to finish.
j.join() however is a halting call. We need to pass in a
time to j.join() so it is non halting. This way a user can use
CTRL+C to terminate the command. 2**31 is the largest number we
can pass into j.join()
"""
try:
timeout = threading.TIMEOUT_MAX
except AttributeError: # Python 2
timeout = 2**16 # 18 hours should be sufficient.
j.join(timeout)
if j.is_alive():
alive = True
def _upload_chunk(f, lock, etaglist, total_parts, bucket, key, upload_id):
LOG.debug('Creating child thread')
while True:
data, part = _read_next_section_from_file(f, lock)
if not data:
LOG.debug('No data left, closing')
return
# First check whether S3 already has this part
for i in range(0, 5):
try:
etag = _get_part_etag(bucket, key, part, upload_id)
if etag is None:
b = BytesIO()
b.write(data)
b.seek(0)
response = _make_api_call('upload_part',
Bucket=bucket,
Key=key,
UploadId=upload_id,
Body=b,
PartNumber=part)
etag = response['ETag']
etaglist.append({'PartNumber': part, 'ETag': etag})
progress = (1/total_parts) * len(etaglist)
io.update_upload_progress(progress)
# No errors, break out of loop
break
except Exception as e:
# Swallow all exceptions so they are not printed as a stack
# trace to the console; they are typically connection resets
# and other transient errors.
LOG.debug('Exception raised: ' + str(e))
# Loop will cause a retry
def _get_part_etag(bucket, key, part, upload_id):
try:
response = _make_api_call('list_parts',
Bucket=bucket,
Key=key,
UploadId=upload_id)
except Exception as e:
# Swallow all exceptions so they are not printed as a stack trace
# to the console.
LOG.debug('Exception raised: ' + str(e))
return None
if 'Parts' not in response:
return None
etag = next((i['ETag'] for i in response['Parts']
if i['PartNumber'] == part), None)
return etag
def _get_multipart_upload_id(bucket, key):
# Check to see if multipart already exists
response = _make_api_call('list_multipart_uploads',
Bucket=bucket,
Prefix=key)
try:
for r in response['Uploads']:
if r['Key'] == key:
return r['UploadId']
except KeyError:
pass # There are no uploads with that prefix
# Not found; let's initiate the upload
response = _make_api_call('create_multipart_upload',
Bucket=bucket,
Key=key)
return response['UploadId']
@static_var('part_num', 0)
def _read_next_section_from_file(f, lock):
try:
with lock:
data = f.read(CHUNK_SIZE)
_read_next_section_from_file.part_num += 1
return data, _read_next_section_from_file.part_num
except ValueError as e:
LOG.debug('Reading file raised error: ' + str(e))
return b'', None # File was closed; the process is terminating
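# Overview of the multithreaded flow above (descriptive sketch, not code):
# _get_multipart_upload_id() reuses an in-progress multipart upload for the
# key when one exists, otherwise it creates a new one. Each worker thread
# repeatedly calls _read_next_section_from_file() under the shared lock to
# obtain (chunk, part_number) pairs numbered from 1, and _upload_chunk()
# skips parts S3 already reports via list_parts. Once every thread finishes,
# complete_multipart_upload() is called with the part-number-sorted ETag
# list assembled in multithreaded_upload().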
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""parameters.py -- This module contains the prospector base class for
models, ProspectorParams. This class is responsible for maintaining model
parameter state, converting between parameter dictionaries and vectors,
and computing parameter dependencies and prior probabilities.
"""
from copy import deepcopy
import warnings
import numpy as np
import json, pickle
from . import priors
from .templates import describe
__all__ = ["ProspectorParams"]
# A template for what parameter configuration list element should look like
param_template = {'name': '',
'N': 1,
'isfree': True,
'init': 0.5, 'units': '',
'prior': priors.TopHat(mini=0, maxi=1.0),
'depends_on': None}
class ProspectorParams(object):
"""
This is the base model class that holds model parameters and information
about them (e.g. priors, bounds, transforms, free vs fixed state). In
addition to the documented methods, it contains several important
attributes:
* :py:attr:`params`: model parameter state dictionary.
* :py:attr:`theta_index`: A dictionary that maps parameter names to indices (or rather
slices) of the parameter vector ``theta``.
* :py:attr:`config_dict`: Information about each parameter as a dictionary keyed by
parameter name for easy access.
* :py:attr:`config_list`: Information about each parameter stored as a list.
Initialization is via, e.g.,
.. code-block:: python
model_dict = {"mass": {"N": 1, "isfree": False, "init": 1e10}}
model = ProspectorParams(model_dict, param_order=None)
:param configuration:
A list or dictionary of model parameters specifications.
"""
def __init__(self, configuration, verbose=True, param_order=None, **kwargs):
"""
:param configuration:
A list or dictionary of parameter specification dictionaries.
:param param_order: (optional, default: None)
If given and `configuration` is a dictionary, this will specify the
order in which the parameters appear in the theta vector. Iterable
of strings.
"""
self.init_config = deepcopy(configuration)
self.parameter_order = param_order
if type(configuration) == list:
self.config_list = configuration
self.config_dict = plist_to_pdict(self.config_list)
elif type(configuration) == dict:
self.config_dict = configuration
self.config_list = pdict_to_plist(self.config_dict, order=param_order)
else:
raise TypeError("Configuration variable not of valid type: "
"{}".format(type(configuration)))
self.configure(**kwargs)
self.verbose = verbose
def __repr__(self):
return ":::::::\n{}\n\n{}".format(self.__class__, self.description)
def configure(self, reset=False, **kwargs):
"""Use the :py:attr:`config_dict` to generate a :py:attr:`theta_index`
mapping, and propogate the initial parameters into the
:py:attr:`params` state dictionary, and store the intital theta vector
thus implied.
:param kwargs:
Keyword parameters can be used to override or add to the initial
parameter values specified in :py:attr:`config_dict`
:param reset: (default: False)
If true, empty the params dictionary before re-reading the
:py:attr:`config_dict`
"""
self._has_parameter_dependencies = False
if (not hasattr(self, 'params')) or reset:
self.params = {}
self.map_theta()
# Propagate initial parameter values from the configure dictionary
# Populate the 'prior' key of the configure dictionary
# Check for 'depends_on'
for par, info in list(self.config_dict.items()):
self.params[par] = np.atleast_1d(info['init']).copy()
try:
# this is for backwards compatibility
self.config_dict[par]['prior'] = info['prior_function']
except(KeyError):
pass
if info.get('depends_on', None) is not None:
assert callable(info["depends_on"])
self._has_parameter_dependencies = True
# Propagate user-supplied values to the params state, overriding the
# configure `init` values
for k, v in list(kwargs.items()):
self.params[k] = np.atleast_1d(v)
# store these initial values
self.initial_theta = self.theta.copy()
def map_theta(self):
"""Construct the mapping from parameter name to the index in the theta
vector corresponding to the first element of that parameter. Called
during configuration.
"""
self.theta_index = {}
count = 0
for par in self.free_params:
n = self.config_dict[par]['N']
self.theta_index[par] = slice(count, count + n)
count += n
good = len(self.config_dict[par]['prior']) == n
if not good:
msg = "{} has wrong length prior, should be {}"
warnings.warn(msg.format(par, n), RuntimeWarning)
self.ndim = count
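# Example of the mapping built above (parameter names are illustrative,
# not part of this module): with free parameters 'mass' (N=1), 'zred'
# (N=1) and 'sfh_params' (N=3), map_theta() would yield
#
#     self.theta_index == {'mass': slice(0, 1),
#                          'zred': slice(1, 2),
#                          'sfh_params': slice(2, 5)}
#     self.ndim == 5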
def set_parameters(self, theta):
"""Propagate theta into the model parameters :py:attr:`params` dictionary.
:param theta:
A theta parameter vector containing the desired parameters. ndarray
of shape ``(ndim,)``
"""
assert len(theta) == self.ndim
for k, inds in list(self.theta_index.items()):
self.params[k] = np.atleast_1d(theta[inds]).copy()
self.propagate_parameter_dependencies()
def prior_product(self, theta, nested=False, **extras):
"""Public version of _prior_product to be overridden by subclasses.
:param theta:
The parameter vector for which you want to calculate the
prior. ndarray of shape ``(..., ndim)``
:param nested:
If using nested sampling, this will only return 0 (or -inf). This
behavior can be overridden if you want to include complicated
priors that are not included in the unit prior cube based proposals
(e.g. something that is difficult to transform from the unit cube.)
:returns lnp_prior:
The natural log of the prior probability at ``theta``
"""
lpp = self._prior_product(theta)
if nested & np.any(np.isfinite(lpp)):
return 0.0
return lpp
def _prior_product(self, theta, **extras):
"""Return a scalar which is the ln of the product of the prior
probabilities for each element of theta. Requires that the prior
functions are defined in the theta descriptor.
:param theta:
Iterable containing the free model parameter values. ndarray of
shape ``(ndim,)``
:returns lnp_prior:
The natural log of the product of the prior probabilities for these
parameter values.
"""
lnp_prior = 0
for k, inds in list(self.theta_index.items()):
func = self.config_dict[k]['prior']
this_prior = np.sum(func(theta[..., inds]), axis=-1)
lnp_prior += this_prior
return lnp_prior
def prior_transform(self, unit_coords):
"""Go from unit cube to parameter space, for nested sampling.
:param unit_coords:
Coordinates in the unit hyper-cube. ndarray of shape ``(ndim,)``.
:returns theta:
The parameter vector corresponding to the location in prior CDF
corresponding to ``unit_coords``. ndarray of shape ``(ndim,)``
"""
theta = np.zeros(len(unit_coords))
for k, inds in list(self.theta_index.items()):
func = self.config_dict[k]['prior'].unit_transform
theta[inds] = func(unit_coords[inds])
return theta
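# Sketch of how a nested sampler would use prior_transform (the prior choice
# here is a hypothetical example): each free parameter's prior supplies a
# unit_transform, e.g. priors.TopHat(mini=0, maxi=2) maps a unit-cube draw
# of 0.25 to 0.5, so a full unit-cube vector becomes a theta vector:
#
#     u = np.random.uniform(size=model.ndim)
#     theta = model.prior_transform(u)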
def propagate_parameter_dependencies(self):
"""Propogate any parameter dependecies. That is, for parameters whose
value depends on another parameter, calculate those values and store
them in the :py:attr:`self.params` dictionary.
"""
if self._has_parameter_dependencies is False:
return
for p, info in list(self.config_dict.items()):
if info.get('depends_on', None) is not None:
value = info['depends_on'](**self.params)
self.params[p] = np.atleast_1d(value)
def rectify_theta(self, theta, epsilon=1e-10):
"""Replace zeros in a given theta vector with a small number epsilon.
"""
zero = (theta == 0)
theta[zero] = epsilon
return theta
@property
def theta(self):
"""The current value of the theta vector, pulled from the ``params``
state dictionary.
"""
theta = np.zeros(self.ndim)
for k, inds in list(self.theta_index.items()):
theta[inds] = self.params[k]
return theta
@property
def free_params(self):
"""A list of the names of the free model parameters.
"""
return [k['name'] for k in pdict_to_plist(self.config_list)
if k.get('isfree', False)]
@property
def fixed_params(self):
"""A list of the names fixed model parameters that are specified in the
``config_dict``.
"""
return [k['name'] for k in pdict_to_plist(self.config_list)
if (k.get('isfree', False) is False)]
@property
def description(self):
return describe(self.config_dict, current_params=self.params)
def theta_labels(self, name_map={}):
"""Using the theta_index parameter map, return a list of the model
parameter names that has the same order as the sampling chain array.
:param name_map:
A dictionary mapping model parameter names to output label
names.
:returns labels:
A list of labels of the same length and order as the theta
vector.
"""
label, index = [], []
for p, inds in list(self.theta_index.items()):
nt = inds.stop - inds.start
try:
name = name_map[p]
except(KeyError):
name = p
if nt == 1:
label.append(name)
index.append(inds.start)
else:
for i in range(nt):
label.append(name+'_{0}'.format(i+1))
index.append(inds.start+i)
return [l for (i, l) in sorted(zip(index, label))]
def theta_bounds(self):
"""Get the bounds on each parameter from the prior.
:returns bounds:
A list of length ``ndim`` of tuples ``(lo, hi)`` giving the
parameter bounds.
"""
bounds = np.zeros([self.ndim, 2])
for p, inds in list(self.theta_index.items()):
pb = self.config_dict[p]['prior'].bounds()
bounds[inds, :] = np.array(pb).T
# Force each bound to a tuple of plain scalars
bounds = [(np.atleast_1d(a)[0], np.atleast_1d(b)[0])
for a, b in bounds]
return bounds
def theta_disps(self, default_disp=0.1, fractional_disp=False):
"""Get a vector of absolute dispersions for each parameter to use in
generating sampler balls for emcee's Ensemble sampler. This can be
overridden by subclasses if fractional dispersions are desired.
:param default_disp: (default: 0.1)
The default dispersion to use in case the ``"init_disp"`` key is
not provided in the parameter configuration.
:param fractional_disp: (default: False)
Treat the dispersion values as fractional dispersions.
:returns disp:
The dispersion in the parameters to use for generating clouds of
walkers (or minimizers.) ndarray of shape ``(ndim,)``
"""
disp = np.zeros(self.ndim) + default_disp
for par, inds in list(self.theta_index.items()):
d = self.config_dict[par].get('init_disp', default_disp)
disp[inds] = d
if fractional_disp:
disp = self.theta * disp
return disp
def theta_disp_floor(self, thetas=None):
"""Get a vector of dispersions for each parameter to use as a floor for
the emcee walker-calculated dispersions. This can be overridden by
subclasses.
:returns disp_floor:
The minimum dispersion in the parameters to use for generating
clouds of walkers (or minimizers.) ndarray of shape ``(ndim,)``
"""
dfloor = np.zeros(self.ndim)
for par, inds in list(self.theta_index.items()):
d = self.config_dict[par].get('disp_floor', 0.0)
dfloor[inds] = d
return dfloor
def clip_to_bounds(self, thetas):
"""Clip a set of parameters theta to within the priors.
:param thetas:
The parameter vector, ndarray of shape ``(ndim,)``.
:returns thetas:
The input vector, clipped to the bounds of the priors.
"""
bounds = self.theta_bounds()
for i in range(len(bounds)):
lower, upper = bounds[i]
thetas[i] = np.clip(thetas[i], lower, upper)
return thetas
@property
def _config_dict(self):
"""Backwards compatibility
"""
return self.config_dict
def plist_to_pdict(inplist):
"""Convert from a parameter list to a parameter dictionary, where the keys
of the dictionary are the parameter names.
"""
plist = deepcopy(inplist)
if type(plist) is dict:
return plist.copy()
pdict = {}
for p in plist:
name = p.pop('name')
pdict[name] = p
return pdict
def pdict_to_plist(pdict, order=None):
"""Convert from a dictionary of parameter dictionaries to a list of
parameter dictionaries, adding each key to each value dictionary as the
``name`` keyword. Optionally, do this in an order specified by ``order``.
This method is not used often, so it can be a bit inefficient.
:param pdict:
A dictionary of parameter specification dictionaries, keyed by
parameter name. If a list is given instead of a dictionary, this same
list is returned.
:param order:
An iterable of parameter names specifying the order in which they
should be added to the parameter list
:returns plist:
A list of parameter specification dictionaries (with the `"name"` key
added.) The listed dictionaries are *not* copied from the input
dictionary.
"""
if type(pdict) is list:
return pdict[:]
plist = []
if order is not None:
assert len(order) == len(pdict)
else:
order = pdict.keys()
for k in order:
v = pdict[k]
v['name'] = k
plist += [v]
return plist
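# Round-trip example (illustrative specification): converting between the
# two configuration representations moves each parameter name between the
# dictionary key and the 'name' entry of its specification:
#
#     plist = [{'name': 'mass', 'N': 1, 'isfree': True, 'init': 1e10}]
#     pdict = plist_to_pdict(plist)   # {'mass': {'N': 1, 'isfree': True, 'init': 1e10}}
#     assert pdict_to_plist(pdict)[0]['name'] == 'mass'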
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Provides thread-pool-like functionality for workers accessing App Engine.
The pool adapts to slow or timing out requests by reducing the number of
active workers, or increasing the number when requests latency reduces.
"""
import logging
import Queue
import sys
import threading
import time
import traceback
from google.appengine.tools.requeue import ReQueue
logger = logging.getLogger('google.appengine.tools.adaptive_thread_pool')
_THREAD_SHOULD_EXIT = '_THREAD_SHOULD_EXIT'
INITIAL_BACKOFF = 1.0
BACKOFF_FACTOR = 2.0
class Error(Exception):
"""Base-class for exceptions in this module."""
class WorkItemError(Error):
"""Error while processing a WorkItem."""
class RetryException(Error):
"""A non-fatal exception that indicates that a work item should be retried."""
def InterruptibleSleep(sleep_time):
"""Puts thread to sleep, checking this threads exit_flag four times a second.
Args:
sleep_time: Time to sleep.
"""
slept = 0.0
epsilon = .0001
thread = threading.currentThread()
while slept < sleep_time - epsilon:
remaining = sleep_time - slept
this_sleep_time = min(remaining, 0.25)
time.sleep(this_sleep_time)
slept += this_sleep_time
if thread.exit_flag:
return
class WorkerThread(threading.Thread):
"""A WorkerThread to execute WorkItems.
Attributes:
exit_flag: A boolean indicating whether this thread should stop
its work and exit.
"""
def __init__(self, thread_pool, thread_gate, name=None):
"""Initialize a WorkerThread instance.
Args:
thread_pool: An AdaptiveThreadPool instance.
thread_gate: A ThreadGate instance.
name: A name for this WorkerThread.
"""
threading.Thread.__init__(self)
self.setDaemon(True)
self.exit_flag = False
self.__error = None
self.__traceback = None
self.__thread_pool = thread_pool
self.__work_queue = thread_pool.requeue
self.__thread_gate = thread_gate
if not name:
self.__name = 'Anonymous_' + self.__class__.__name__
else:
self.__name = name
def run(self):
"""Perform the work of the thread."""
logger.debug('[%s] %s: started', self.getName(), self.__class__.__name__)
try:
self.WorkOnItems()
except:
self.SetError()
logger.debug('[%s] %s: exiting', self.getName(), self.__class__.__name__)
def SetError(self):
"""Sets the error and traceback information for this thread.
This must be called from an exception handler.
"""
if not self.__error:
exc_info = sys.exc_info()
self.__error = exc_info[1]
self.__traceback = exc_info[2]
logger.exception('[%s] %s:', self.getName(), self.__class__.__name__)
def WorkOnItems(self):
"""Perform the work of a WorkerThread."""
while not self.exit_flag:
item = None
self.__thread_gate.StartWork()
try:
status, instruction = WorkItem.FAILURE, ThreadGate.DECREASE
try:
if self.exit_flag:
instruction = ThreadGate.HOLD
break
try:
item = self.__work_queue.get(block=True, timeout=1.0)
except Queue.Empty:
instruction = ThreadGate.HOLD
continue
if item == _THREAD_SHOULD_EXIT or self.exit_flag:
status, instruction = WorkItem.SUCCESS, ThreadGate.HOLD
break
logger.debug('[%s] Got work item %s', self.getName(), item)
status, instruction = item.PerformWork(self.__thread_pool)
except RetryException:
status, instruction = WorkItem.RETRY, ThreadGate.HOLD
except:
self.SetError()
raise
finally:
try:
if item:
if status == WorkItem.SUCCESS:
self.__work_queue.task_done()
elif status == WorkItem.RETRY:
try:
self.__work_queue.reput(item, block=False)
except Queue.Full:
logger.error('[%s] Failed to reput work item.', self.getName())
raise Error('Failed to reput work item')
else:
if not self.__error:
if item.error:
self.__error = item.error
self.__traceback = item.traceback
else:
self.__error = WorkItemError(
'Fatal error while processing %s' % item)
raise self.__error
finally:
self.__thread_gate.FinishWork(instruction=instruction)
def CheckError(self):
"""If an error is present, then log it."""
if self.__error:
logger.error('Error in %s: %s', self.getName(), self.__error)
if self.__traceback:
logger.debug('%s', ''.join(traceback.format_exception(
self.__error.__class__,
self.__error,
self.__traceback)))
def __str__(self):
return self.__name
class AdaptiveThreadPool(object):
"""A thread pool which processes WorkItems from a queue.
Attributes:
requeue: The requeue instance which holds work items for this
thread pool.
"""
def __init__(self,
num_threads,
queue_size=None,
base_thread_name=None,
worker_thread_factory=WorkerThread,
queue_factory=Queue.Queue):
"""Initialize an AdaptiveThreadPool.
An adaptive thread pool executes WorkItems using a number of
WorkerThreads. WorkItems represent items of work that may
succeed, soft fail, or hard fail. In addition, a completed work
item can signal this AdaptiveThreadPool to enable more or fewer
threads. Initially one thread is active. Soft failures are
requeued to be retried. Hard failures cause this
AdaptiveThreadPool to shut down entirely. See the WorkItem class
for more details.
Args:
num_threads: The number of threads to use.
queue_size: The size of the work item queue to use.
base_thread_name: A string from which worker thread names are derived.
worker_thread_factory: A factory which produces WorkerThreads.
queue_factory: Used for dependency injection.
"""
if queue_size is None:
queue_size = num_threads
self.requeue = ReQueue(queue_size, queue_factory=queue_factory)
self.__thread_gate = ThreadGate(num_threads)
self.__num_threads = num_threads
self.__threads = []
for i in xrange(num_threads):
thread = worker_thread_factory(self, self.__thread_gate)
if base_thread_name:
base = base_thread_name
else:
base = thread.__class__.__name__
thread.name = '%s-%d' % (base, i)
self.__threads.append(thread)
thread.start()
def num_threads(self):
"""Return the number of threads in this thread pool."""
return self.__num_threads
def Threads(self):
"""Yields the registered threads."""
for thread in self.__threads:
yield thread
def SubmitItem(self, item, block=True, timeout=0.0):
"""Submit a WorkItem to the AdaptiveThreadPool.
Args:
item: A WorkItem instance.
block: Whether to block on submitting if the submit queue is full.
timeout: Time to wait for room in the queue if block is True; 0.0
means block indefinitely.
Raises:
Queue.Full if the submit queue is full.
"""
self.requeue.put(item, block=block, timeout=timeout)
def QueuedItemCount(self):
"""Returns the number of items currently in the queue."""
return self.requeue.qsize()
def Shutdown(self):
"""Shutdown the thread pool.
Tasks may remain unexecuted in the submit queue.
"""
while not self.requeue.empty():
try:
unused_item = self.requeue.get_nowait()
self.requeue.task_done()
except Queue.Empty:
pass
for thread in self.__threads:
thread.exit_flag = True
self.requeue.put(_THREAD_SHOULD_EXIT)
self.__thread_gate.EnableAllThreads()
def Wait(self):
"""Wait until all work items have been completed."""
self.requeue.join()
def JoinThreads(self):
"""Wait for all threads to exit."""
for thread in self.__threads:
logger.debug('Waiting for %s to exit', thread)
thread.join()
def CheckErrors(self):
"""Output logs for any errors that occurred in the worker threads."""
for thread in self.__threads:
thread.CheckError()
class ThreadGate(object):
"""Manage the number of active worker threads.
The ThreadGate limits the number of threads that are simultaneously
active in order to implement adaptive rate control.
Initially the ThreadGate allows only one thread to be active. For
each successful work item, another thread is activated and for each
failed item, the number of active threads is reduced by one. When only
one thread is active, failures will cause exponential backoff.
For example, a ThreadGate instance, thread_gate can be used in a number
of threads as so:
# Block until this thread is enabled for work.
thread_gate.StartWork()
try:
status = DoSomeWorkInvolvingLimitedSharedResources()
succeeded = IsStatusGood(status)
badly_failed = IsStatusVeryBad(status)
finally:
if succeeded:
# Succeeded; add more simultaneously enabled threads to the task.
thread_gate.FinishWork(instruction=ThreadGate.INCREASE)
elif badly_failed:
# Failed, or succeeded but with high resource load, reduce number of
# workers.
thread_gate.FinishWork(instruction=ThreadGate.DECREASE)
else:
# We succeeded, but don't want to add more workers to the task.
thread_gate.FinishWork(instruction=ThreadGate.HOLD)
the thread_gate will enable and disable/backoff threads in response to
resource load conditions.
StartWork can block indefinitely. FinishWork, while not
lock-free, should never block absent a demonic scheduler.
"""
INCREASE = 'increase'
HOLD = 'hold'
DECREASE = 'decrease'
def __init__(self,
num_threads,
sleep=InterruptibleSleep):
"""Constructor for ThreadGate instances.
Args:
num_threads: The total number of threads using this gate.
sleep: Used for dependency injection.
"""
self.__enabled_count = 1
self.__lock = threading.Lock()
self.__thread_semaphore = threading.Semaphore(self.__enabled_count)
self.__num_threads = num_threads
self.__backoff_time = 0
self.__sleep = sleep
def num_threads(self):
return self.__num_threads
def EnableThread(self):
"""Enable one more worker thread."""
self.__lock.acquire()
try:
self.__enabled_count += 1
finally:
self.__lock.release()
self.__thread_semaphore.release()
def EnableAllThreads(self):
"""Enable all worker threads."""
for unused_idx in xrange(self.__num_threads - self.__enabled_count):
self.EnableThread()
def StartWork(self):
"""Starts a critical section in which the number of workers is limited.
Starts a critical section which allows self.__enabled_count
simultaneously operating threads. The critical section is ended by
calling self.FinishWork().
"""
self.__thread_semaphore.acquire()
if self.__backoff_time > 0.0:
if not threading.currentThread().exit_flag:
logger.info('[%s] Backing off due to errors: %.1f seconds',
threading.currentThread().getName(),
self.__backoff_time)
self.__sleep(self.__backoff_time)
def FinishWork(self, instruction=None):
"""Ends a critical section started with self.StartWork()."""
if not instruction or instruction == ThreadGate.HOLD:
self.__thread_semaphore.release()
elif instruction == ThreadGate.INCREASE:
if self.__backoff_time > 0.0:
logger.info('Resetting backoff to 0.0')
self.__backoff_time = 0.0
do_enable = False
self.__lock.acquire()
try:
if self.__num_threads > self.__enabled_count:
do_enable = True
self.__enabled_count += 1
finally:
self.__lock.release()
if do_enable:
logger.debug('Increasing active thread count to %d',
self.__enabled_count)
self.__thread_semaphore.release()
self.__thread_semaphore.release()
elif instruction == ThreadGate.DECREASE:
do_disable = False
self.__lock.acquire()
try:
if self.__enabled_count > 1:
do_disable = True
self.__enabled_count -= 1
else:
if self.__backoff_time == 0.0:
self.__backoff_time = INITIAL_BACKOFF
else:
self.__backoff_time *= BACKOFF_FACTOR
finally:
self.__lock.release()
if do_disable:
logger.debug('Decreasing the number of active threads to %d',
self.__enabled_count)
else:
self.__thread_semaphore.release()
class WorkItem(object):
"""Holds a unit of work."""
SUCCESS = 'success'
RETRY = 'retry'
FAILURE = 'failure'
def __init__(self, name):
self.__name = name
def PerformWork(self, thread_pool):
"""Perform the work of this work item and report the results.
Args:
thread_pool: The AdaptiveThreadPool instance associated with this
thread.
Returns:
A tuple (status, instruction) of the work status and an instruction
for the ThreadGate.
"""
raise NotImplementedError
def __str__(self):
return self.__name
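# Minimal usage sketch (the WorkItem subclass below is hypothetical, not
# part of this module). PerformWork() must return a (status, instruction)
# pair so the ThreadGate can adapt the number of active workers:
#
#     class EchoItem(WorkItem):
#       def PerformWork(self, thread_pool):
#         logger.info('processing %s', self)
#         return WorkItem.SUCCESS, ThreadGate.INCREASE
#
#     pool = AdaptiveThreadPool(num_threads=4)
#     pool.SubmitItem(EchoItem('item-1'))
#     pool.Wait()
#     pool.Shutdown()
#     pool.JoinThreads()
#     pool.CheckErrors()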
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for segment reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
class SegmentReductionHelper(tf.test.TestCase):
def _input(self, input_shape, dtype=tf.int32):
num_elem = 1
for x in input_shape:
num_elem *= x
values = np.arange(1, num_elem + 1)
np_values = values.reshape(input_shape).astype(dtype.as_numpy_dtype)
return tf.constant(values, shape=input_shape,
dtype=dtype), np_values
def _segmentReduce(self, indices, x, op1, op2=None, num_out_rows=None):
if not x.size: return np.array([])
indices = np.asarray(indices)
if num_out_rows is None:
num_out_rows = indices[-1] + 1
output = [None] * num_out_rows
slice_shape = x.shape[indices.ndim:]
x_flat = x.reshape((indices.size,) + slice_shape)
for i, index in enumerate(indices.ravel()):
if output[index] is not None:
output[index] = op1(output[index], x_flat[i])
else:
output[index] = x_flat[i]
# Zero-initialize values that are still uncalculated.
output = [o if o is not None else np.zeros(slice_shape) for o in output]
if op2 is not None:
output = [op2(o) for o in output]
output = [o.reshape(slice_shape) for o in output]
return np.array(output)
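# Worked example of the reference reduction above (values are illustrative):
# with indices=[0, 0, 1] and x=[[1, 2], [3, 4], [5, 6]], rows that share a
# segment id are combined with op1, so using np.add gives
#
#     _segmentReduce([0, 0, 1], x, np.add) == [[4, 6], [5, 6]]
#
# which mirrors what tf.segment_sum computes for the same inputs.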
def _assertAllClose(self, indices, np_x, tf_x):
for i in set(np.asarray(indices).ravel()):
self.assertAllClose(np_x[i], tf_x[i])
def _mean_cum_op(self, x, y):
return (x[0] + y, x[1] + 1) if isinstance(x, tuple) else (x + y, 2)
def _mean_reduce_op(self, x):
return x[0] / x[1] if isinstance(x, tuple) else x
class SegmentReductionOpTest(SegmentReductionHelper):
def testValues(self):
dtypes = [tf.float32,
tf.float64,
tf.int64,
tf.int32]
# Each item is np_op1, np_op2, tf_op
ops_list = [(np.add, None, tf.segment_sum),
(self._mean_cum_op, self._mean_reduce_op,
tf.segment_mean),
(np.ndarray.__mul__, None, tf.segment_prod),
(np.minimum, None, tf.segment_min),
(np.maximum, None, tf.segment_max)]
n = 10
shape = [n, 2]
indices = [i // 3 for i in range(n)]
for dtype in dtypes:
with self.test_session(use_gpu=False):
tf_x, np_x = self._input(shape, dtype=dtype)
for np_op1, np_op2, tf_op in ops_list:
np_ans = self._segmentReduce(indices, np_x, np_op1, np_op2)
s = tf_op(data=tf_x, segment_ids=indices)
tf_ans = s.eval()
self._assertAllClose(indices, np_ans, tf_ans)
# NOTE(mrry): The static shape inference that computes
# `tf_ans.shape` can only infer the sizes from dimension 1
# onwards, because the size of dimension 0 is data-dependent
# and may therefore vary dynamically.
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
def testSegmentIdsShape(self):
shape = [4, 4]
tf_x, _ = self._input(shape)
indices = tf.constant([0, 1, 2, 2], shape=[2, 2])
with self.assertRaises(ValueError):
tf.segment_sum(data=tf_x, segment_ids=indices)
def testSegmentIdsSize(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 1]
s = tf.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment_ids should be the same size"):
s.eval()
def testGradient(self):
shape = [4, 4]
indices = [0, 1, 2, 2]
for tf_op in [tf.segment_sum,
tf.segment_mean,
tf.segment_min,
tf.segment_max]:
with self.test_session():
tf_x, np_x = self._input(shape, dtype=tf.float64)
s = tf_op(data=tf_x, segment_ids=indices)
jacob_t, jacob_n = tf.test.compute_gradient(
tf_x,
shape,
s,
[3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
class UnsortedSegmentSumTest(SegmentReductionHelper):
def testValues(self):
dtypes = [tf.float32,
tf.float64,
tf.int64,
tf.int32]
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in dtypes:
with self.test_session(use_gpu=False):
tf_x, np_x = self._input(shape, dtype=dtype)
np_ans = self._segmentReduce(indices,
np_x,
np.add,
op2=None,
num_out_rows=num_segments)
s = tf.unsorted_segment_sum(data=tf_x,
segment_ids=indices,
num_segments=num_segments)
tf_ans = s.eval()
self._assertAllClose(indices, np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
def testGradient(self):
num_cols = 2
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = max(indices_flat) + 3
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (num_cols,)
with self.test_session():
tf_x, np_x = self._input(shape, dtype=tf.float64)
s = tf.unsorted_segment_sum(data=tf_x,
segment_ids=indices,
num_segments=num_segments)
jacob_t, jacob_n = tf.test.compute_gradient(
tf_x,
shape,
s,
[num_segments, num_cols],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
def testGradientMatchesSegmentSum(self):
# Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum
# and compare the outputs, which should be identical.
# NB: for this test to work, indices must be valid for SegmentSum, namely
# they must be sorted and contiguous, and num_segments
# must be max(indices) + 1.
indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]
n = len(indices)
num_cols = 2
shape = [n, num_cols]
num_segments = max(indices) + 1
with self.test_session():
tf_x, np_x = self._input(shape, dtype=tf.float64)
# Results from UnsortedSegmentSum
unsorted_s = tf.unsorted_segment_sum(data=tf_x,
segment_ids=indices,
num_segments=num_segments)
(unsorted_jacob_t, unsorted_jacob_n) = tf.test.compute_gradient(
tf_x,
shape,
unsorted_s,
[num_segments, num_cols],
x_init_value=np_x.astype(np.double),
delta=1)
# Results from SegmentSum
sorted_s = tf.segment_sum(data=tf_x, segment_ids=indices)
sorted_jacob_t, sorted_jacob_n = tf.test.compute_gradient(
tf_x,
shape,
sorted_s,
[num_segments, num_cols],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(unsorted_jacob_t, sorted_jacob_t, rtol=1e-3, atol=1e-3)
self.assertAllClose(unsorted_jacob_n, sorted_jacob_n, rtol=1e-3, atol=1e-3)
class SparseSegmentReductionHelper(SegmentReductionHelper):
def _sparse_input(self, input_shape, num_indices,
dtype=tf.int32):
a, b = super(SparseSegmentReductionHelper, self)._input(input_shape,
dtype)
indices = np.random.randint(0, input_shape[0], num_indices).astype(np.int32)
return (tf.constant(indices, dtype=tf.int32),
indices, a, b)
def _sparseSegmentReduce(self, x, indices, segment_indices, op1, op2=None):
return self._segmentReduce(segment_indices, x[indices], op1, op2)
class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
def testValues(self):
dtypes = [tf.float32,
tf.float64,
tf.int64,
tf.int32]
mean_dtypes = [tf.float32,
tf.float64]
# Each item is np_op1, np_op2, tf_op
ops_list = [(np.add, None, tf.sparse_segment_sum),
(self._mean_cum_op, self._mean_reduce_op,
tf.sparse_segment_mean)]
n = 400
shape = [n, 2]
segment_indices = []
for i in range(20):
for _ in range(i + 1):
segment_indices.append(i)
num_indices = len(segment_indices)
for dtype in dtypes:
with self.test_session(use_gpu=False):
tf_indices, np_indices, tf_x, np_x = self._sparse_input(shape,
num_indices,
dtype=dtype)
for np_op1, np_op2, tf_op in ops_list:
if tf_op == tf.sparse_segment_mean and dtype not in mean_dtypes:
continue
np_ans = self._sparseSegmentReduce(np_x, np_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = s.eval()
self._assertAllClose(segment_indices, np_ans, tf_ans)
# NOTE(mrry): The static shape inference that computes
# `tf_ans.shape` can only infer the sizes from dimension 1
# onwards, because the size of dimension 0 is data-dependent
# and may therefore vary dynamically.
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
def testGradient(self):
shape = [10, 4]
segment_indices = [0, 1, 2, 2]
num_indices = len(segment_indices)
for tf_op in [tf.sparse_segment_sum,
tf.sparse_segment_mean]:
with self.test_session():
tf_indices, _, tf_x, np_x = self._sparse_input(
shape, num_indices, dtype=tf.float64)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
jacob_t, jacob_n = tf.test.compute_gradient(
tf_x,
shape,
s,
[3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)
if __name__ == "__main__":
tf.test.main()
|
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Functions for type handling and type conversion (Blink/C++ <-> Dart:HTML).
Extends IdlType and IdlUnionType with C++-specific properties, methods, and
class methods.
Spec:
http://www.w3.org/TR/WebIDL/#es-type-mapping
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import posixpath
from idl_types import IdlTypeBase, IdlType, IdlUnionType, TYPE_NAMES, IdlArrayOrSequenceType
import dart_attributes
from dart_utilities import DartUtilities
from v8_globals import includes
################################################################################
# C++-specific handling of IDL types for Dart:Blink
################################################################################
NON_WRAPPER_TYPES = frozenset([
'CompareHow',
'DartValue',
'EventHandler',
'EventListener',
'MediaQueryListListener',
'NodeFilter',
])
TYPED_ARRAYS = {
# (cpp_type, dart_type), used by constructor templates
'ArrayBuffer': (None, 'ByteBuffer'),
'ArrayBufferView': (None, 'ByteData'),
'Float32Array': ('float', 'Float32List'),
'Float64Array': ('double', 'Float64List'),
'Int8Array': ('signed char', 'Int8List'),
'Int16Array': ('short', 'Int16List'),
'Int32Array': ('int', 'Int32List'),
'Uint8Array': ('unsigned char', 'Uint8List'),
'Uint8ClampedArray': ('unsigned char', 'Uint8ClampedList'),
'Uint16Array': ('unsigned short', 'Uint16List'),
'Uint32Array': ('unsigned int', 'Uint32List'),
}
IdlTypeBase.is_typed_array_type = property(
lambda self: self.base_type in TYPED_ARRAYS)
IdlType.is_wrapper_type = property(
lambda self: (self.is_interface_type and
self.base_type not in NON_WRAPPER_TYPES))
################################################################################
# C++ types
################################################################################
CPP_TYPE_SAME_AS_IDL_TYPE = set([
'double',
'float',
'long long',
'unsigned long long',
])
CPP_INT_TYPES = set([
'byte',
'long',
'short',
])
CPP_UNSIGNED_TYPES = set([
'octet',
'unsigned int',
'unsigned long',
'unsigned short',
])
CPP_SPECIAL_CONVERSION_RULES = {
'CompareHow': 'Range::CompareHow',
'Date': 'double',
'EventHandler': 'EventListener*',
'MediaQueryListListener': 'RefPtrWillBeRawPtr<MediaQueryListListener>',
'Promise': 'ScriptPromise',
# FIXME: Eliminate custom bindings for XPathNSResolver http://crbug.com/345529
'XPathNSResolver': 'RefPtrWillBeRawPtr<XPathNSResolver>',
'boolean': 'bool',
'unrestricted double': 'double',
'unrestricted float': 'float',
'Rect': 'Rect', # Pass Rect by value, not pointer.
}
def cpp_type(idl_type, extended_attributes=None, raw_type=False, used_as_rvalue_type=False, used_as_variadic_argument=False, used_in_cpp_sequence=False):
"""Returns C++ type corresponding to IDL type.
The |idl_type| argument is an IdlType; the return value is a string.
Args:
idl_type:
IdlType
raw_type:
bool, True if idl_type's raw/primitive C++ type should be returned.
used_as_rvalue_type:
bool, True if the C++ type is used as an argument or the return
type of a method.
used_as_variadic_argument:
bool, True if the C++ type is used as a variadic argument of a method.
used_in_cpp_sequence:
bool, True if the C++ type is used as an element of a container.
Containers can be an array, a sequence or a dictionary.
"""
extended_attributes = extended_attributes or {}
idl_type = idl_type.preprocessed_type
# Composite types
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
return cpp_template_type('Vector', native_array_element_type.cpp_type_args(used_in_cpp_sequence=True))
# Simple types
base_idl_type = idl_type.base_type
if base_idl_type in CPP_TYPE_SAME_AS_IDL_TYPE:
return base_idl_type
if base_idl_type in CPP_INT_TYPES:
return 'int'
if base_idl_type in CPP_UNSIGNED_TYPES:
return 'unsigned'
if base_idl_type in CPP_SPECIAL_CONVERSION_RULES:
return CPP_SPECIAL_CONVERSION_RULES[base_idl_type]
if base_idl_type in NON_WRAPPER_TYPES:
return ('PassRefPtr<%s>' if used_as_rvalue_type else 'RefPtr<%s>') % base_idl_type
if base_idl_type in ('DOMString', 'ByteString', 'ScalarValueString'):
return 'String'
if idl_type.is_typed_array_type and raw_type:
return 'RefPtr<%s>' % base_idl_type
if idl_type.is_callback_interface:
return 'OwnPtr<%s>' % base_idl_type
if idl_type.is_interface_type:
implemented_as_class = idl_type.implemented_as
if raw_type:
return implemented_as_class + '*'
new_type = 'Member' if used_in_cpp_sequence else 'RawPtr'
ptr_type = 'PassRefPtr' if used_as_rvalue_type else 'RefPtr'
return cpp_template_type(ptr_type, implemented_as_class)
# Default, assume native type is a pointer with same type name as idl type
return base_idl_type + '*'
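# Illustrative cpp_type() results (assuming 'Node' is an interface type with
# no [ImplementedAs] override):
#   'long'                      -> 'int'
#   'unsigned short'            -> 'unsigned'
#   'DOMString'                 -> 'String'
#   'Node' with raw_type=True   -> 'Node*'
#   'Node' as an rvalue type    -> 'PassRefPtr<Node>'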
def cpp_type_union(idl_type, extended_attributes=None, used_as_rvalue_type=False, will_be_in_heap_object=False):
return (member_type.cpp_type for member_type in idl_type.member_types)
# Allow access as idl_type.cpp_type if no arguments
IdlTypeBase.cpp_type = property(cpp_type)
IdlTypeBase.cpp_type_args = cpp_type
IdlUnionType.cpp_type = property(cpp_type_union)
IdlUnionType.cpp_type_args = cpp_type_union
IdlTypeBase.native_array_element_type = None
IdlArrayOrSequenceType.native_array_element_type = property(
lambda self: self.element_type)
def cpp_template_type(template, inner_type):
"""Returns C++ template specialized to type, with space added if needed."""
if inner_type.endswith('>'):
format_string = '{template}<{inner_type} >'
else:
format_string = '{template}<{inner_type}>'
return format_string.format(template=template, inner_type=inner_type)
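# For example, cpp_template_type('Vector', 'RefPtr<Node>') returns
# 'Vector<RefPtr<Node> >' (the extra space keeps pre-C++11 compilers from
# parsing '>>' as a right-shift operator), while cpp_template_type('Vector',
# 'int') returns 'Vector<int>'.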
def dart_type(interface_name):
return 'Dart' + str(interface_name)
# [ImplementedAs]
# This handles [ImplementedAs] on interface types, not [ImplementedAs] in the
# interface being generated. e.g., given:
# Foo.idl: interface Foo {attribute Bar bar};
# Bar.idl: [ImplementedAs=Zork] interface Bar {};
# when generating bindings for Foo, the [ImplementedAs] on Bar is needed.
# This data is external to Foo.idl, and hence computed as global information in
# compute_interfaces_info.py to avoid having to parse IDLs of all used interfaces.
IdlType.implemented_as_interfaces = {}
def implemented_as(idl_type):
base_idl_type = idl_type.base_type
if base_idl_type in IdlType.implemented_as_interfaces:
return IdlType.implemented_as_interfaces[base_idl_type]
return base_idl_type
IdlType.implemented_as = property(implemented_as)
IdlType.set_implemented_as_interfaces = classmethod(
lambda cls, new_implemented_as_interfaces:
cls.implemented_as_interfaces.update(new_implemented_as_interfaces))
################################################################################
# Includes
################################################################################
def includes_for_cpp_class(class_name, relative_dir_posix):
return set([posixpath.join('bindings', relative_dir_posix, class_name + '.h')])
# TODO(terry): Will we need this group header for dart:blink?
INCLUDES_FOR_TYPE = {
'object': set(),
'CompareHow': set(),
'EventHandler': set(),
'EventListener': set(),
'MediaQueryListListener': set(['sky/engine/core/css/MediaQueryListListener.h']),
'NodeList': set(['sky/engine/core/dom/NodeList.h',
'sky/engine/core/dom/StaticNodeList.h']),
'DartValue': set(['sky/engine/tonic/dart_value.h']),
}
def includes_for_type(idl_type):
idl_type = idl_type.preprocessed_type
# Composite types
if idl_type.native_array_element_type:
return includes_for_type(idl_type.native_array_element_type)
# Simple types
base_idl_type = idl_type.base_type
if base_idl_type in INCLUDES_FOR_TYPE:
return INCLUDES_FOR_TYPE[base_idl_type]
if idl_type.is_basic_type:
return set()
if idl_type.is_typed_array_type:
# Typed array factory methods are already provided by DartUtilities.h.
return set([])
if base_idl_type.endswith('ConstructorConstructor'):
# FIXME: rename to NamedConstructor
# FIXME: replace with a [NamedConstructorAttribute] extended attribute
# Ending with 'ConstructorConstructor' indicates a named constructor,
# and these do not have header files, as they are part of the generated
# bindings for the interface
return set()
if base_idl_type.endswith('Constructor'):
# FIXME: replace with a [ConstructorAttribute] extended attribute
base_idl_type = idl_type.constructor_type_name
if base_idl_type not in component_dir:
return set()
return set(['gen/sky/bindings/Dart%s.h' % base_idl_type])
IdlType.includes_for_type = property(includes_for_type)
IdlUnionType.includes_for_type = property(
lambda self: set.union(*[includes_for_type(member_type)
for member_type in self.member_types]))
def add_includes_for_type(idl_type):
includes.update(idl_type.includes_for_type)
IdlTypeBase.add_includes_for_type = add_includes_for_type
IdlUnionType.add_includes_for_type = add_includes_for_type
def includes_for_interface(interface_name):
return IdlType(interface_name).includes_for_type
def add_includes_for_interface(interface_name):
includes.update(includes_for_interface(interface_name))
component_dir = {}
def set_component_dirs(new_component_dirs):
component_dir.update(new_component_dirs)
################################################################################
# Dart -> C++
################################################################################
# TODO(terry): Need to fix to handle getter/setters for onEvent.
DART_FIX_ME = 'DART_UNIMPLEMENTED(/* Conversion unimplemented*/);'
# For a given IDL type, the DartHandle to C++ conversion.
DART_TO_CPP_VALUE = {
# Basic
'Date': 'DartUtilities::dartToDate(args, {index}, exception)',
'DOMString': 'DartConverter<String>::FromArguments{null_check}(args, {index}, exception, {auto_scope})',
'ByteString': 'DartUtilities::dartToByteString{null_check}(args, {index}, exception, {auto_scope})',
'ScalarValueString': 'DartUtilities::dartToScalarValueString{null_check}(args, {index}, exception, {auto_scope})',
'boolean': 'DartConverter<bool>::FromArguments(args, {index}, exception)',
'float': 'static_cast<float>(DartConverter<double>::FromArguments(args, {index}, exception))',
'unrestricted float': 'static_cast<float>(DartConverter<double>::FromArguments(args, {index}, exception))',
'double': 'DartConverter<double>::FromArguments(args, {index}, exception)',
'unrestricted double': 'DartConverter<double>::FromArguments(args, {index}, exception)',
# FIXME(vsm): Inconsistent with V8.
'byte': 'DartConverter<unsigned>::FromArguments(args, {index}, exception)',
'octet': 'DartConverter<unsigned>::FromArguments(args, {index}, exception)',
'short': 'DartConverter<int>::FromArguments(args, {index}, exception)',
'unsigned short': 'DartConverter<unsigned>::FromArguments(args, {index}, exception)',
'long': 'DartConverter<int>::FromArguments(args, {index}, exception)',
'unsigned long': 'DartConverter<unsigned>::FromArguments(args, {index}, exception)',
'long long': 'DartConverter<long long>::FromArguments(args, {index}, exception)',
'unsigned long long': 'DartConverter<unsigned long long>::FromArguments(args, {index}, exception)',
# Interface types
'CompareHow': 'static_cast<Range::CompareHow>(0) /* FIXME, DART_TO_CPP_VALUE[CompareHow] */',
'EventTarget': '0 /* FIXME, DART_TO_CPP_VALUE[EventTarget] */',
'MediaQueryListListener': 'nullptr /* FIXME, DART_TO_CPP_VALUE[MediaQueryListener] */',
'NodeFilter': 'nullptr /* FIXME, DART_TO_CPP_VALUE[NodeFilter] */',
'Promise': 'DartUtilities::dartToScriptPromise{null_check}(args, {index})',
'DartValue': 'DartConverter<DartValue*>::FromArguments(args, {index}, exception)',
# FIXME(vsm): Why don't we have an entry for Window? V8 does.
# I think I removed this as the Window object is more special in V8 - it's the
# global context as well. Do we need to special case it?
'XPathNSResolver': 'nullptr /* FIXME, DART_TO_CPP_VALUE[XPathNSResolver] */',
# FIXME(vsm): This is an enum type (defined in StorageQuota.idl).
# We should handle it automatically, but map to a String for now.
'StorageType': 'DartUtilities::dartToString(args, {index}, exception, {auto_scope})',
'Rect': 'DartConverter<{implemented_as}>::FromArguments{null_check}(args, {index}, exception)',
}
def dart_value_to_cpp_value(idl_type, extended_attributes, variable_name,
null_check, has_type_checking_interface,
index, auto_scope=True):
# Composite types
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
return dart_value_to_cpp_value_array_or_sequence(native_array_element_type, variable_name, index)
# Simple types
idl_type = idl_type.preprocessed_type
add_includes_for_type(idl_type)
base_idl_type = idl_type.base_type
if 'EnforceRange' in extended_attributes:
arguments = ', '.join([variable_name, 'EnforceRange', 'exceptionState'])
elif idl_type.is_integer_type: # NormalConversion
arguments = ', '.join([variable_name, 'es'])
else:
arguments = variable_name
if base_idl_type in DART_TO_CPP_VALUE:
cpp_expression_format = DART_TO_CPP_VALUE[base_idl_type]
elif idl_type.is_typed_array_type:
# FIXME(vsm): V8 generates a type check here as well. Do we need one?
# FIXME(vsm): When do we call the externalized version? E.g., see
# bindings/dart/custom/DartWaveShaperNodeCustom.cpp - it calls
# DartUtilities::dartToExternalizedArrayBufferView instead.
# V8 always converts null here
cpp_expression_format = ('DartUtilities::dartTo{idl_type}WithNullCheck(args, {index}, exception)')
elif idl_type.is_callback_interface:
cpp_expression_format = ('Dart{idl_type}::create{null_check}(args, {index}, exception)')
else:
cpp_expression_format = ('DartConverter<{implemented_as}*>::FromArguments{null_check}(args, {index}, exception)')
# We allow the calling context to force a null check to handle
# some cases that require calling context info. V8 handles all
# of this differently, and we may wish to reconsider this approach
check_string = ''
if null_check or allow_null(idl_type, extended_attributes,
has_type_checking_interface):
check_string = 'WithNullCheck'
elif allow_empty(idl_type, extended_attributes):
check_string = 'WithEmptyCheck'
return cpp_expression_format.format(null_check=check_string,
arguments=arguments,
index=index,
idl_type=base_idl_type,
implemented_as=idl_type.implemented_as,
auto_scope=DartUtilities.bool_to_cpp(auto_scope))
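# Illustrative sketch only (not used by the generator): shows how the format
# templates in DART_TO_CPP_VALUE above get expanded by dart_value_to_cpp_value.
# The interface name, null-check suffix and argument index are hypothetical.
def _example_dart_to_cpp_expansion():
    template = 'DartConverter<{implemented_as}*>::FromArguments{null_check}(args, {index}, exception)'
    # Expands to:
    #   DartConverter<Element*>::FromArgumentsWithNullCheck(args, 0, exception)
    return template.format(implemented_as='Element',
                           null_check='WithNullCheck',
                           index=0)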
def dart_value_to_cpp_value_array_or_sequence(native_array_element_type, variable_name, index):
# Index is None for setters, index (starting at 0) for method arguments,
# and is used to provide a human-readable exception message
if index is None:
index = 0 # special case, meaning "setter"
this_cpp_type = native_array_element_type.cpp_type
expression_format = '{variable_name} = DartConverter<Vector<{cpp_type}>>::FromArguments(args, {index}, exception)'
expression = expression_format.format(native_array_element_type=native_array_element_type.name,
cpp_type=this_cpp_type, index=index,
variable_name=variable_name)
return expression
def dart_value_to_local_cpp_value(idl_type, extended_attributes, variable_name,
null_check, has_type_checking_interface,
index=None, auto_scope=True):
"""Returns an expression that converts a Dart value to a C++ value as a local value."""
idl_type = idl_type.preprocessed_type
cpp_value = dart_value_to_cpp_value(
idl_type, extended_attributes, variable_name,
null_check, has_type_checking_interface,
index, auto_scope)
return cpp_value
IdlTypeBase.dart_value_to_local_cpp_value = dart_value_to_local_cpp_value
#IdlUnionType.dart_value_to_local_cpp_value = dart_value_to_local_cpp_value
# Ensure that we don't use C++ reserved names. Today only 'default' is a problem.
def check_reserved_name(name):
return 'default_value' if (name == 'default') else name
################################################################################
# C++ -> Dart
################################################################################
def preprocess_idl_type(idl_type):
if idl_type.is_enum:
# Enumerations are internally DOMStrings
return IdlType('DOMString')
if (idl_type.name == 'Any' or idl_type.is_callback_function):
return IdlType('DartValue')
return idl_type
IdlTypeBase.preprocessed_type = property(preprocess_idl_type)
IdlUnionType.preprocessed_type = property(preprocess_idl_type)
def preprocess_idl_type_and_value(idl_type, cpp_value, extended_attributes):
"""Returns IDL type and value, with preliminary type conversions applied."""
idl_type = idl_type.preprocessed_type
if idl_type.name == 'Promise':
idl_type = IdlType('ScriptPromise')
# FIXME(vsm): V8 maps 'long long' and 'unsigned long long' to double
# as they are not representable in ECMAScript. Should we do the same?
# HTML5 says that unsigned reflected attributes should be in the range
# [0, 2^31). When a value isn't in this range, a default value (or 0)
# should be returned instead.
extended_attributes = extended_attributes or {}
if ('Reflect' in extended_attributes and
idl_type.base_type in ['unsigned long', 'unsigned short']):
cpp_value = cpp_value.replace('getUnsignedIntegralAttribute',
'getIntegralAttribute')
cpp_value = 'std::max(0, %s)' % cpp_value
return idl_type, cpp_value
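# Illustrative sketch only: mirrors the [Reflect] clamping rewrite applied above
# for a hypothetical 'unsigned long' reflected attribute getter expression.
def _example_reflect_clamp():
    cpp_value = 'imp->getUnsignedIntegralAttribute(HTMLNames::sizeAttr)'
    cpp_value = cpp_value.replace('getUnsignedIntegralAttribute',
                                  'getIntegralAttribute')
    # -> 'std::max(0, imp->getIntegralAttribute(HTMLNames::sizeAttr))'
    return 'std::max(0, %s)' % cpp_value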
IDL_TO_DART_TYPE = {
'DOMString': 'String',
'DartValue': 'dynamic',
'boolean': 'bool',
'void': 'void',
'unsigned long': 'int',
}
def idl_type_to_dart_type(idl_type):
preprocessed_type = str(idl_type.preprocessed_type)
dart_type = IDL_TO_DART_TYPE.get(preprocessed_type)
if dart_type:
return dart_type
if idl_type.is_integer_type:
return 'int'
if idl_type.is_numeric_type:
return 'double'
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
return 'List<%s>' % idl_type_to_dart_type(native_array_element_type)
assert preprocessed_type
assert idl_type.is_interface_type, "Missing dart type mapping for '%s'" % preprocessed_type
return preprocessed_type
DART_DEFAULT_VALUES_BY_TYPE = {
'String': '""',
'bool': 'false',
'double': '0.0',
'dynamic': 'null',
'int': '0',
}
def dart_default_value(dart_type, argument=None):
# TODO(eseidel): Maybe take the idl_type instead?
# if argument.default_value:
# return argument.default_value
default_value = DART_DEFAULT_VALUES_BY_TYPE.get(dart_type)
if default_value:
return default_value
idl_type = argument.idl_type
if idl_type.is_interface_type:
return 'null'
if idl_type.native_array_element_type:
return 'null'
assert default_value, "Missing default value mapping for '%s'" % dart_type
def dart_conversion_type(idl_type, extended_attributes):
"""Returns Dart conversion type, adding any additional includes.
The Dart conversion type is used to select the C++ -> Dart conversion function
or setDart*ReturnValue function; it can be an idl_type, a cpp_type, or a
separate name for the type of conversion (e.g., 'DOMWrapper').
"""
extended_attributes = extended_attributes or {}
# Composite types
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
if native_array_element_type.is_interface_type:
add_includes_for_type(native_array_element_type)
return 'array'
# Simple types
base_idl_type = idl_type.base_type
# Basic types, without additional includes
if base_idl_type in CPP_INT_TYPES or base_idl_type == 'long long':
return 'int'
if base_idl_type in CPP_UNSIGNED_TYPES or base_idl_type == 'unsigned long long':
return 'unsigned'
if idl_type.is_string_type:
if idl_type.is_nullable:
return 'StringOrNull'
if 'TreatReturnedNullStringAs' not in extended_attributes:
return 'DOMString'
treat_returned_null_string_as = extended_attributes['TreatReturnedNullStringAs']
if treat_returned_null_string_as == 'Null':
return 'StringOrNull'
if treat_returned_null_string_as == 'Undefined':
return 'StringOrUndefined'
        raise ValueError('Unrecognized TreatReturnedNullStringAs value: "%s"' % treat_returned_null_string_as)
if idl_type.is_basic_type or base_idl_type == 'DartValue':
return base_idl_type
# Data type with potential additional includes
add_includes_for_type(idl_type)
if base_idl_type in DART_SET_RETURN_VALUE: # Special dartSetReturnValue treatment
return base_idl_type
# Typed arrays don't have special Dart* classes for Dart.
if idl_type.is_typed_array_type:
if base_idl_type == 'ArrayBuffer':
return 'ArrayBuffer'
else:
return 'TypedList'
# Pointer type
return 'DOMWrapper'
IdlTypeBase.dart_conversion_type = dart_conversion_type
DART_SET_RETURN_VALUE = {
'boolean': 'DartConverter<bool>::SetReturnValue(args, {cpp_value})',
'int': 'DartConverter<int>::SetReturnValue(args, {cpp_value})',
'unsigned': 'DartConverter<unsigned>::SetReturnValue(args, {cpp_value})',
'DOMString': 'DartConverter<String>::SetReturnValue(args, {cpp_value}, {auto_scope})',
    # FIXME(terry): Need to handle checking byte values > 255 and throwing an exception.
'ByteString': 'DartUtilities::setDartByteStringReturnValue(args, {cpp_value}, {auto_scope})',
    # FIXME(terry): Need to produce valid Unicode; map unpaired UTF-16 surrogates to U+FFFD REPLACEMENT CHARACTER.
'ScalarValueString': 'DartUtilities::setDartScalarValueStringReturnValue(args, {cpp_value}, {auto_scope})',
# [TreatNullReturnValueAs]
'StringOrNull': 'DartConverter<String>::SetReturnValueWithNullCheck(args, {cpp_value}, {auto_scope})',
# FIXME(vsm): How should we handle undefined?
'StringOrUndefined': 'DartConverter<String>::SetReturnValueWithNullCheck(args, {cpp_value}, {auto_scope})',
'void': '',
# We specialize these as well in Dart.
'float': 'DartConverter<double>::SetReturnValue(args, {cpp_value})',
'unrestricted float': 'DartConverter<double>::SetReturnValue(args, {cpp_value})',
'double': 'DartConverter<double>::SetReturnValue(args, {cpp_value})',
'unrestricted double': 'DartConverter<double>::SetReturnValue(args, {cpp_value})',
# No special function, but instead convert value to Dart_Handle
# and then use general Dart_SetReturnValue.
'array': 'Dart_SetReturnValue(args, {cpp_value})',
'Date': 'Dart_SetReturnValue(args, {cpp_value})',
'EventHandler': DART_FIX_ME,
'ScriptPromise': 'Dart_SetReturnValue(args, {cpp_value})',
'DartValue': 'DartConverter<DartValue*>::SetReturnValue(args, {cpp_value})',
# DOMWrapper
# TODO(terry): Remove ForMainWorld stuff.
'DOMWrapperForMainWorld': DART_FIX_ME,
# FIXME(vsm): V8 has a fast path. Do we?
'DOMWrapperFast': 'DartConverter<{implemented_as}*>::SetReturnValue(args, WTF::getPtr({cpp_value}), {auto_scope})',
'DOMWrapperDefault': 'DartConverter<{implemented_as}*>::SetReturnValue(args, WTF::getPtr({cpp_value}), {auto_scope})',
# Typed arrays don't have special Dart* classes for Dart.
'ArrayBuffer': 'Dart_SetReturnValue(args, DartUtilities::arrayBufferToDart({cpp_value}))',
'TypedList': 'Dart_SetReturnValue(args, DartUtilities::arrayBufferViewToDart({cpp_value}))',
}
def dart_set_return_value(idl_type, cpp_value,
extended_attributes=None, script_wrappable='',
release=False, for_main_world=False,
auto_scope=True):
"""Returns a statement that converts a C++ value to a Dart value and sets it as a return value.
"""
def dom_wrapper_conversion_type():
if not script_wrappable:
return 'DOMWrapperDefault'
if for_main_world:
return 'DOMWrapperForMainWorld'
return 'DOMWrapperFast'
idl_type, cpp_value = preprocess_idl_type_and_value(idl_type, cpp_value, extended_attributes)
this_dart_conversion_type = idl_type.dart_conversion_type(extended_attributes)
# SetReturn-specific overrides
if this_dart_conversion_type in ['Date', 'EventHandler', 'ScriptPromise', 'SerializedScriptValue', 'array']:
# Convert value to Dart and then use general Dart_SetReturnValue
# FIXME(vsm): Why do we differ from V8 here? It doesn't have a
# creation_context.
creation_context = ''
if this_dart_conversion_type == 'array':
# FIXME: This is not right if the base type is a primitive, DOMString, etc.
# What is the right check for base type?
base_type = str(idl_type.element_type)
if base_type not in DART_TO_CPP_VALUE:
if base_type == 'None':
raise Exception('Unknown base type for ' + str(idl_type))
creation_context = '<Dart%s>' % base_type
if idl_type.is_nullable:
creation_context = 'Nullable' + creation_context
cpp_value = idl_type.cpp_value_to_dart_value(cpp_value, creation_context=creation_context,
extended_attributes=extended_attributes)
if this_dart_conversion_type == 'DOMWrapper':
this_dart_conversion_type = dom_wrapper_conversion_type()
format_string = DART_SET_RETURN_VALUE[this_dart_conversion_type]
if release:
cpp_value = '%s.release()' % cpp_value
statement = format_string.format(cpp_value=cpp_value,
implemented_as=idl_type.implemented_as,
type_name=idl_type.name,
script_wrappable=script_wrappable,
auto_scope=DartUtilities.bool_to_cpp(auto_scope))
return statement
def dart_set_return_value_union(idl_type, cpp_value, extended_attributes=None,
script_wrappable='', release=False, for_main_world=False,
auto_scope=True):
"""
release: can be either False (False for all member types) or
a sequence (list or tuple) of booleans (if specified individually).
"""
return [
# FIXME(vsm): Why do we use 'result' instead of cpp_value as V8?
member_type.dart_set_return_value('result' + str(i),
extended_attributes,
script_wrappable,
release and release[i],
for_main_world,
auto_scope)
for i, member_type in
enumerate(idl_type.member_types)]
IdlTypeBase.dart_set_return_value = dart_set_return_value
IdlUnionType.dart_set_return_value = dart_set_return_value_union
IdlType.release = property(lambda self: self.is_interface_type)
IdlUnionType.release = property(
lambda self: [member_type.is_interface_type
for member_type in self.member_types])
CPP_VALUE_TO_DART_VALUE = {
# Built-in types
# FIXME(vsm): V8 uses DateOrNull - do we need a null check?
'Date': 'DartUtilities::dateToDart({cpp_value})',
'DOMString': 'DartConverter<String>::ToDart(DartState::Current(), {cpp_value})',
'boolean': 'DartConverter<bool>::ToDart({cpp_value})',
'int': 'DartConverter<int>::ToDart({cpp_value})',
'unsigned': 'DartConverter<unsigned>::ToDart({cpp_value})',
'float': 'DartConverter<double>::ToDart({cpp_value})',
'unrestricted float': 'DartConverter<double>::ToDart({cpp_value})',
'double': 'DartConverter<double>::ToDart({cpp_value})',
'unrestricted double': 'DartConverter<double>::ToDart({cpp_value})',
# FIXME(vsm): Dart_Null?
'void': '',
# Special cases
'EventHandler': '-----OOPS TO DART-EVENT---',
# We need to generate the NullCheck version in some cases.
'ScriptPromise': 'DartUtilities::scriptPromiseToDart({cpp_value})',
'DartValue': 'DartConverter<DartValue*>::ToDart({cpp_value})',
# General
'array': 'VectorToDart({cpp_value})',
'DOMWrapper': 'DartConverter<{idl_type}*>::ToDart({cpp_value})',
}
def cpp_value_to_dart_value(idl_type, cpp_value, creation_context='', extended_attributes=None):
"""Returns an expression that converts a C++ value to a Dart value."""
# the isolate parameter is needed for callback interfaces
idl_type, cpp_value = preprocess_idl_type_and_value(idl_type, cpp_value, extended_attributes)
this_dart_conversion_type = idl_type.dart_conversion_type(extended_attributes)
format_string = CPP_VALUE_TO_DART_VALUE[this_dart_conversion_type]
statement = format_string.format(cpp_value=cpp_value, idl_type=idl_type.base_type)
return statement
IdlTypeBase.cpp_value_to_dart_value = cpp_value_to_dart_value
# FIXME(leafp): This is horrible, we should do better, but currently this is hard to do
# in a nice way. The best solution might be to extend DartStringAdapter to accommodate
# initialization from constant strings, but it is better to do that once we're stable
# on the bots so we can track any performance regression.
CPP_LITERAL_TO_DART_VALUE = {
'DOMString': {'nullptr': 'String()',
'String("")': 'String(StringImpl::empty())',
'*': 'DartUtilities::dartToString(DartUtilities::stringToDart({cpp_literal}), exception)'},
'ScalarValueString': {'nullptr': 'DartStringAdapter(DartStringPeer::nullString())',
'String("")': 'DartStringAdapter(DartStringPeer::emptyString())',
'*': 'DartUtilities::dartToScalarValueString(DartUtilities::stringToDart({cpp_literal}), exception)'},
}
def literal_cpp_value(idl_type, idl_literal):
"""Converts an expression that is a valid C++ literal for this type."""
# FIXME: add validation that idl_type and idl_literal are compatible
literal_value = str(idl_literal)
base_type = idl_type.preprocessed_type.base_type
if base_type in CPP_UNSIGNED_TYPES:
return literal_value + 'u'
if base_type in CPP_LITERAL_TO_DART_VALUE:
if literal_value in CPP_LITERAL_TO_DART_VALUE[base_type]:
format_string = CPP_LITERAL_TO_DART_VALUE[base_type][literal_value]
else:
format_string = CPP_LITERAL_TO_DART_VALUE[base_type]['*']
return format_string.format(cpp_literal=literal_value)
return literal_value
IdlType.literal_cpp_value = literal_cpp_value
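# Illustrative sketch only: expands one of the CPP_LITERAL_TO_DART_VALUE
# templates above for a hypothetical non-empty DOMString default value.
def _example_literal_expansion():
    template = CPP_LITERAL_TO_DART_VALUE['DOMString']['*']
    # -> 'DartUtilities::dartToString(DartUtilities::stringToDart(String("foo")), exception)'
    return template.format(cpp_literal='String("foo")')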
CPP_DEFAULT_VALUE_FOR_CPP_TYPE = {
'DOMString': 'String()',
'ByteString': 'String()',
'ScalarValueString': 'String()',
'boolean': 'false',
'float': '0.0f',
'unrestricted float': '0.0f',
'double': '0.0',
'unrestricted double': '0.0',
'byte': '0',
'octet': '0',
'short': '0',
'unsigned short': '0',
'long': '0',
'unsigned long': '0',
'long long': '0',
'unsigned long long': '0',
}
def default_cpp_value_for_cpp_type(idl_type):
idl_type = idl_type.preprocessed_type
add_includes_for_type(idl_type)
base_idl_type = idl_type.base_type
if base_idl_type in CPP_DEFAULT_VALUE_FOR_CPP_TYPE:
return CPP_DEFAULT_VALUE_FOR_CPP_TYPE[base_idl_type]
return 'nullptr'
# Override idl_type.name so it does not suffix 'OrNull' to the name; in Dart
# we always test for null explicitly, e.g.,
#
# bool isNull = false;
# TYPE* result = receiver->GETTER(isNull);
# if (isNull)
# return;
#
def dart_name(idl_type):
"""Return type name.
http://heycam.github.io/webidl/#dfn-type-name
"""
base_type = idl_type.base_type
base_type_name = TYPE_NAMES.get(base_type, base_type)
if idl_type.native_array_element_type:
return idl_type.inner_name()
return base_type_name
IdlType.name = property(dart_name)
IdlUnionType.name = property(dart_name)
# If True use the WithNullCheck version when converting.
def allow_null(idl_type, extended_attributes, has_type_checking_interface):
if idl_type.base_type in ('DOMString', 'ByteString', 'ScalarValueString'):
# This logic is in cpp_types in v8_types.py, since they handle
# this using the V8StringResource type. We handle it here
if (extended_attributes.get('TreatNullAs') == 'NullString' or
extended_attributes.get('TreatUndefinedAs') == 'NullString'):
return True
if extended_attributes.get('Default') == 'NullString':
return True
if extended_attributes.get('Default') == 'Undefined':
return True
if idl_type.is_nullable:
return True
return False
else:
# This logic is implemented in the methods.cpp template in V8
if (idl_type.is_nullable or not has_type_checking_interface):
return True
if extended_attributes.get('Default') == 'Undefined':
return True
return False
# If True use the WithEmptyCheck version when converting.
def allow_empty(idl_type, extended_attributes):
if idl_type.base_type in ('DOMString', 'ByteString', 'ScalarValueString'):
# This logic is in cpp_types in v8_types.py, since they handle
# this using the V8StringResource type. We handle it here
if (extended_attributes.get('TreatNullAs') == 'EmptyString' or
extended_attributes.get('TreatUndefinedAs') == 'EmptyString'):
return True
if extended_attributes.get('Default') == 'EmptyString':
return True
return False
|
|
"""Python API for interacting with installer API
"""
import abc
import json
import os
from subprocess import CalledProcessError
import pkg_resources
import requests
import yaml
from retrying import retry
from ssh.ssh_tunnel import run_scp_cmd, run_ssh_cmd, SSHTunnel
MAX_STAGE_TIME = int(os.getenv('INSTALLER_API_MAX_STAGE_TIME', '900'))
class AbstractDcosInstaller(metaclass=abc.ABCMeta):
def __init__(self):
self.offline_mode = False
def setup_remote(
self, tunnel, installer_path, download_url,
host=None, ssh_user=None, ssh_key_path=None):
"""Creates a light, system-based ssh handler
Args:
tunnel: SSHTunnel instance to avoid recreating SSH connections.
If set to None, ssh_user, host, and ssh_key_path must be
set and one-off connections will be made
installer_path: (str) path on host to download installer to
download_url: (str) URL that installer can be pulled from
host: (str) where the installer will be downloaded to
ssh_user: (str) user with access to host
ssh_key_path: (str) path to valid ssh key for ssh_user@host
"""
self.installer_path = installer_path
if tunnel:
assert isinstance(tunnel, SSHTunnel)
self.tunnel = tunnel
self.url = "http://{}:9000".format(tunnel.host)
def ssh(cmd):
return tunnel.remote_cmd(cmd, timeout=MAX_STAGE_TIME)
def scp(src, dst):
return tunnel.write_to_remote(src, dst)
else:
assert ssh_user, 'ssh_user must be set if tunnel not set'
assert ssh_key_path, 'ssh_key_path must be set if tunnel not set'
assert host, 'host must be set if tunnel not set'
self.url = "http://{}:9000".format(host)
def ssh(cmd):
return run_ssh_cmd(ssh_user, ssh_key_path, host, cmd, timeout=MAX_STAGE_TIME)
def scp(src, dst):
return run_scp_cmd(ssh_user, ssh_key_path, host, src, dst)
self.ssh = ssh
self.scp = scp
@retry(wait_fixed=3000, stop_max_delay=300 * 1000)
def download_dcos():
"""Response status 403 is fatal for curl's retry. Additionally, S3 buckets
have been returning 403 for valid uploads for 10-15 minutes after CI finished build
Therefore, give a five minute buffer to help stabilize CI
"""
self.ssh(['curl', '-fLsSv', '--retry', '20', '-Y', '100000', '-y', '60',
'--create-dirs', '-o', self.installer_path, download_url])
if download_url:
download_dcos()
def get_hashed_password(self, password):
p = self.ssh(["bash", self.installer_path, "--hash-password", password])
        # The password hash is the last line of output, but the output ends with a newline.
passwd_hash = p.decode('utf-8').split('\n')[-2]
return passwd_hash
@staticmethod
def ip_detect_script(preset_name):
try:
return pkg_resources.resource_string('gen', 'ip-detect/{}.sh'.format(preset_name)).decode('utf-8')
except FileNotFoundError as exc:
raise Exception('IP-detect preset not found: {}'.format(preset_name)) from exc
@abc.abstractmethod
def genconf(self, expect_errors=False):
pass
@abc.abstractmethod
def preflight(self, expect_errors=False):
pass
@abc.abstractmethod
def install_prereqs(self, expect_errors=False):
pass
@abc.abstractmethod
def deploy(self, expect_errors=False):
pass
@abc.abstractmethod
def postflight(self, expect_errors=False):
pass
class DcosApiInstaller(AbstractDcosInstaller):
def start_web_server(self):
cmd = ['DCOS_INSTALLER_DAEMONIZE=true', 'bash', self.installer_path, '--web']
if self.offline_mode:
cmd.append('--offline')
self.ssh(cmd)
@retry(wait_fixed=1000, stop_max_delay=10000)
def wait_for_up():
response = requests.get(self.url)
assert response.status_code == 200, "{} {}".format(response.status_code, response.content)
print("Webserver started")
wait_for_up()
def genconf(
self, master_list, agent_list, public_agent_list, ssh_user, ssh_key,
ip_detect, rexray_config=None, rexray_config_preset=None,
zk_host=None, expect_errors=False, add_config_path=None):
"""Runs configuration generation.
Args:
master_list: list of IPv4 addresses to be used as masters
agent_list: list of IPv4 addresses to be used as agents
public_agent_list: list of IPv4 addresses to be used as public agents
ip_detect (str): name of preset IP-detect script
ssh_user (str): name of SSH user that has access to targets
ssh_key (str): complete public SSH key for ssh_user. Must already
                be installed on targets as an authorized key
rexray_config: complete contents of REX-Ray config file. Must be a
JSON-serializable object.
rexray_config_preset (str): name of preset REX-Ray config
zk_host (optional): if provided, zk is used for exhibitor backend
expect_errors (optional): raises error if result is unexpected
add_config_path (optional): string pointing to a file with additional
                config parameters to be merged or used as an override
Raises:
AssertionError: "error" present in returned json keys when error
was not expected or vice versa
"""
headers = {'content-type': 'application/json'}
payload = {
'master_list': master_list,
'agent_list': agent_list,
'public_agent_list': public_agent_list,
'ssh_user': ssh_user,
'ssh_key': ssh_key,
'ip_detect_script': self.ip_detect_script(ip_detect)}
if rexray_config:
payload['rexray_config'] = rexray_config
if rexray_config_preset:
payload['rexray_config_preset'] = rexray_config_preset
if zk_host:
payload['exhibitor_zk_hosts'] = zk_host
if add_config_path:
with open(add_config_path, 'r') as fh:
add_config = yaml.load(fh)
payload.update(add_config)
response = requests.post(self.url + '/api/v1/configure', headers=headers, data=json.dumps(payload))
assert response.status_code == 200, "{} {}".format(response.status_code, response.content)
response_json_keys = list(response.json().keys())
if expect_errors:
assert "error" in response_json_keys
else:
assert "error" not in response_json_keys
def install_prereqs(self, expect_errors=False):
assert not self.offline_mode, "Install prereqs can only be run without --offline mode"
self.preflight(expect_errors=expect_errors)
def preflight(self, expect_errors=False):
self.do_and_check('preflight', expect_errors)
def deploy(self, expect_errors=False):
self.do_and_check('deploy', expect_errors)
def postflight(self, expect_errors=False):
self.do_and_check('postflight', expect_errors)
def do_and_check(self, action, expect_errors):
"""Args:
action (str): one of 'preflight', 'deploy', 'postflight'
"""
self.start_action(action)
self.wait_for_check_action(
action=action, expect_errors=expect_errors,
wait=30000, stop_max_delay=MAX_STAGE_TIME * 1000)
def wait_for_check_action(self, action, wait, stop_max_delay, expect_errors):
"""Retries method against API until returned data shows that all hosts
have finished.
Args:
            action (str): choices are 'preflight', 'deploy', 'postflight'
wait (int): how many milliseconds to wait between tries
stop_max_delay (int): total duration (in milliseconds) to retry for
expect_errors (boolean): raises error if result is not as expected
Raises:
AssertionError: checks 'host_status' and raises error...
-if expect_errors is False and not all status=='success'
-if expect_errors is True and all status=='success'
"""
@retry(wait_fixed=wait, stop_max_delay=stop_max_delay)
def wait_for_finish():
            # Only return once output is non-empty and no host is still running
output = self.check_action(action)
assert output != {}
host_data = output['hosts']
finished_run = all(map(lambda host: host['host_status'] not in ['running', 'unstarted'],
host_data.values()))
assert finished_run, 'Action timed out! Last output: {}'.format(output)
return host_data
host_data = wait_for_finish()
success = True
for host in host_data.keys():
if host_data[host]['host_status'] != 'success':
success = False
print("Failures detected in {}: {}".format(action, host_data[host]))
if expect_errors:
assert not success, "Results were successful, but errors were expected in {}".format(action)
else:
assert success, "Results for {} included failures, when all should have succeeded".format(action)
def start_action(self, action):
"""Args:
action (str): one of 'preflight', 'deploy', 'postflight'
"""
return requests.post(self.url + '/api/v1/action/{}'.format(action))
def check_action(self, action):
"""Args:
action (str): one of 'preflight', 'deploy', 'postflight', 'success'
"""
return requests.get(self.url + '/api/v1/action/{}'.format(action)).json()
class DcosCliInstaller(AbstractDcosInstaller):
def run_cli_cmd(self, mode, expect_errors=False):
"""Runs commands through the CLI
NOTE: We use `bash` as a wrapper here to make it so dcos_generate_config.sh
doesn't have to be executable
Args:
mode (str): single flag to be handed to CLI
expect_errors: raise error if result is unexpected
Raises:
AssertionError: if return_code is...
-zero and expect_errors is True
-nonzero and expect_errors is False
"""
cmd = ['bash', self.installer_path, mode]
if expect_errors:
try:
output = self.ssh(cmd)
err_msg = "{} succeeded when it should have failed".format(cmd)
print(output)
raise AssertionError(err_msg)
except CalledProcessError:
# expected behavior
pass
else:
print(self.ssh(cmd))
def genconf(
self, master_list, agent_list, public_agent_list, ssh_user, ssh_key,
ip_detect, rexray_config=None, rexray_config_preset=None,
zk_host=None, expect_errors=False, add_config_path=None,
bootstrap_url='file:///opt/dcos_install_tmp'):
"""Runs configuration generation.
Args:
master_list: list of IPv4 addresses to be used as masters
agent_list: list of IPv4 addresses to be used as agents
            public_agent_list: list of IPv4 addresses to be used as public agents
ip_detect (str): name of preset IP-detect script
ssh_user (str): name of SSH user that has access to targets
ssh_key (str): complete public SSH key for ssh_user. Must already
                be installed on targets as an authorized key
rexray_config: complete contents of REX-Ray config file. Must be a
JSON-serializable object.
rexray_config_preset (str): name of preset REX-Ray config
zk_host (optional): if provided, zk is used for exhibitor backend
expect_errors (optional): raises error if result is unexpected
add_config_path (optional): string pointing to a file with additional
                config parameters to be merged or used as an override
Raises:
AssertionError: "error" present in returned json keys when error
was not expected or vice versa
"""
test_config = {
'cluster_name': 'SSH Installed DC/OS',
'bootstrap_url': bootstrap_url,
'dns_search': 'mesos',
'master_discovery': 'static',
'master_list': master_list,
'ssh_user': ssh_user,
'agent_list': agent_list,
'public_agent_list': public_agent_list,
'process_timeout': MAX_STAGE_TIME}
if rexray_config:
test_config['rexray_config'] = rexray_config
if rexray_config_preset:
test_config['rexray_config_preset'] = rexray_config_preset
if zk_host:
test_config['exhibitor_storage_backend'] = 'zookeeper'
test_config['exhibitor_zk_hosts'] = zk_host
test_config['exhibitor_zk_path'] = '/exhibitor'
else:
test_config['exhibitor_storage_backend'] = 'static'
if add_config_path:
with open(add_config_path, 'r') as fh:
add_config = yaml.load(fh)
test_config.update(add_config)
with open('config.yaml', 'w') as config_fh:
config_fh.write(yaml.dump(test_config))
with open('ip-detect', 'w') as ip_detect_fh:
ip_detect_fh.write(self.ip_detect_script(ip_detect))
with open('ssh_key', 'w') as key_fh:
key_fh.write(ssh_key)
remote_dir = os.path.dirname(self.installer_path)
self.ssh(['mkdir', '-p', os.path.join(remote_dir, 'genconf')])
self.scp('config.yaml', os.path.join(remote_dir, 'genconf/config.yaml'))
self.scp('ip-detect', os.path.join(remote_dir, 'genconf/ip-detect'))
self.scp('ssh_key', os.path.join(remote_dir, 'genconf/ssh_key'))
self.ssh(['chmod', '600', os.path.join(remote_dir, 'genconf/ssh_key')])
self.run_cli_cmd('--genconf', expect_errors=expect_errors)
def preflight(self, expect_errors=False):
self.run_cli_cmd('--preflight', expect_errors=expect_errors)
def install_prereqs(self, expect_errors=False):
self.run_cli_cmd('--install-prereqs', expect_errors=expect_errors)
self.preflight()
def deploy(self, expect_errors=False):
self.run_cli_cmd('--deploy', expect_errors=expect_errors)
def postflight(self, expect_errors=False):
self.run_cli_cmd('--postflight', expect_errors=expect_errors)
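# Illustrative usage sketch only: wires a DcosCliInstaller against a
# hypothetical bootstrap host. The host addresses, key material, download URL
# and the 'aws' ip-detect preset name are placeholders, not real values.
def _example_cli_install():
    installer = DcosCliInstaller()
    installer.setup_remote(
        tunnel=None,
        installer_path='/home/centos/dcos_generate_config.sh',
        download_url='https://example.com/dcos_generate_config.sh',
        host='10.0.0.1',
        ssh_user='centos',
        ssh_key_path='/home/centos/.ssh/id_rsa')
    installer.genconf(
        master_list=['10.0.0.2'],
        agent_list=['10.0.0.3'],
        public_agent_list=['10.0.0.4'],
        ssh_user='centos',
        ssh_key='ssh-rsa AAAA...placeholder',
        ip_detect='aws')
    installer.preflight()
    installer.deploy()
    installer.postflight()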
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from keystone.common import config
from keystone.common import logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
# Tests use this to make exception message format errors fatal
_FATAL_EXCEPTION_FORMAT_ERRORS = False
class Error(StandardError):
"""Base error class.
Child classes should define an HTTP status code, title, and a doc string.
"""
code = None
title = None
def __init__(self, message=None, **kwargs):
"""Use the doc string as the error message by default."""
try:
message = self._build_message(message, **kwargs)
except KeyError as e:
# if you see this warning in your logs, please raise a bug report
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise e
else:
LOG.warning('missing exception kwargs (programmer error)')
message = self.__doc__
super(Error, self).__init__(message)
def _build_message(self, message, **kwargs):
"""Builds and returns an exception message.
:raises: KeyError given insufficient kwargs
"""
if not message:
message = re.sub('[ \n]+', ' ', self.__doc__ % kwargs)
message = message.strip()
return message
class ValidationError(Error):
"""Expecting to find %(attribute)s in %(target)s.
The server could not comply with the request since it is either malformed
or otherwise incorrect.
The client is assumed to be in error.
"""
code = 400
title = 'Bad Request'
class StringLengthExceeded(ValidationError):
"""String length exceeded.
The length of string "%(string)s" exceeded the limit of column
%(type)s(CHAR(%(length)d)).
"""
class ValidationSizeError(Error):
"""Request attribute %(attribute)s must be less than or equal to %(size)i.
The server could not comply with the request because the attribute
size is invalid (too large).
The client is assumed to be in error.
"""
code = 400
title = 'Bad Request'
class SecurityError(Error):
"""Avoids exposing details of security failures, unless in debug mode."""
def _build_message(self, message, **kwargs):
"""Only returns detailed messages in debug mode."""
if CONF.debug:
return message or self.__doc__ % kwargs
else:
return self.__doc__ % kwargs
class Unauthorized(SecurityError):
"""The request you have made requires authentication."""
code = 401
title = 'Unauthorized'
class AuthPluginException(Unauthorized):
"""Authentication plugin error."""
def __init__(self, *args, **kwargs):
super(AuthPluginException, self).__init__(*args, **kwargs)
self.authentication = {}
class AuthMethodNotSupported(AuthPluginException):
"""Attempted to authenticate with an unsupported method."""
def __init__(self, *args, **kwargs):
super(AuthMethodNotSupported, self).__init__(*args, **kwargs)
self.authentication = {'methods': CONF.auth.methods}
class AdditionalAuthRequired(AuthPluginException):
"""Additional authentications steps required."""
def __init__(self, auth_response=None, **kwargs):
super(AdditionalAuthRequired, self).__init__(message=None, **kwargs)
self.authentication = auth_response
class Forbidden(SecurityError):
"""You are not authorized to perform the requested action."""
code = 403
title = 'Forbidden'
class ForbiddenAction(Forbidden):
"""You are not authorized to perform the requested action, %(action)s."""
class NotFound(Error):
"""Could not find, %(target)s."""
code = 404
title = 'Not Found'
class EndpointNotFound(NotFound):
"""Could not find endpoint, %(endpoint_id)s."""
class MetadataNotFound(NotFound):
"""An unhandled exception has occurred: Could not find metadata."""
# (dolph): metadata is not a user-facing concept,
# so this exception should not be exposed
class PolicyNotFound(NotFound):
"""Could not find policy, %(policy_id)s."""
class RoleNotFound(NotFound):
"""Could not find role, %(role_id)s."""
class ServiceNotFound(NotFound):
"""Could not find service, %(service_id)s."""
class DomainNotFound(NotFound):
"""Could not find domain, %(domain_id)s."""
class ProjectNotFound(NotFound):
"""Could not find project, %(project_id)s."""
class TokenNotFound(NotFound):
"""Could not find token, %(token_id)s."""
class UserNotFound(NotFound):
"""Could not find user, %(user_id)s."""
class GroupNotFound(NotFound):
"""Could not find group, %(group_id)s."""
class TrustNotFound(NotFound):
"""Could not find trust, %(trust_id)s."""
class CredentialNotFound(NotFound):
"""Could not find credential, %(credential_id)s."""
class VersionNotFound(NotFound):
"""Could not find version, %(version)s."""
class Conflict(Error):
"""Conflict occurred attempting to store %(type)s.
%(details)s
"""
code = 409
title = 'Conflict'
class RequestTooLarge(Error):
"""Request is too large."""
code = 413
title = 'Request is too large.'
class UnexpectedError(Error):
"""An unexpected error prevented the server from fulfilling your request.
%(exception)s
"""
code = 500
title = 'Internal Server Error'
class MalformedEndpoint(UnexpectedError):
"""Malformed endpoint URL (%(endpoint)s), see ERROR log for details."""
class NotImplemented(Error):
"""The action you have requested has not been implemented."""
code = 501
title = 'Not Implemented'
class PasteConfigNotFound(UnexpectedError):
"""The Keystone paste configuration file %(config_file)s could not be
found.
"""
|
|
"""--------------------------------------------------------------------
COPYRIGHT 2016 Stanley Innovation Inc.
Software License Agreement:
The software supplied herewith by Stanley Innovation Inc. (the "Company")
for its licensed SI Vector Platform is intended and supplied to you,
the Company's customer, for use solely and exclusively with Stanley Innovation
products. The software is owned by the Company and/or its supplier, and is
protected under applicable copyright laws. All rights are reserved. Any use in
violation of the foregoing restrictions may subject the user to criminal
sanctions under applicable laws, as well as to civil liability for the
breach of the terms and conditions of this license. The Company may
immediately terminate this Agreement upon your use of the software with
any products that are not Stanley Innovation products.
The software was written using Python programming language. Your use
of the software is therefore subject to the terms and conditions of the
OSI- approved open source license viewable at http://www.python.org/.
You are solely responsible for ensuring your compliance with the Python
open source license.
You shall indemnify, defend and hold the Company harmless from any claims,
demands, liabilities or expenses, including reasonable attorneys fees, incurred
by the Company as a result of any claim or proceeding against the Company
arising out of or based upon:
(i) The combination, operation or use of the software by you with any hardware,
products, programs or data not supplied or approved in writing by the Company,
if such claim or proceeding would have been avoided but for such combination,
operation or use.
(ii) The modification of the software by or on behalf of you
(iii) Your use of the software.
THIS SOFTWARE IS PROVIDED IN AN "AS IS" CONDITION. NO WARRANTIES,
WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED
TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. THE COMPANY SHALL NOT,
IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
\file vector_teleop.py
\brief This module contains a class for teleoperating the vector
platform with a joystick controller
\Platform: Linux/ROS Indigo
--------------------------------------------------------------------"""
from utils import *
from system_defines import *
from vector_msgs.msg import *
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist
from std_msgs.msg import Bool,Float64
import rospy
import sys
import math
"""
mapping for controller order is dtz_request, powerdown_request, standby_request, tractor_request, balance_request, audio_request,
deadman_input, manual_ovvrd_input, twist_linear_x_input, twist_linear_y_input, twist_angular_z_input
"""
MAP_DTZ_IDX = 0
MAP_PWRDWN_IDX = 1
MAP_STANDBY_IDX = 2
MAP_TRACTOR_IDX = 3
MAP_BALANCE_IDX = 4
MAP_AUDIO_IDX = 5
MAP_REC_GOAL_IDX = 6
MAP_DEADMAN_IDX = 7
MAP_MAN_OVVRD_IDX = 8
NUMBER_OF_MOMENTARY_INPUTS = 9
MAP_TWIST_LIN_X_IDX = 0
MAP_TWIST_LIN_Y_IDX = 1
MAP_TWIST_ANG_Z_IDX = 2
NUMBER_OF_AXIS_INPUTS = 3
class VectorTeleop:
def __init__(self):
self.is_sim = rospy.get_param('~sim',False)
if (False == self.is_sim):
"""
Subscribe to the configuration message
"""
self.config_updated = False
rospy.Subscriber("/vector/feedback/active_configuration", Configuration, self._update_configuration_limits)
start_time = rospy.get_time()
while ((rospy.get_time() - start_time) < 10.0) and (False == self.config_updated):
rospy.sleep(0.05)
if (False == self.config_updated):
rospy.logerr("Timed out waiting for Vector feedback topics make sure the driver is running")
sys.exit(0)
return
else:
            self.x_vel_limit_mps = rospy.get_param('~sim_teleop_x_vel_limit_mps',0.5)
            self.y_vel_limit_mps = rospy.get_param('~sim_teleop_y_vel_limit_mps',0.5)
self.yaw_rate_limit_rps = rospy.get_param('~sim_teleop_yaw_rate_limit_rps',0.5)
self.accel_lim = rospy.get_param('~sim_teleop_accel_lim',0.5)
self.yaw_accel_lim = rospy.get_param('~sim_teleop_yaw_accel_lim',1.0)
default_ctrl_map = dict({'momentary':[[{'is_button':True,'index':4,'set_val':1}],
[{'is_button':True,'index':8,'set_val':1}],
[{'is_button':True,'index':1,'set_val':1}],
[{'is_button':True,'index':2,'set_val':1}],
[{'is_button':True,'index':0,'set_val':1}],
[{'is_button':False,'index':6,'invert_axis':False,'set_thresh':0.9}],
[{'is_button':False,'index':7,'invert_axis':True,'set_thresh':0.9}],
[{'is_button':False,'index':2,'invert_axis':True,'set_thresh':0.9}],
[{'is_button':False,'index':5,'invert_axis':True,'set_thresh':0.9}]],
'axis_range':[{'index':1,'invert_axis':False},
{'index':0,'invert_axis':False},
{'index':3,'invert_axis':False}]})
"""
Get the mapping for the various commands, defaults are xbox360 wireless
"""
self.ctrl_map = rospy.get_param('~controller_mapping',default_ctrl_map)
"""
Initialize the debounce logic states
"""
self.db_cnt = [0] * NUMBER_OF_MOMENTARY_INPUTS
self.button_state = [False] * NUMBER_OF_MOMENTARY_INPUTS
self.axis_value = [0.0] * NUMBER_OF_AXIS_INPUTS
        self.send_cmd_none = False
        self.goalrecorded = False
self.no_motion_commands = True
self.last_motion_command_time = 0.0
self.last_joy = rospy.get_time()
self.cfg_cmd = ConfigCmd()
self.cfg_pub = rospy.Publisher('/vector/gp_command', ConfigCmd, queue_size=10)
self.goalrecorder_pub = rospy.Publisher('/vector/record_pose',Bool, queue_size=10)
self.motion_cmd = Twist()
self.limited_cmd = Twist()
self.motion_pub = rospy.Publisher('/vector/teleop/cmd_vel', Twist, queue_size=10)
self.override_pub = rospy.Publisher("/vector/manual_override/cmd_vel",Twist, queue_size=10)
rospy.Subscriber('/joy', Joy, self._vector_teleop)
def _update_configuration_limits(self,config):
self.x_vel_limit_mps = config.teleop_x_vel_limit_mps
self.y_vel_limit_mps = config.teleop_y_vel_limit_mps
self.yaw_rate_limit_rps = config.teleop_yaw_rate_limit_rps
self.accel_lim = config.teleop_accel_limit_mps2
self.yaw_accel_lim = config.teleop_yaw_accel_limit_rps2
self.config_updated = True
def _parse_joy_input(self,joyMessage):
raw_button_states = [True] * NUMBER_OF_MOMENTARY_INPUTS
self.button_state = [False] * NUMBER_OF_MOMENTARY_INPUTS
for i in range(NUMBER_OF_MOMENTARY_INPUTS):
inputs_for_req = self.ctrl_map['momentary'][i]
for item in inputs_for_req:
if item['is_button']:
if item['set_val'] == joyMessage.buttons[item['index']]:
raw_button_states[i] &= True
else:
raw_button_states[i] = False
else:
temp = joyMessage.axes[item['index']]
if (item['invert_axis']):
temp *= -1.0
if (temp >= item['set_thresh']):
raw_button_states[i] &= True
else:
raw_button_states[i] = False
if (True == raw_button_states[i]):
self.db_cnt[i]+=1
if (self.db_cnt[i] > 10):
self.db_cnt[i] = 10
self.button_state[i] = True
else:
self.button_state[i] = False
self.db_cnt[i] = 0
self.axis_value = [0.0] * NUMBER_OF_AXIS_INPUTS
for i in range(NUMBER_OF_AXIS_INPUTS):
axis_input_map = self.ctrl_map['axis_range'][i]
temp = joyMessage.axes[axis_input_map['index']]
if (axis_input_map['invert_axis']):
temp *= -1.0
self.axis_value[i] = temp
def _vector_teleop(self, joyMessage):
self._parse_joy_input(joyMessage)
if self.button_state[MAP_REC_GOAL_IDX] == 1:
if (False == self.goalrecorded):
temp = Bool()
temp.data = True
self.goalrecorder_pub.publish(temp)
self.goalrecorded= True
else:
self.goalrecorded= False
if self.button_state[MAP_DTZ_IDX]:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
self.cfg_cmd.gp_param = DTZ_REQUEST
elif self.button_state[MAP_PWRDWN_IDX]:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
self.cfg_cmd.gp_param = STANDBY_REQUEST
elif self.button_state[MAP_STANDBY_IDX]:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
self.cfg_cmd.gp_param = STANDBY_REQUEST
elif self.button_state[MAP_TRACTOR_IDX]:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
self.cfg_cmd.gp_param = TRACTOR_REQUEST
else:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_NONE'
self.cfg_cmd.gp_param = 0
if ('GENERAL_PURPOSE_CMD_NONE' != self.cfg_cmd.gp_cmd):
self.cfg_cmd.header.stamp = rospy.get_rostime()
self.cfg_pub.publish(self.cfg_cmd)
            self.cfg_cmd.header.seq += 1
self.send_cmd_none = True
elif (True == self.send_cmd_none):
self.cfg_cmd.header.stamp = rospy.get_rostime()
self.cfg_pub.publish(self.cfg_cmd)
            self.cfg_cmd.header.seq += 1
self.send_cmd_none = False
elif (False == self.send_cmd_none):
if self.button_state[MAP_DEADMAN_IDX]:
self.motion_cmd.linear.x = (self.axis_value[MAP_TWIST_LIN_X_IDX] * self.x_vel_limit_mps)
self.motion_cmd.linear.y = (self.axis_value[MAP_TWIST_LIN_Y_IDX] * self.y_vel_limit_mps)
self.motion_cmd.angular.z = (self.axis_value[MAP_TWIST_ANG_Z_IDX] * self.yaw_rate_limit_rps)
self.last_motion_command_time = rospy.get_time()
else:
self.motion_cmd.linear.x = 0.0
self.motion_cmd.linear.y = 0.0
self.motion_cmd.angular.z = 0.0
dt = rospy.get_time() - self.last_joy
self.last_joy = rospy.get_time()
if (dt >= 0.01):
self.limited_cmd.linear.x = slew_limit(self.motion_cmd.linear.x,
self.limited_cmd.linear.x,
self.accel_lim, dt)
self.limited_cmd.linear.y = slew_limit(self.motion_cmd.linear.y,
self.limited_cmd.linear.y,
self.accel_lim, dt)
self.limited_cmd.angular.z = slew_limit(self.motion_cmd.angular.z,
self.limited_cmd.angular.z,
self.yaw_accel_lim, dt)
if ((rospy.get_time() - self.last_motion_command_time) < 2.0):
self.motion_pub.publish(self.limited_cmd)
if self.button_state[MAP_DEADMAN_IDX] and self.button_state[MAP_MAN_OVVRD_IDX]:
self.override_pub.publish(self.motion_cmd)
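# Illustrative sketch only: a typical rospy entry point for this teleop class.
# The node name 'vector_teleop' is an assumption; the actual launch files may
# use a different name.
if __name__ == "__main__":
    rospy.init_node('vector_teleop')
    VectorTeleop()
    rospy.spin()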
|
|
#!/usr/bin/env python2
# transpiled with BefunCompile v1.3.0 (c) 2017
import sys
import zlib, base64
_g = ("AR+LCAAAAAAABAC8vVnP9cZ17/lVqh1fRXCf1xmcRAh0ziK5NllkDfutKnK/fBAk6YsD9UU3IKD75CofvmvVfuRMNuAO4t9GYsuSrGWSNazhP/zg/u3vn9wf8vvhD/47"
+ "/+0vSrn/9T/mF//9v3v9xZ9+9933f/blpy9ffvrhy6/7v7sfvv/y5ccff/E/fvGr77/845/8/T/88ssPv/7y41/9xZ/+6Xf9r/+hgZ/Sip+9/L5H+Pv3H/z9v/y5f/in"
+ "P/ntH//qu9/85vvvfv23f2Cw4NPyOyPZ/9p/+Ps/+d/6433/q1//2B+y//+XH7/8+pf/6h/9w/+/lzlJ6f/3O8L98Gdffvzz/ho//+OXv7A/7v/646+/+3H8+5999+M/"
+ "/vMP/Q++//V3P/zmy09/9eWn3/zmu+//uv993/9m/G1/91fj3/76y4/jP/+T0+A/ZNK2/QH/w/75H//OnvHP3U9f/qK/vB+//MXven+//Nt//rufvvx1fx3f//jlr3+y"
+ "v++v7A/c37pdU/IPLb83wK/+7Kcvfz7+i3/+8z/6v/03V//X/+3+3//zf7p/+j/+r//1P/+ff/tf+fvf+wn7avwPq+Nff4i//afvfv3Tl7/57scvf/OnP375y1/1BfmH"
+ "Lof/+KtnlfTv/twPX/7my0/9k/1lf/2//uH7v+yL/vtfuF98/tVf/fN/Olh/slWKtt/zl//5py9/ORbjX37+iV/+8pf/+Sdbcsltu3/HX/kf/3t/df/5f/Dv+gVff8+G"
+ "/iP8kqT5dz3XH+V39A/27xfIH+3X93PDnmzTwD1Z32XL7zoa/yi/JaeELcdZSg5UsHL+QQf+f82vbv3RqBUS/dwXZFAkWJATW4x96RfBPtrh46QlMIfIolPGnm3XWv0M"
+ "fbW6+RIUOornO7Xt9yXE/9U/SasGau3HnqVWKjOY+kX9u5P9P8JPInZP2zfjrjNXdNKZ2meXL6tP0No/pG1BoZznKRHcZ1LahmVXOnGXp0ThNnVt+twkeSQJGXnjDaVX"
+ "81Z8bT4hT2YFPJPH9d8u6fcW1P/lv7mfIFqg1/govc7VSoQaVwwTyH57vmdsgSxemO9lPwmee7L9DNw+23SsfSZY06JYQ2nJ/WyEYrk15P/Y3fxj/fRS7Ibp9dl/aKT+"
+ "8X5zX4o31XWJPixFFybYQV4xewa/mdQNK6n72bh4rFVWcsXOYUt4uDvm0AD1kvqvP1eGGoBjgXAV/M/pN3Lw2wmCPZodVthVPfULjept9jI3eWxbN4lcQ8kX5TLiHcy+"
+ "nSVXWLAiNrGAgllNjeVyNqtTKkm146pQTxbyyW3q+jvBIH+kXy/gyRxk/npqYLLUF9gBdFNOUP/PvQt47MnKOXEJT8hcSe2oxGr8ntsdAvVwKZeIPdpTTmxgMbpy3BAB"
+ "3NMu+L4+qLJao+fqs5Inj7ULnrqu2JPN5a5NoFxuDbLc1OlYfOPOxkVeYAuQvKkfvbLQBLVdWq8ssKWvC3h5Nv94SKJgNVIiBnAsGQTAzj5xzau1r0bs8nwp+GSXnxs2"
+ "jVGwH20oQCqUYeW0FOqj1TtcWM2056rPDZrFtE0DluzXrefEFLa3LxCPVRbauKacM8AylvFoEGgputFv3DMF4+k3NYcYmjcpIbfGQKFywjDLTiuH0BjdAmw5auTaIG4/"
+ "JWFVExiqJ6lb5lqpm3xwB7EY64EKphe3GA1tO2FnozyLx75Z0Mrh1+YADmPCOXuKyuR2iSDaNoNzJg0qKUPb+pKgWBKyGMicWiFRVw7lZQBYrD6r5we3HN8ERSjYKtw5"
+ "7PrVCbJHevaNwU8uLVwCvvczHwlkP4OIYsFWLRIWKNs3bi5WVM8kVm5cnlSwkEvB+gXhBukjVUBMatGVQxf0TY01C/qpP4R4mAWyZBugQUPPfp9xEGk5l4Kh2u8rc+m3"
+ "pVZQm8yaLiB8rRqrlEp5DLmJqWgsIKrG1QbOqckHcwaQxpaj59CNNvO8MHCSAYZ6aQE93ZbBtGAqvjUKyWBCAhjUxe7pRn2zOwcO3GsERexCsyoGa8sd0rgW4GFUJqpA"
+ "u/onwwRkhJu/W++Ky3echsotkD9QlPK/5mebGsscLz8fGL9olsLBsRtJVTG67L+TnP3j/dRjkj9Dt4AjoK09tyrnAt1nJ5fqu5bTjS1HDdw2GyUT1i14efBCA9tknzkI"
+ "1QAfwoPUFZOkCTYVlzVRPbkxi+EWyMtIOCiijJppTeRqXBRs25pYJFYMPrV/MyZU32fgyNNFm59RGc+cA5deiREvqVZqk8jlxHsG1URXzWWlMBpzTj3ngZhTAT31g5wL"
+ "9Rp36ZUniP3mMDzWRcXygqTgAokeJChG8o7Z1C9cmRtAWedwY1va2kmZKz3nfJaWoNpzWLhQTZfawBJ+qPZSsJolgyXTnrFQn51U7mxs3HykgWTI/mTfOEqYB7VI+pNN"
+ "HHukH1cVA8DGuzRM5bDfnSAtfagcYvJ8oCvCU1rlfJl84BAhqzHuKR36YSOHdVLJy9Oc3bgnI1vts1TOuyso55Tknn01gjZyYIfn2c9hY6Ah3yxJjuRQnJMInopwaiQ5"
+ "rNymnshGmUm9YUmqaSdhA9agmTuJd/K4eijI46uZcoBy1pUrXih1nAuUdHGTzgcHgM0gLd0aZZjJzwFKtRvShROsjlx1ZhkxiG+sG+hTsIjnisEikbtjhpIXtR61+AM7"
+ "Gw/julHvcQ7iC9bh8WmpVMYzglGVzKqJU1pZT1kknE/ks00k0uVtOg+pDFXSl2kmOzwPubkLTUhKmEEOsfZ3lYk7iH0Fpwi2z85+qSEEo40caW1Dng/KvyNJ45t19pgF"
+ "w7uwYGK9e1dYsCBp3qDlWMGmnFFVYHEt6uD3oOml0xP0rJPEjWKcT/pBxTKZFS4xiLJQDj+m36icFY4EkJY+qhivFyY3+5Uilu6ZAwEO10tsU0dZuSPkkJtDAVaSWbeD"
+ "5rIucIIdv+0RU+rY/dSnbs8H6ZVkJGBMEi0/KYMfN2QwMaaKuy9Op8lYWhnLv8PJJVeunmCun4PnfCzMvYi7YmwWQzWkDxBx2I8rUGk5gGZ8LoBEBCMCc6iJSft7pDBl"
+ "klr2lMaQpQVYUW2eCCDSBZQefGJgkP47jH9GBTOLH2zpvzaPsYtMfW1ZqYb0QerwzGAbxAVpGasGn9I48InlINh7XHsBj6UFCylXYywt7JuZhjT2Gm1OjR3EbzYk9HAH"
+ "qeT16QPPsEeU5J+NkRZICQNzfW0ctLef+kwg+41ikHqRJX/jYO2SVgNkQz4WeeN6gCVPzOlhv0fpm5oae8pSvFIrxOyHOdmwoN8wSthUcj4wxo+2DfOnrrKQoqwlyQIV"
+ "1UbCwbKrnWtHWwFPrXtn2VWNGL0/+XnDqHV9fWxCIfPMtqsqhraduGKw72nF5pDRBM1BMOWMVbpKqiQc5e5JKvQe33kj9HD9fuHESHqZy4FPdEgMUd6QHhW2OAPniYAq"
+ "MoDUXBNq4kTGP+XTmdxxnCBEIPstWn2hcDw2rMN4x1tmmEXjt/r+0Sg3bAPmYUDiXp9x1Mu3nBFUoj2KgtBe7dnVAuk692C6eMgSJNygZ10DZwhuJmnpioJP1K+Y6k/U"
+ "deOUKfNzw8aQGrKv1CVT8rxhKfFatPVwzFebFZzBF7HxGRPL9TdoND5mQiLhxny7LAcBvUdAeOOQ54NC2ZOB0qUmZ8TZWxVu5OmCGMIROokNR4xVFjb4wZJ9kJTeb2of"
+ "ODi29lhYJ2QWTB1koJM4+NrIrkCLTa6AN6wcVlmYww92Eh+kAUPNaefAlJ4smciEZyEBsJHUSY0elBIgfbv6awSfbJwg1KNpwMoKwxZUfVBNl0F7oJZI9IVz3l78N5I4"
+ "xaX6NqfGEIfuIVQb1VlXDrzP9rxxQ89eVnB921mix0bHTTglbnuNLywL6XUFd+zrlDml5ahhwp6MHMA7m8Rwtjsy9cMReroMFjFuFx+xYC+fvI0ikWAHOF8dJBzsuDL5"
+ "B6x5ZesDWyBgr2DwYrhokSxz+03NaQkEUuCz352Q1qwbjbKCceBLBtXX3lLcFBRqAZkIbxIO5aBoBubUyKI2mbH3aPQirAs4k3RZI29PVP7d08ZVPMVE0GBQYibYri8O"
+ "uTmTrGMTpgQTnkkPSkbDeldcVy6XtlH7bIApoVhOHw/uPgONJcZq5PbZ6PBgwQQsquUsXCskKGhFSeG7xi9ksMNzgN40tqm5Aj7548CeTEm79E/CPSNcsJNa7YPiDOqk"
+ "egxNacw67NSfim+YbtguiaPxhXwWimLhQr/QQBxx4Mb9Dy0cAFYD5jc13Is8JiBzGAWeyvVJe2qjhHE5yBhDmkgIEUw86AI1i2+BOvd9WotnZkw9GLinnZ6rYqTSN0GR"
+ "8rHIgWsSm+k8mF2dCzfN7cvjQZFz26ZFKU6Y5foH9dEWcgwZMjiGtCQVY7CaAixmCDKTmNSYy4rxPOec6qkhI8EM5IUZ55rWBNZRWnTiGmVjpEUd+0/S1HAh8+8xZaIq"
+ "zwdJDth9CJ46QpZccr+tIaEm7otZo+wkORZP7WnIB/Ie3+ATqivn46QFIh5XEI39bidxbMiSMUG0OSiTEtivNn1gHnkmN1s89HCkN82QM8Kam5EEElPm9uMnJF4/5JMK"
+ "9Z7FYKbi9aycyPgEusi9n4xkj2BPNjzFqWhvh3soXNDKKZ/seaaMJSyV8yto8504lpYpLYMLBLyrF3lu2E0tAYTVGESDkm90T20cdHMdqAlojfiYV2xQvWdwKH4oiE4K"
+ "MnMCMmsAZaFWmQq2Gg9SLPIslVNwDP7jg0MB+tJPRygtsD4IdjYaqh0rPaOAllO73Jyz2y4zljZ+3mdQsJpB75FZSsQUIAa+ERxpSd9oDPxkNq0JyvcS1DO3boG1vyF7"
+ "9rFAMsXSCsKRSk30Bzv166Zcq/3Te4SST/ecfnrw3DDXDPIm7orZ5MQWiAnjcIiyYCgvqME5S9sCZQjy3mcUCvBdMjH9JDnbGalU7ujfzJgPyJPVM0bOoSOD7P7BgaeC"
+ "BcrVzX6fdiBMsN2D+iD7iZnI2SwGdHEOJ2iX3pNUcypggi05+uS/nsxxledcCjTuB7VE3/qNGFzOFggGCIlycYT7mSRvT5YRQ7FG5dkYPNlb/gEbHQfloAWf2raY6A/Z"
+ "tvVYpj+YCKCQriwZG8a8PHdRmzr2iTU3V+HEalwl06uFRLXXe6JkmkwVChOgsn0Gaict5Gr0F4jXXzgRzKGTikm1DzNsTsz/BJGbOzkUj7La6mdutPwArXCiCkd2k5Ob"
+ "QvZv1nygGvtBQQ+XXlEnbIlMXhLFVHHVx4wVFgZavjHXS0kLdl5dvqykRDAHKZukTNgRsmru75HyzQ2e6hV8FvBUsKn4ZaXmZ2XLnPTJJhyofYC8sCdLJHNq0pJkgVo8"
+ "j5A5gRDS2G14HXNqomfjHBR7tgM63PeFWCjhn+DBPkgDdeH7FYN5qvSDuG80zH94yGBSj3aJQcooGp/pwlNt2wGmpOC2KoVDSM/SBGOKC6lRFjmQ6CeYktIuDX7hns1G"
+ "4thqNOftXnwW5PHmnGZ9zj4T+7qRq9Gs/wryWM66BWBDesmg6I8WP2MSVDtpaqgB1GqfN+EYHUOFnsv1wQst9qwRk4CIpBNlk0mwBLwq6O1GgpMcCU5yNT+4G22wR6hH"
+ "y4tiIvQugV7YzrICr42BboZ+wWB541OThgwJ/8RewK+9IkSCaY+GbbRJQVPDflzdlUpSpRpThYnV7zOQoNgyCKHXGrUI1OA8SCtbvcAq5jlaPFDADxIrFwfnGApnmT6G"
+ "SW0SFSsGq5xcybQZJpXqSAt3WA05I0ps9l8I95AJVFkwKFQAU5D+zUBU+0oqLVuPGFv7EnTH2i5GgM8UzzP6eRNKmTJkcAy5k9Z/C4m27flOUop9+dZ0gaaeTe6ALf2U"
+ "lWsSGxybm8CDgDJnc2NMTjRkEGludQXGK+25fl/60Bkyk08GdkEGl4lrSFtrAgPAHkJJQjlj95uNBbRC9rxxdo1BDu6b6fLimi5xKFBBN7XR0pU6rg7F/KbeBEXspi75"
+ "G1fB95qaUzk0aRwM6pJI/zOT54vUNCbIlTHGvQfliN1RwGluk8jl+vX8AFdj5gBlfVNjtKl3Ac9Z/5lPEhVs3spdmzBdFwyPar+F1AcZnVTqPntnxNCbFA6ybORtkBJ2"
+ "CGcpPkBe2HrUCxQvnXPgrphIWtwHeQk287y1cs7bxnQDR8cLZ05juT6HmlhAZcr83DhJhkuCMmB9Z91vMC2Ivn2g6mtUrDdTHJMINj1iTh0by63e1n+kmD/Hgg8CGu+8"
+ "rf8oGh+J11/UL9i8f96kYgYMddOCgSlt8IOl37tExVo8PUclYTU35fUwln7B1v7KTc8+WccYQrrI3HM5JljPiDkjSsPKYV2Xl4JyRg0kj5gYCTeo7kkqd3v6CMpoWPrd"
+ "E2JsDEk6B2QOmEfKFrgqnDWktSa4s3Hn8uF3jxhryy2gLZOTs3B9kL3XZw8sLTjXs1Ln/ntiQTU385lWJtRvjU6YlkFQTsjLibFwOKrKZQfWhgTrK7Gv/Mo83OVB4lQa"
+ "OjycnBFnGNMvTg7qMrAFHIP1UTA7viGDCdqigmKR8SmU684YQ5KcQU5DWkCzdPOx4FQODykYCLCnBSCI5+vJLcZexZykojk30foXIAPxeO/eFcXNJUVdTNoWCtWDJc94"
+ "U9vPOjxYFROEI9Z9FvBUMJkPrA/ScysPtgAXDQq1ryLphLOZBCzVAjRHYOyuDqD1SH+yyqnY/axCjwR7X51QP2kn9a4WzxERjFQaMfXjQzg7T5sy3Zgl2XPLOmGOU2bN"
+ "PmtCNppBDpU6HBcPIqRnUPt4IKSNu820bf0HEeYzWK/PsKJp07ByMitkt0BMPppa+4XTFxpL/+tJ3dQbSSoFXZneSl5YAv7OriADcwPAzloZC7SepLZEdYkP8S14qrDI"
+ "WD48PH44OmTJL24EX/Tm+kntBfoXPXpyBWJSU893oGB9JXIU52HAQOGuDCvHTeDDhGJSV08NWAUU7XVXxnh1vfKUVDlBNHKBpEyJXbnhpeUxs5Ojr3vsmy2sEaWZsEIf"
+ "LeQTxIOAzn/uoUFnajWa7Q6mRxKUw5n3y/MytQlmq5VsdzUSqic8dUgBQjW1lugp9rY5KHLjEWr6br+WQbpsnptgPK1dFk6tZtHkMU7YqpwClaseXPumR4wVaHMuXNPF"
+ "6k4uu8qFc2Gt94K9xTcRGFuNOXnMpiBI3bASbT85Fp+hAAMm+yOp+UxhpE0GM1PZ1ZSniTWd99AFmkCpiZ7rg6WnOf9hw5iF5MUED67GoTBEBVvl5rrfQZpg6raR1AI0"
+ "2wBMrP3wcbKiGln9u9RIjc+cGWxiZcwHSBR3ZCzHkc+cZVeVg5StJ3jqDwdFTq3GMGWU5uY0kVLcs6dIWu9xP0a47zkI9hp1uduGjSHN9dKvDDe3ki1A0yjDstRVwX6j"
+ "BFDS/GlUcbJRJpTbwyIg+GQmCYpvn0GqUXaCZe4u89eT0pApOikDlHNv0PKkjTmID5Nqp+b9PnOFp1ty4zA8k4JUFcyOzH5yThy9fy2+YmWTftoMUq6XNKodSgwWjdoo"
+ "XeeF9Keet8wpDO3y9aTk9a2T2hc+NbEooIrG5cuKVRbgENJwxM1ja795UkrA5GoyJAE7k/LpjRSRnnzhrANePmz9LOZUNKDe91v5BONYBGkFw9Xc4HXmdlJpuZE1Nakk"
+ "0G9qcAwZzX+BMzAHNcp63ugDpDtowhacsBwIqjGeZ2maIIx02zQvGQJDNS1aKSJfydOEDVgX0jzUelfGQUOCmaAidl71E+S5UXnBBNbvLpGE+4UUrJ49KOY/aeXYkJ60"
+ "t9rIb2ayOJiAzMuDPmEX2AG0komjpb8V86AX+bLWhEQklisybwo52br17I82WD/Am9SQMfSaMX5Gx5GxkZNC6qSC1m6BPPXnfuw36iiuxoqhUjk7riqFLQhKlTD9J6tw"
+ "Hemagy4C6bJGvy4YksFfXMX0aS0BnVgmK4edIUOMBIrVX2PI2FD8wpT8neGuQCBDIzkWjZPWep/6Zi+BBAvnXJRStjikUFqiJhZJ+be40S3w1EXtBFR/GPwzrEd8aeHY"
+ "23XjPpl75oLpx7gImj0Muqy8oDpm0QvTaeqbGjz1m7w4D9ZMsnD6puaSkE//MyhJBUHtRgnjQKkmC481pIs8MAR9z64okqczNiR4gsz2zaidJuHieDH1/OCI6f0242jH"
+ "GB1sBFOwPusLhFv6VSLXT8oFNKLsz+UDJZ8UZcnYBC2Q+vqXYt6ygynOjbRscswNxccwBgrWD2IOMPTcbFoHPVrIhcP2qhmYU+9xLifnLTF8mTCki5QJCWS/D65D/Fux"
+ "SChY5iomt3LTszGGLNhk8LLSk2oClvyNA9b0+ixiK+QgHTrAwfHPoj9QerXYfUalBdx01b0ZrNxQfFiKQ+E+MmgtUUh17KKgupbZW2Ef7bjBE+TdSYUgZc++GCk7z/dN"
+ "Ta0QBWFXrvnHQxJUWjRuXmHYgpMbQwbN3Hk1FZBftJPZ1SwlYrgaGx1jVUzLUbBNHclZzJKjT5SHYpXE7bO+FDPnPXKCg59eC37jLMkkcjV1uUFB4ugLd6EdCkKhLk0n"
+ "RvkxXyYqljnh+Jlqts+WEFO4mvflCT3aZLgrhYBXVuZipuLNgyCekqtidMjY6woM5DWT5jSj34gNfko/QKgmzwGW1G4/QRUNDTcnN7vJvVAqK5ZdcVSVySAaXKu9rNg3"
+ "WwcbEsrmHj7kSPFHRjuJMpj96E+Fgby2gdeHGEamd4WxtC4BeTEn2CdzQmq9PdVmnlTCI2XPFK69B5uxOtfc0plI/ackZ1BDPj2VGAgo1e5MOQk7rxa5PHQyWrDBO2aC"
+ "GYZHKBBPv2JKpnrEk4II2CQHNxSPwxCYmsWcnMCQAfMwRUVjQ4Jcplw4roqxIbHG/tQfjDPpleax3kSVGDkbuXxybnwhg8on+xk40R9JCyUx/gYtY3VuOGcOMRQ0J0yO"
+ "ZMkX6TgVOXGc6CPXth1GJ0YWJ4KtcnM44pbTzmVXxl+lgu0k0HwF2bLWlevvERrX1SacIsMbjg0Fi37muLnJH6AWoHJHo7tIj02rz25q3r/njUu/hYTVBDFMGRTMX5kr"
+ "c43hjFklJT8f2E6LsmcMV7OR1LpVMzetk/DkeDG3LJxMwlM56Ldbi64Zc5waElRIKCtze05MzRHemptEpP6L+YNzrRt6xJRIWc/1OTu+OZcF8x/Zc+VmWvPmv55UmtoG"
+ "YAgJNVqAGIuvZ1fcW3xfnlTjdibx+iaSgE0RQgbNQzfTPVmlIHPPWWePoZYHeZuCedWmPZeDLBSjrFyZa/Mz7JutpKTiKpOZSzCJKjjQGt1vkAjs5w1LeYpgy2OAltNM"
+ "2bjkALK330MEKJhfQHq/BIEV8zBSab7AcX9/jRhYrmVQH6TIxQ3FkxbFcMSjIU092sGpN74rzydVwPfT6oEdIY20RR1JqkK1RZWJO4iT4Rsp1Y43aBlKeHpGzKmJLqQ3"
+ "e9sUXPqbwQChk7iSVkmTBu4+61enYjorw+abOkNIKy2DaOSLsoKfc8GmkG4OGkfXhTiyxgKZqNXYb2qM0DGCcfA1KJD9/IWRi/6lIY3s7UfIXG9iyWUWaoow7Dwx7PdN"
+ "2oFsoHPXGENCRqUDjg2F6sG0+pl6jws5Hrn8wcFqLhI1YSoaWLsgKmhksb8VPqH6zCdo6uOsNTEVDMgwZ1A7SSpHrOubGmy2x7sX8NRNHaSZ0ApVWISA7bO3/ANli5pA"
+ "NdHLX1x61V5g3/ZZdPYZ0kRLunCJY99nOVEnVgDV+dwY5VLxHjL1o5jy0uplICaIZn6eGGrCmpvY4Gf4wFPiWkZVYSI5q89OTP3BBf/BgSkNP48lqaBEggl8gqPjXUEM"
+ "z2Ju6RSQ+O2qAjFj7ATBIPRvMRIo2CWB48WA/hyj38jpXc2kfmM9wQLeCPcYOTeoD5hNgSE3Ofl0nbi0wLQmQMHqumH72pP+1HoGzlJIpTal8GvLGSBNZzeQLv6iFNF6"
+ "SsC12g8BB5HvySCVXQmnNuumkvMBxXIC8u3dy1bjoyiy9CvpXlTOnlxRYhNRZd2oltKgqgRIkcQ8oLCi2jo8mEbwRXodtwKCUnu6U6j1YZs66kHlINbimbQh87oFE/12"
+ "n/hGDp00Hxh+zfr6WENp7jdMpkwNX2QLsGZOctOADFx2NTA81E3dC3hsCul2ElazyIu7Ykz8GHuyyYeA7bNe5RZMsbr6JYEH8VkwJsIuCYvltsyprLhH32dULHeRN3XT"
+ "wjER3oQOJlYvBidOdPPNZYLuTxv8YP2Cnl1xcOyxGqmPVi1x7HUuEqwvEMyMz4JxOUg4wwmFGq0Jbty/a/KPB9T/NvoZ5quymHMAdYQsGWy6mBElVsIPI0rq0SQ0WQuk"
+ "3CukioYshZO8/8iFsyQrZDGY/IyBAF2401KpLPXQflhROMBexXB9kHkrnDEqORg0kXFu8PPc7oCRA57bmTGv0oOk8Rl8zWY/SLApSJopEk6VtJ+Uz08hSfCpv0VQsLr4"
+ "QHnZ9mBcnTuDdGo3Fb9iZczn0mfeoy0QLHE8pFYMVbbne8aInpzCeP89NPgnl/BoqFRKPGUQkxp90ZsTPy6aPqBox83lO67fLxw7wMSPNQgjet8ytxh7sIkLVjflKM4c"
+ "Ks+N+4wLZ551IKp9cJyZeEFAQPaqhUNT6sJ9MjdJ5AD7fenfkD3ecC/ioC5bLk0TVOauUpIyMyZzSwdXYwMnxzY6vrG0oK9GrgV43CeXXRn1UimsnKUFxUN9WwOac4oM"
+ "o98INRyjb5x0ac+9J86XKSinV9O2E0y/5cX1rsw5AHIesT6IcCOt5h8PLFyQygFrjtFppyD0qfivJzTRXaBy+jNYwqxHrEfsMZsCCZwlgpNp4rRthyMwhgLMVbEXGWVN"
+ "mSLnBgzF4IZNAVdYvK3/oHJQyVZ7P0EoPYY3L4ZToe/FIJd+64qRjp1JjGO+O8mDdiBR7gnTxtmV1OHpOQhnmpEhBXr7RQxl7kYLsKeNxSOT6h5slQKNxd9OONCbVNIt"
+ "fdGLatr2feZTw3RWJIBOOC1js9zRLUhYuDGsozAa1ay3KWBevEFh+OA/hGKlD9oDEsh+hypHvexpASWtZSLjldQC1IJVTVIWD63FdzDMedt6xJhi3puJAH2z/ljcfWYS"
+ "VJhP2PA6ppR0NxCcZOY0Beu7GDAPW/tGCWMiudEj5kDLAjr/9WCR68t9cHwf5775HLER66WcrNyQM/KUnFEinUoTaeES5RsIX8sg1GWgJrBg4iP2aD6tXGHxkMLZ7ihp"
+ "YD6Lb1jCY8w6zFPccpDZN8Yjz0omrPQ0YQtsFnPngNnmultqxHqATcEL7VnORRMEWn4qSd6+Fw41EWU+NH0wD5fLk7OW6BnqhcHXgv/AOoBvJS8qldsV9Kzrh/6YaiHH"
+ "42ytCYrB2uvOgsELoi+ceGkE5UFs5slRpw6yD2Ldb25T9yoX6xeQVa6TRFUVzlK5uvkjM8HqWQ29xjxeFRBbUDbT/IFIP3vmkFBv2x3KBErq1jDmlBSZsdlgy5zGuLUA"
+ "uQLNtACxOyYISCo9BGxI7wICYH0AJ/Cb+IDhsafiW6P8rT5IaZx6TxxqeZDdqIaSCeNgEI3k0dZECVBuZcdVWigK/DsYE2o4u1GhLBhomiFRi58xyOFNucCPhCeCJByO"
+ "XeSSqdVQJ3FfHzMHX8tgWmDDuikXBu2y+nxhxKlVI+ehODQ3G9QllnPhhOVM25aK5S5O6M0cgbkbxrW+PrCGUl+JTwMCIsfI2yWMwzcmDFYzesRUoRsFmp3Zb5cPbprb"
+ "rxhuU28Z7BFvCqporFoEw4M8OGSBc1pbweiQ5g2JNZQq52JhrXZQJxXkQtoYMnDCFiY3i43g1wy69Cqoh+b6G8SkbV2vBBdsQRqiDONOmS8TtkAuDRzteMgZUc9W+7HP"
+ "RHIDNcGVMZxhnXsrDIFJqlGBMUdg8D5bSC3AmQQMSd0CpysXJi0NAgI2SRxp8F3AQ/MzUEPDzkZwYgGayA35dM7/LEg5sW/2CGCvfdGojTqvFoWcHuw3e1BNdB62qNDG"
+ "tlOfMzA/PzjNzZKrYDlxykoJvZnZGoh0MWF4DLB/lLs2Stj5sLkPNUjo6TcHJF6kBApx+CZvUxfazN3To1twKyXVNG8gyUJenmteSYiKBSs5cErLQTi4vpFwPNaVG9I4"
+ "1KbWi5uOuPkGq5iZ9M0dKodUsA9yiPBB0kcuMJOz1gQ2E3dPOTGG88/moVCHZ8uR4aRbMAEXyM8ymMhZPGx3qMvzkBvDnvRgbQtUI2QnwZS+gIejDyBg3y/gRjPrP0zF"
+ "zoIJpVswGTuA2mjmqcxNLEjN++rX4iFTFZf8nANVWpiz206RLKwvgZVMmy4vT6kBvnHEnpn5z1Krpzg/EvQbR3swGzkMuZkXrVSFFs4nB/IKJBx7DcJdnkoWaDbux+6Y"
+ "dwuQCqazxyY/UjdORNqgSZhpnaynsfuZJfKy9IrjwHtOfc2GdSeVOG4nidw8QarKRDLFb8w51A0FWK70fLeToF1d/cQJDxoJJ1Ezz6j9RqMASoED1YyheMLgJ4FCk9nv"
+ "AJtygwMPcnPDnaiz+KGFkwjWKXMrZCbJ2xJuzhH44wTVahKpVhOz5+BrfZctVaBtHXIpWBqynyDteJfI6X5vuTRN0FZb5TbcMhHKGmXRMkcm2PCLYUIZ4Z4civtZMUJH"
+ "k7QqdRYPvD4FlksGNMc0pEG2rAtQ4/sdTCs4OvZx0gK1rzz5Htd+e2Lp93AOwORmY16xLFXDo3iFSIo9DkgEFoyjNeBrnOjPhJmf9V8GJTdtmstt6kB6IhwkCWeXmQOl"
+ "mjV7prIr4wFjrMEbVdFoghnhuNw8N4Y8fLEpExRMZmyG4Pa8Ja6Az9yDWTBuPrKbwtAC1Ra7nNwcsheCXGGx5J7wUKnBLJHr60sEG44m1ISdjZcWbjW2/h6xO6b6PG1U"
+ "y9G6+tgCKVI3DFeTTxAmmhestWkw0UBJaw0hXW5OPbi5VPq9n8FjPK1FEtcHmYoKRWB15kjGXTFpDVi0IeZPDRH6CYIBpF0kuUymRRIUynkCx4W0PohyB/GhIL1oEKcw"
+ "6VIP0ouMoIhxLOoJSgTXTwEZJphMXFeuyPz1pKC98c6JIo+Y8CCIBzHHKSotCOfsMRWNIBdX51reKBSkzBpl2BGioVfVGMdCw3OjspA5iMcK+N9SwojvZvcZRhqcSTkj"
+ "Od8kC0oQjTuIB9CFAmk8JXN4kOTTA8SDkIJoAsr2kg/Wk9RUMShUbfrAgO2WEWN9uacEDsgQheRYcFL+VuZyXmujzC03tPQPKzyp9NuSVOyr7aSrirml+8dDkXRuO8FT"
+ "f86FM+l4J6n5uREvcpYh0MfUTBJAaZymlWLAv+myGNQ83qVhtqgmRoJhewNUmo3foekWjqqykG4PicPVkG19pwHsy2lYOMPGhRwizKMNQgXrqRx2V38IiC24Moi2vTzn"
+ "J/TG8FBPNoThqTI3DrgtpjA0c/QRI+Fg+6yXTBz3clBVqNXYD2LFKB3CWa0597oTKNkhhWvcGh4E8+Prj4VJlL1NeqloFfNJ+gyGSXYUToPeuadPB3Z5Pk3VGYplDFYu"
+ "vbKbOmSIrxJuUGsiGDIPG/fLzHXK9gx25azMxdoFvRTMGPfSMKmY3cNCyhmNfiMl+2M0PszHwjjpiUpT5eYMFE2/sZ/7ytj0SlCO8dMjFWyA1iT6g5J1LueDqyxyAHsT"
+ "MX9wjI7R/aaOkGiivVQZc3wioZhg0ra7YHTZylUWppgnVG+iJzyCDRFWmaxTxgRbclSMhrPoxEGke3aVMH7/vIHittP5eGBTpknKxJlhl5njy8oOIso+NOWJerKeoRas"
+ "kzqCcU2XVvx8UMFWUK2G02MY5ACfqNV4nJEJNIIJKDG0y3woNUDbTpCEY4MfrD7TCIokqClWU+iC+W4Yn+MTW0AJNYUb0nkbwaR4ysjCGmVYLtfritoEgvd+nCAc+wMc"
+ "Mrk7Y/0dwzcmj/UA61kxYt2Y5mLOyv0VcilPD/bCJpFVJk7btuTIyUJFQ5RRljE9saoYmvJQrkXs9gwqeZl/ORZs5TToezDNZaXi6bkqhsxbztqww3HR6gvV3lw0cNDe"
+ "uYf6wCTvU8uJGv2QZunu5QPH0mpFZqydZCJ2GLag6MohN5MH2ZDRzxzuKgr4HsNJivmTpoaHBsxc1rTaOf7IxLWj+0H8aFy3oGnmiDGGmgCFmkCCYlSsjzqqmA0LZt1o"
+ "LhiJb1wk3dh9NhXfGhLJ2cwT014zkNekmOKViR9TWErX/OPhE1Q01ZYLNrCrPnLskXSaxw+Ea+9Jag4CzZksI84UfA0bro5gcnAN6QgmVz3XJ+0ac+H4Z4f/+ECtJaij"
+ "0e1gW989JAQs/37rg0CdkClP043hrlLbMrVCbs/hGMwbctb0wRwiTRKHKSsnh8tzBfRVdl9P5VrtUcoqNrUggr0dFKEazZfYn2xBYjlPuj1sHgQy6GUQL8rjhyM4G+7K"
+ "c+ahJrPiGSbTm1nHabr0Cp6KZcG4U/890oLG4i9STbQKmIMUDkppwY4De5FPk0+nxCkjKTwYpQoGCDHCPQY+Ceel2Ok4amomlAUDk9TgQfvhQy9O7W31K/caH7ZCEnSE"
+ "LLlQxMuR8HDT3EVCEEqNZOZU5ZybfAhcHyToN0kLI6TbcuRAPMbSwpZjAUETLvkjY1OmpJlUNLfWhGdmg+aKihHuTZ4PWyJBPAeAPQScwO+auNHxJy2diWZNF0qE3ihh"
+ "GCPMzUGjpsYs/36f7cbDQaJ9nP3RMK9jjzn89GAmxENZB7StcBaKJFzfPcmh+FNar9AggZCUOfMiAzJwaNs9c3YxRg7gVqNJdmA9wHmTG2xIg4TqyVTDqAUiiXuLzi5O"
+ "fUHB2iYcTrSeHyCp9Fm4gX/dINlS+0WwrnDBczeM6YNooQq0Q0FJxUNAX6aVTHjebEioZnoPxan6rGIezlYM+g+705Bgl784R+BLQsCWfiug705fH8+NMjCKg1mHhLKu"
+ "3CqBwlOaDCZqAoUVn4cUThDN1LGxqnrTErGDeJMSMZjoUs4Zy3lMmRLr284atEIdwF4MJm5XG3EKm8FPCvZBJIJ+MR/c4Ni5G1Qudbd5XlLCYe/siok1jLuwY39kV0ik"
+ "/qs5RmYi3n9PNT4kFczURKltnSvHt38z66j3GPKNmYQ5m/pgBCPTHcRmMabVLtR63HNVbNz/HiJQWu0KTiw82Lrqub7nNDcNmOcpxbw3ChBKiXswIMrnT0NesC6g4Rsx"
+ "4cGpnAnTAuyFxeohKpOTunEgHikLNztuLxCZ18hZTJMi4PwMZI9UzJRpBJOL9PPkTOercqm+K3Jz+6xIzBjUPK3gSMsAsCcFgA3nXJTiMh35LC1B6ra9sCAxqfXEbFF7"
+ "MI7L1PO4vtGgJGSI+WM+gzeYFkig7Kbewbjz6vIJky3oOYiV1NBVXTKIKCsypEuZYE//xHqbLoc7YiBYm1NjmuajdxUg+aQ9czgGu2Iqdnva6JjCmbuNZGlxXTL39vih"
+ "pCaG/AM2+LEBq2AqhxunO+iqhH7JQMFKrlwuV3TlxG2LgHPqtOaZiuUSaBLmQgZVEoJW8MnImWcPhsmD2LCuH1hQQ3oHt9nwGcRykC2H4KkK3oJhHj8aQZPNRZXjWMwZ"
+ "JPIJCHRxl4ZLKS1uq2Gwk9i6357CTfQvxh1XkZSgGt4jVLCQE2gHoqj3SN1O6vI8PHfouz1j8jHjpv56UrgavybKOrTf1CROdPOFc/TcNBVsNVowTwkXaBBMRMPI2xhe"
+ "yC2WhFA94kU2Li2YSRX6OSebI0Bm2DnNGzWnnkF+optkAi1cbuGSEAEFhn5rToM8XD3AtMDshxMlhDlgNZSw3NfTc1nqkxS2iJziprmqlMKVTFo4P0/D8HiqgD9IPHYP"
+ "xpnTHHJz32zPC6fauyunC+/IlW/9Rm5OrQF7iSP9vjHN+0UCJ7SyiOdM5+e8cA3pWUD+2UQO63r6LdhJfGnh3uNleBCuIa1hArvfL6xbUCUygeyXcomYyHjyB7fPkrDi"
+ "x6AjcE9StUBzpqG0TDUngoCUn8OnBTuI9xOULt3lg5NZeQvIMLEMBZjU6JdEsLVn35iDed/RppOAxHLreyrOBHsUVerutGCLFgizb0RgoVR7H0Eujps7/M+gYIZJxY6r"
+ "SS8rq5lo5rCJ9aQvofjNzjRdfPKUtEWTKFhNXZteHC9mAzEaoPiDVTEggzUn9djaj/12wcjbsV9mmPJJNHIANc4NJ3jq90yOq8+MXmT8bSTYTg5+rPuNBduEaxG7tXDg"
+ "pF5YCNdKfZBaE5Z6U7GcpsyJjHNmMc6kcXr9/vVk1v+iF8ecWjRwHZ5ZZ24WM8uzYAhpCTeXzZEmYWYC1TbqJL44qpvh9bVg3KkmL4p4aZqbqMKQHJ7q29ZeeWI94iog"
+ "B/6tWwAdVwMKRZ3E5jiFLX0b/HBQKDFPQy4Y5xkd+x2DjY6DB61OAgi7MgarmUZTwQpXoR0+Tpj0ybuAhxb/0fcZRy8y/5YH9CJH75sqPY0MiSVzNmUCZzHg2bjoVDLV"
+ "L5hJTGrPrbhOmaS1fzUI1j6s/zjaQ+PGCDeXgQwDBgwJ5ZoWzji3gRwcV8+DCmXBQBOomkEp7upXUG5WAij6kxNXMhWdJp0hCcd4F86uMc6Hpg8onNVnFEmrB+OGuS5k"
+ "7tDvxWBOUijdAiPh+JXJ5R4cEMq5XlVwhA4N2PzdCB035idkztsc5ce0ADG3tcmDND4Z7rKUnFE90+wh0Xspnssc5cB632Pwg83qXLtBnmfLnKqzaySDtfUVgkWrTR8P"
+ "gV5l9YnDg1RJDRvBR103TFguCsbRsmAzt89IvJA7LOOhHs2CYb2rPVcul9ulYt40zqeVE4bvpxU2gHcrqNM0GD8Y7+GhJWEn1kNOzMTZKQnH7sk3h9xcMlVVWDCTzMOC"
+ "cTaUVlhwIvTv8QgTyrmXD5yWQOsFPAZPaiYbhqXfm81HoBVpVBUkkP2KPs7VUyCv0f2GvtlTTm5X98rsUoqgmKRlrPJMvczFqOKRbAEepp1EvcZDPk6sn3QIaJe+CicA"
+ "64bBPTUhWeS5YZSwOZfgK3SEzCE3HyA/7FlKDpSU7qSgwL6cK1fGyNnzRk66lOPbW7CezEHZ1Q3OV12z+j1AI7SefnPmkFVBLcCKHfnubW/FgU/M3ooq0L6CarOG1+cS"
+ "xygzl4OEYU5DMcXlLFgOctyg1Jt5Q2JrfxfM5Nu0k3JZqY9m5wenkxrBobgGTvjELSqc/Noiaz8emVhWWHCvcc6cr7KbTdaiQuSAkqNgB/EzFw7I8BxFNfRoZqyMuUtE"
+ "hWUwsa1mwYSyraMEDsfvMEoYRdMyARmsiumnVcSAeT8LDyIoaQ0HKJ9ezuop9rbpbbYNqi0WnzmOxaLVY9t6VpDQMRW/YvP+wayjJj/Sk6sVG7BuyvGLmkTFxLjrnThN"
+ "l9r0uVEpcc2ZY7vVcHMKsKMrRxUWRYVjlRYBvbR63vj1pOYj7yQVQij1moIT/Qlyc1Jvx21aE4FJQg6yRzwoYdTZeEjj0LaHlIj1QfbMqRk5D+roOo3KiUhbjooBrxZt"
+ "oNZEAN20rCt3U+DNeZPUqNHxbDo81JzJhLywiYUU7jqzkokjlb6VTyARWJPBxJ7sIz842bC25SgV6pUN1ASGLeBYMW8cMZTIfQajzqu6ga6GpecFi89M1+WpjUt5TCwy"
+ "U6djvE+uZop34U79mD+4lDjkE8SDcJDlod8IKlMK1/x2R7krll3tZ/BKrRArBpFAI5j2j8Y5u81fT8r8z1c5OVLpSzlSqZKuKktOEiAXOStz81Ogunq4hGEqh6SuXK+o"
+ "TbSDaRKb0DL3ZEG/UbHM65gjsV7CHfqu3YFj97ccM4Y5bJuQM08wmTMbOawhXUFhSitzIyYG2L/XifUL6jyI6VAwuThxLRP9wWrqkimXEwsmys2p8+I5dn8CB4MuelAi"
+ "OMrK8YuMCUkxwhzXcXHvmSfnKZ7BfWZK3NiT+QRKn3jQ/sxtINDFbTlxPhYbphnWf+u5FCwJMSFdDFJmBGfsCJmLr5VaIe85NRXMwCeUBqyUC0Nj92BtO6nS09LhSFkK"
+ "Sbg48xEJoCCaSOFGrDd5EN+SOOHBi1QTbaDQsmG/oUjuLaQrFOu4btz6cE8tnBBP8nPGktReeXLitjGn2t+kILd1L3MxL2zXc4IbO0OiGStTyzFKa5u+mGAhXyC2QE6O"
+ "e3n4MmEnyEFKdozRsS1/IhHZbhBStmrhqCrDVcXXGWnv9woGtGbfejXYII7iZzBmHNMrz4KNWOd+bWJguTEdwQr4nn7flEXH0GPA6jNQctNKJo6lZeLH2qC2y0UqDLXz"
+ "puaCVgxiiZxxc4sq9R6rDxdnRDnnhn2zQuKungZupJZIApmXQ60GQ9ZEk3qjrpgo5cb6LiMYFMumuZxNbyCBeSFz81UXPEfuH9hvbOh5+MGnZr7bfnJ+Qoa25Xws+sLH"
+ "BPPcQmq1mxYg983kxQWbg3BqokYExjhhU894XhS9fyq9GqSaPMJx+weOWLEpQsvgSKueIGCo5iJYxlM3cwOB+gV1U245GqkU46rYO8SQecmUSys1izHNTaqMif3JFg8J"
+ "OMb+wbjJICvU9ATpsjLERJlg4eScJXoVEzgvraA5eWwyqKCz8lvyHpt5XhnzROjBONzVMeY+0EF8kGo1IxgUq9fUZrqDBbM5JBXMx4j12ncuHzagOZfqu61HKwq1vy2Y"
+ "9vsTcdMC3WKce5yg3YNeBlmmgr0B0sySNP9yIs74LaCki1tyyIXK5kwdGxuKL72exu6z4VlHTX7M2Y0aMhm7HyXcc6gJY/dj6XezvJFKQuomL44I3INhKK9B3uZYx6AE"
+ "lfWjsTFTIc/G2AvqtUBJyJuby8QafRBwdHySc2qOXWSCaBPmF3OQ2kn7CSLK9gyy+30FW4DbLbNP0ArZdOFyfZMNwyr4vsc4y1eDZ2BTcV1evXqHgi05cVCoJcec/NcT"
+ "eZOL6SxTzYnFFDcpaO/cCyashB/YAswqKdxc39auaazlKHXj3B5sNoLBvCSAZ+N9cn6vDuTVWU0NUhFafnG7uikoEfxWNIc2WpIyew41sZ6YioahJog472ByT1idG81b"
+ "gqotzHHq7K8SKZuiLNwVE06wPguZUxKwYB5LC4KftDTo4QyigVnnBvUrll4FkqoSBLzRgqnVUDf1IY17jYdUrgVojTIqVg/G8YDdLsvLr9ACWQNohbOaMvwC1RZvKQHo"
+ "TtOn50R/lpsTEzV/am5TD8YPFW3RpNh8ZNGo2FG8kGbYi4CaLiap2DCfwZ5eYcjNWZqQwhZYlTvAJ9gRMvXViMFtJURMScC1coK9Ky0q1AqpOZiJIhRsUx8Emub2ouLK"
+ "lC9TlcQdVyXPG9jhaQMDCwUrg6sCGZhzYFsDn1SsjAkGu+IIHWDz6uByKwsGRjtIN75dIldTr6AwpVvNdJ4aoGm4OLt0k9zMlDr2IunEsqseDLP4eVcxFMhrziCOeM6B"
+ "0wcZWhNYMCkRU92cCiMkOn7ycc7YJdMvavNggIKZNTsGZODGq869JDTMIe/CtIw+g2GXZ+s5CGeQZ1qAGBNBJ85TvErkZOiLgjzPQqZypnKIHSJPExiiPpo1JjAZ+kSK"
+ "2PWFr57SdDFKaaWsBoekYoAmFj2TEwxz+Jbng2rPQzCFshGMQ24epKvKbj1i6uDfM6Z2ZQ739cRmMbuhiKloO0fi+1cGDERM3wt47D7z4Yw9lWMW/+atqIYWSN9lnDnN"
+ "CgJdPsEnULRHyAXruug3Dg7idAFz/aUolzguGp4Yhn7SC4rk3hN4wRwUaz0T1QI0BR6MvD0YPxRIwyJhk5/7PDjRn3vkjVC0Fyk3+1Kw4Wj9xiInw2G9OFCNIV2+npik"
+ "YtOo2JTJmpvYTMuMKEEhXeWEC55eF6HI26Ski/WuOGe32MuKcELBDg/qyr31G6GkeFdQXGvnbE6GWKSAHIvZjn2mWcZh1/rvAfZc3IPs8OgF0h70XLG5j9OQObkanTeO"
+ "cL/oxY1+5k2oC8YgGpS6pxvCFhxyU0rkHk2KzDbVYoI9wVnMWz+GCtZf4UZV8JdmjolwglITruX+FqnEsW3ge2zCrY8eLHissjB9ECxvLCeovlYUNFsrylVMrvTVqBSO"
+ "J0el2NQ92LRYOcgEixxkeYBPOIhG/2Qrlu1HKZzbQxzW21ywFbNx7sEslWOWSOQAGsO9aBHqqwU/FWjYb5ouHxy0N5Cz4yBgyhOkSqK8Sg/MQ84NIV3Og5XjpFuwlGdN"
+ "kJ7oijmHuk/wSYa0v9W6BVTCo8VzvJjFR84h790og4IZ/0wpuyQpkUNuCihWY8E4G+fBZaIKtDdxCsqwJHAJsTXKPrBLRpa+HKkV0nNGTrb3W54336BeagOVBIZqLxTK"
+ "+o3gnLpJ5Bzum3CxXD0r15WrnhsyuXJyCA1X8jewubmBfiBF7pgTBOLJ4CczZh13giSypo5auGQuDhkNKpiAs2MLhi2Q0W8slM035wxpXbmPD8wOJCh4eR6CwV8tGBXJ"
+ "GY1POMrPbsfVTDHrOGzS28UZGyL0YJjNoKEAuYN46wcjtvqteseE4ReQfmbEKW4MaapQGOVnDnJiSk29fOfS76l4bH04eeaQqY60mJIA5dwlQQUTrL5P0C/mPg8slnuR"
+ "qPZXv8+wpX9C+en4We8KC9bT4YKtR5vlYpVnyZWb5xa9k1CDnyJmb4WEcu653Zw55NNcVahNnYOPTypaNo9Njn/WPrgOj2+cYnX0lbMfBtH6w7MuYTKYESuoLZgmDttr"
+ "XbmLOhyjeA4wZAo8WLIf5MKacsOSjHPjsyuGagIGOTymg9lDFUz92NTXkED223N9blRvYs+cNaTbuS09WoAQds39TASGotmR3zMDJppJ3mPzkdWDEI1V+h1D1UwatfSS"
+ "CYn1s0vYiXy2RdKNNTetk4qVuYtQlaAb+vqcNXsvYShprR6M5HlOhWPLOs6c2n3CRDEUYFCO6HmfnMG9e/Xzw2tkFCdayVze2E98TgezbRzO3FCAVCjnaq8FMSN4Yx2D"
+ "4lqgblivA0+soTS631TJVPx8YOsxYgQLCyYBpPENT0Osw9O4QXWQwrULgnw9MWJMEC6T68FGp51Zj4dZByCRLJhNBqmaes8bB9HYVVKk2FO73NygehMwSV2L3Jg8n0Ft"
+ "T0pZzvogmOamTpnTap8NSEwdxIa7wpbjTErjzFK4dsFU+nmFufGFXGuGzE7MLV3KwuTEk4IZsQmfYNQYIyJgSAYwAxkMVm4Wc5+g7c6NAQssmNaqFEy05YlrhDThtPxd"
+ "vcOF3dRDZByKNYIpBe0d7aQFYh7XDRQZrz3/blQ1WHvlCdJluVTfFbW7GtMCFE597Vny5RdIiecpJ+fnCdLthxYgd8WYiB2nBSgcsW6QSrH0KkjLN3VXB6k53VCwwzBe"
+ "FKH6UBAAeygnbesOaVuhXuMud8RSuV1mjvKz9gMEG8esPnGYVOUQqUYqBTmDGh7FUxLBbygUEurdlcM4zj0f5rByk4CSHXKBzU05C0anHtZ/kXJhNfU1jBBpRAROMc9z"
+ "rjvulhqxjPiSujUq279M+bhBz9Z66YmJwLYMkgOafzz6UcwEHJ4IFMTRuLlgC1ASZjpVQT8hVwSUP36SOjyJSuPsF32rJyV4Fa1FTLVSo0cRZeXGKB09mKwF0hiKMnGE"
+ "+3CCzKmBY8CCaePamybmXzg2JFcx9WD7iVlZHMVXTCn1EEwYZwSj5KoNmEfaomqtGNB8V7DVbpBDbKPtkk6MC7ybHR/2Gs1AkeoC+g+QEuYvzPPSesS6YvWgnqB7EQe6"
+ "6r+l6IsLlnu2T6mXWrBEnY6LgK32uYBEz9lGx1CsHoybaLnpTAk7r4akIhVOBogH+mqm/oAdWAI2v82hA6N5WkMaa4O4l08NI6ZfJNLlksA1NxtoieCaBzda20AicF+K"
+ "XPrdpJ0QNcCCJYwH7KqPGdMt6CuRQ5pXkMrkihXwmJqoPs6VQpQ95YMJZL8MTo4deAx/Gp1Qwzpz6Nio5R9JId0I+naZpCIGBzFJxQVjdIRz5q7qkAvnoRjMpoAbj2Aa"
+ "9J+KDFywfle/oKvalCk5aO+dsYrJHeWumLTFCEYVugdGqxvBQsXOqx1j27v3eMRTBCNf84QBYFfNZQhGMsHArtxDTYR+EUQuUkkw5aLg0HPRQNmXWzBdMDj2AjqSuTmn"
+ "WZ/NI1/OVDS4iYWVMNTQcyYVZObRBoHe5KSg3tXbBx7KeGyIgK3GE9TWcs3H3DboqzWvoFik8WIq9Nl6Ovzs+5pJVKuP3Aqp282tx7rlhNXUhmoHn0y+nlSXeHS/kUgW"
+ "TGLETv2S+46mehMpb4qZkiWQfuZiLh4bHkcFJYYsGAZr78H6mU8Fk3L3HJVJUo0viE1zh1AkxXGOYrh2TGtiUayqDufiL6oFaC8Ru2JGqx3zZSINGAKo5T+GCFCot5cW"
+ "1rg1Ly1Mc9MmFtiLPDzmYmHBQA/WQymorRvKJ4pdngdpU7BnDsfQg1HTVQsGvkVTqwHVsQdThSLhkOCT7QQto3v9znERHkXSTKVyJo2DCYQsWp8YPWCxASs3ZVKq920T"
+ "C42Q7OCgPcBS3JRsmBE6MuWJMJ2Bw9APjoVA87opc3popmieFsoDbfKg+PGkwi2RHoyT4rbJMRZN1uKxO+byZcVYOEPOiLrPThCS6tqLk2p3LceImbi0TLJH+k0NGuQV"
+ "Uhg+RmyCVjcQulll4RyFytmLGKoJWLJ5aSGhzCDvwthF7nmCrhlJwcszCSiNE0E/oU+qCtRrjxoyhsyLUi7M2c04Flg7yRjO3OBHLpBjIahWOwfcNPl0jpbuSQ78wH5T"
+ "3l2G/caWfg+mlTpCHoUDTTh9gR1pPR8aqcRRI+bwYz1i8jUud8MaZUYOwDbaosL1iBczraMe7VOrnQvGRLJgYuBerCFda6Yw9BbM/NKRaJ/ekBg5gLs8h58QdaMJKK/v"
+ "JPQXSUFCXloDppl39efCUmJr22Ktsma0dCiWCcjcGEK6kuaQw9SQovGNYJjkvZqZFhPL1Tk3bKNVcIYwOqlc2zZj0scWzE59SEygB+MmrKXnqNg3+3reXGviqaBi9VNC"
+ "9FQZk8HmtyPlG3sw5aIlsOfikl6ChesJMRTJjSECplftotyctu1g/OSTyXlGsH5bA6FGsG9IoM9gXO/KBNE4R2CjFwnkpWUYL27woxkzCbNgHKE6KMebckFeRROk+mMk"
+ "HIw51Y8qTiDEZMOwjOfwoG7Yoa1/NOh0HCQczMVZCjeH3DM4zd3zxlkC7wrWZ8PCpUHdTf8BemltXhespt7UH9g32wTUI96MicCkqOa8fVZ9QrDlAWSAcAzu4Rduevyw"
+ "wU+ClA41gKhl4/BFyj2035ycPPZyVswQ2C25NY7LdKeFC2ZSAtThOGvQSl1oo8ql9GYl2GwQerJvpNzsSy6uGLxGckU5dNjMEyN0SN0wBmsT8ARpEieszO3BQF25pk8M"
+ "A9uXByliJyemYmdu6Rjyyny7/NcTE7HjtASqgjbfVYs8qPPKJvBYBV9y5bBQz+LrTO3rDALKXJKFI5XGE6RDRm/8M2jtRy2zLsqUTTY/68ufErErVB91BMOET3owzDLD"
+ "gq2CuYdGeUigjpCQVw6P3fczqSsHSt4fclOWGWNiYbcME++Q6DFhix6MEyTeT6wLYrMY7IuNWUzCou0yfz2pfoEvwoGh1p5+c0OEnwmKxNMNjbJNK+Iq/hDPSVAZsw5b"
+ "+qbqjJ36ujVMkMEZJJVTXzvDzAUjdXjmgnH7TVcOnLCaEw629OdNuBdpInZ34ETsItaQnkndgskMNqFYblKTxoHWvtzC2TX2T4Zc0u9gpGyYhJuzSuqHvvmBMHRZObiK"
+ "yclSPAZfI5FQ7s4nZ81+K8e3d7dsgpFwvp3gtn5pWjLFsrj83HJBQpkl2X5idNmmpWCKDD0YloJYMMWYwPX84CYW9exnFVXnGusYWyB1M59v6CB+D8Ux8jYma/GpTImN"
+ "+0HxNVduCpBqwU5Q1rl4EHdVdPUYpqwoqApVpJfwVPPqaTQ+Cpz3lMjZsMYMjrR6MMUshWzcLz0FZ4Jp4JCbYwKPfTMJnJBuFJBxHzxWvg+eJxLnM9jBvcegYDFoPE+Q"
+ "VHootkCOcteGERRzaYlS1zr8xweHmvCcwJBp23IZj61FLOE5NE5KScgcAgqt7Cc4rLNgUCjDg9QNwkcPIjDnHrrr41Go42qXyLGOSeXSHixwkoqrX1Ys2TcoJdac6Etx"
+ "KX5msuJH4ETo3UOaj9SYSc+qE6aOHT64m1pNQYayUFyytSagXb2QzttLBp1KFzO9pJb+oiEXSk10DlKU8iqdt+KtjEFWiWF4MPm1KYPmNJOGA0MnCVieOWOEYUWTeA5V"
+ "44w3xQEZlBpSWzDZOWzBi8yILwEJ92conDZO68Ug9mjNPyRhc+qmXLT+Ej8wW/FqIyaq2V43eUHUgPdQHMPwWDAPBvuKgXiMA98oQEjluiBG3v7g2klGlaW0Ip11CrD0"
+ "qmjMWBJSKGrRCGajXGqBfD1BVulzO7nK4qmt5AQJFyRpBtIgQlmwNGPNzXgXjDflIinKGkkV+qjFLwoJZJstKtZP6sH6IQIh23uwDWsSmyLD1t8jo/ttQqkU7SH4acJu"
+ "6iAbJ/wdZJEXdaEd3CRmyDpvSpm+WjApUGFxGD8R0/2WoH6FNtp+koLVZ/CUR+9Qx8bosrskDnm1i+em4rvMx9cTIt1vfZdh2dWmhZM53Dh/DufW152wq3rtpSBn8w0a"
+ "TtlQvGUKSvwWtsAm8JxqmDOABqaSsOTEeSJ8BmM+2uK1LJQmg43PsLRgMV05qoSfg5yQBv173I8ljrO3cS5UVc/cQOsTW6CV2WpDjIS6Zfq650QSzFOck7w3sRouWMFm"
+ "CG4qJ1dUT8UaVxA9YCqYP0cP5gMmuekmUk1UUstYXiBp5RTNezDFQF6SJGCXp5BSby9N3GTQJDuwFs+lNtH6yqA0LptSUwukZa4f7ZrGJxatnpGD5tUzKNZ1qU8pB0X6"
+ "qTlyOMC66UT6WNzY4TgAQ5TWsmF4sMqzBwsZc+iQlUMMlYyVFQMw5LFkbqCTOMmOhAkMGTqJVD4B4djPXOq8Qav/6UEmwlO5WC6DRASXOVaMBcPcqXswzivduXRysVwC"
+ "nSVckoPDQkUfILa9e/vFTBSGJ5ruN/XVIqjqbMHWQnlG92CyGpOPCQZCNMJZOGhvsBYgpzCkPZfLzGwwyEVtaQs2RBWZi8Z0eLCT+FCPIQ7fHj+coRDWRzUo1M0lIXvG"
+ "5u/DCYebU+92EFOtCY+x7ftv7Rc11gdZg9wLlRes/TK7qOtz5cyt3sonWG1hwaBQhk4CO6mf+iBMsEUnjnC/qCkqQjKwC8kUn81TnGIo9mALlqTOOTVzsoCCkVZJfZ81"
+ "Hxh4wQAMZagYnEki8KyzxwqLSaVhJbx8gF05wwthJ5Zp0GNljKRdA+Up1INxCY8Ey0Kgo9g4OFi/URaJTKT++xCwI32fGVMScN88iPJ69WM4L9Ah8tp8w9QiLwVV6C9Q"
+ "tsC1G1Q0bxm0lmi5gOikDEoEW87ILRDOau0N8sIUiasv3Aje3IuK59yLUqOAxJXUCDaPH8wNu0rkBD5Lnias/V2U8rYawUCG4nPTCTv1n3IG7NlSnk4Mk5rW88bUsZPm"
+ "iNEeYsYAh4aaOESpEySaNzvVlovyzWM2zgZkQAK9g3lMI8GFG8z1A0kvCiBA+udxPxXs0ERNj4NgQ0iTxikVc6cxtRpGQmME46pck8bhkAymw4PBlvdewIMQjaoPaoHs"
+ "mhLmLrvL/PWkTmJTq8EYitsNkko3sjWxFvm4qbv6EWycCx3EFgyb5n5iC6AJ/AuzLx/eIxzUZSHdHhYNEcN+LwrOzxbFuP0DD8IdV7PZNWIQjQDay74FZMBgmJ/n3C80"
+ "rMMzb8plqRYMA38b0qUX8cyjWTBQrSZyB/FE6utL4STKDHzCMRH6oY95ZjiJnvtqpA+lM2tILJe7r8z1ru7zwJpy7jbbHerRbr24TX2TjTJDumAqh30xzp46ri7l5Btd"
+ "O0FywFuACnqNzUfu1G+KBgucSkIDEYeuWaoPxXKgZq9ZkglH4+sXDIgH2eSVqHdZpZ0lQUPP4mcDSROhTB/kYAKNYFIbVsVkkCju0okZRhuQgSs8DcjAvUYTScAKtM9g"
+ "GJDBm7wWZbuzYCOtyBlsmiIDCJEO2VNW2AbRqDJRPgXBpwVTP34LW0Bf7R0MIpCEvp8xRYbhg4MtfRVsVueCgCOtw2zgIVsmd2zSFyS0Rg5VjBbjDqNTY+CTM3F26XsW"
+ "c9lknm1XUGHIJDuYSCMY2JrowbgkdaueU5YzXybIFNW5NYcFa4S8xUgu5rv1SKRV0spNLDRGwcCUOljHmC8TJ8hgsBquR9yLsxlrTiwKumEP9yIqB5nPGYrkPsEnFHdq"
+ "9mCHZ9441TDz+MHeopvNg5WyRX0Ho8AnUn2kFsksJWIwr3lQL6FgU1EOJzrlibtiJi1JysLUMRPIvHRTX/oY5nDwOTAdnp7JQaGGxw8GancfGaMn9mAKBrvPwvnF3MKh"
+ "RJ3Z5lIzhLeAzAsMhhVoVz+IseXYSMn7VuTC4NiNnHm2odgB5Y09GKVQZsES5xzQ5AXKGcl+YrlcE8yd2jA8+twoMEPlvPgsGJfvuAp+Mle3zM1H6uY/MIEQkzPCGnMW"
+ "jPQ/K9jIouTKaU2YcRe2+EsuHB4EZMBbMBC6WUhTw6KgGMlTG6donqRJoCQ7kiQObRs15IWa50Zs6uM+FYaocFE4rzUD5h0YycIgh3M5P6hgMzawixIuLJsLJ3cOu5BP"
+ "joQTFBNUtGAzd30GOQsmVzMkqKhKNwg163cDvrb4nYKviXVCOFUoDgp1SAhYrr+fPRZ1hOxaz0rpYPZgDVv8PVjFzqtdi2L0/l0fD241Srqp+t35KgvWJl41l9ULI+j/"
+ "8Auns6IXqNqrJ9jX1xPE6/fTSh9UR1oDRuewYP7DuCpQMOEcp5ZeUXOQQw/qEXPv0IJp8pWiTvWTsR/60BzSjmFMuWCRm8tSFzkyJxt2lsYpeZkgGqWiMec0Y/2kOYcJ"
+ "O/aHIBpVWMwbiGSYN+OEUfhGc+OjRj8jGDX6maXcgbrSDEp5U7KKUjliXQ+2BcindMiGcfm3cC2XHmw9uZmFrOmkrjN3n6Cn+OUTVw1efjbvLua8ajdp3FWUm8C3PM+Y"
+ "NmXP47CS2rUtR6GwDE1XTvK+Zq6uMMBQyNRXqxsILqifCrDI8TjM1ijulAXDxv1Vn03jxMA0qnCzuhEMS0MqKEJvnnUnJoRZTZWVSokNvoZlPCWHvtGoYLpy49zcQK32"
+ "XHsKAsVyOYD8/uTnLVMaZUnnjNmiggabzsW7cFOmeHOsBxd9qyeGAvSYCKahAAtmEjas/7CmSxRTasIcFBfBIPQhQxIr72DgODfkAjnOWzB9YQOLHgz0rDNbJuzc78ue"
+ "aycFAdX8DQWI8WUH5JAiGAWJfYVAFMXDxwk7Qo5PyQ4k3iGFI8YcAvI8DXIIyvOBYpG7aZRxInZfT86u8QXiQVYPOgesm68YSnrtGXE5FyY1eBRJ1VfmJH6Y8CDVvtLG"
+ "1RU9WMXYss60jLC80dTMsU6qBs+BypQTmx1+ntoTA8ip1KoYioK2aLoxSfMejFOGX5TjTbmFJOHMd6mNEoaazX2YOkJmubmZ5ywlYKXnLJxor5vOwmVXE2ncNZ0caMJN"
+ "QTink0mKTBRGQ67iMaEVOY2lRQUj6ZAf5DT35cOmVAL+EtD67/JlxZbjRUIOLy0Zs3w1XTmMP9KDYRrj7hoCn5A1astgQ7r5SorY5QeWhDRQTLQHOxMl6dKDgUYnTQ7B"
+ "WhP1TJwfSD0hquwI5r9xQ3GDHGKDn6rK5frlrBV7sqFRRo0hS55AQTS5Y6YgZU8FM54nWebmD06O2CVSGiepBg7pYlQ3Dp3EkSENneQrlWBFOQuWOEZj1lG5vgUjNcr6"
+ "/Qnt67dGGWaLymmM92AHR3aLsnAqCQZf4wTRct/U3JMVTO7KgvlASVMGkz6hwN8m9YZttIG7wnxzpRSsvWnkfs7rWOaCwZOG4SuFobdgG7UaDw6bZMFApvghhaM97JnD"
+ "nrjdg0Px3Zz/qOtzF1CKe+v1GfYaVx8mLRABbQUbPO4hl89UBa+XoRuhMobrJbk3yIsMxhUWi+eElt0iK3c2zsbho9LvmVNqN/PQxNVn8ya1UmexBcPO/Vka5z0yk0yE"
+ "uX8yzqTXUjmqbzuDclc9GDgZnCX0MgaKNpWbs4yeim9NoYN/UhC/NknhTn05F653NYJh4loFFD+2goki9/dgKzc/kwBau0mgiEzOIPTnGigF2JJXzr1o75+sbRCno0h4"
+ "YlJebxUN6KNNmnaJECp1KiemeO82klT6Es4L21wvOeGCLb/6LQMF05K0MuLpzj03H4J/MsFaXqiXaLLO4jEmsIDEuvHNqE/mtJdMUCjr8IDd71HFUMF6bZYwRTST7MDO"
+ "KzADMWxB4kTsSl4S1geJUi6MF9MjzZqgO8YMhZTKQiwYE8mNfiN3n+0y54kKJuFRlHq2qciCnY2bFhvWMcHsJZpD9UEE0+WFHSC9pubyHWdvEEsLiqSFQhxaC5Crz/q6"
+ "59KCoCWD8nxlxgT226bZVBmQYHPwj0eGPlr0K5dd7T5CkZxVnmCwedgPQ8Emz7EeXAj5xjKefnMqcnHa79UPYqW+2WLBqM+294M4Up2yB7lAyG3mGnlcrf3ExxKstSio"
+ "mJfLyjkokloTT10KpnLYz0aqrDBhC/MJ4+qziLWT1oCd+fZk5l4EBeuro6f60HrklqKzbgHWubJiMHKF7ieOmAlmJRMTyX36WEhgMoNyTtyufne/PVM1FXkIVp/psnIO"
+ "HdGHhm20IidXMoFr0RrSILVuOYUb95u27REwiAbY1y9oJ3W4F0HBpKQMnfkD6XJhw7pduOmI0+K57MpaEzd1OibldAf7N7s8hsx7WKs9Qfn33M9F7PZs3MK3JLVt+oKC"
+ "tbtwdiAL2W/sZz4HOTxOqoRxA0fMYXiShsp15U5semYniNzYCbKdK+cIHOUbFsstJAJ2efl1o3aaueZS11m/PIPHvppeWrRB79GDKFH3ssOROvcPssydOCDUz1AoaOnL"
+ "6ABCwRYBrf/8Ba796Qaral9FsFZqNFUoCuli4BMsLXj5flxR6KSSK+a64/rNyeVyVcIlCzRnOnzhWhNVy4r5yFVtXJJ6gBOtXsCXgtWDupzcGNKeTLEnS8V/pWycX0I5"
+ "zjujqvRvRq1Hy62w8XF/rBPDaEjQ3RQgkHiJnMCH08RIoGAvTQuJldPohTn5zRYVAy3nxtkMulb0wpAuOfiLUxM9OQ8ot+XISQmsWjiKsykyQKFGdsWRSreTg/C4cGMi"
+ "mGMWQ/b1HzlV6KrOJXDNzaJgQ9r0rrAC/q1bAL3GpJTlpTMfiylg0N6QexHzQS39yGG8rMMznVS4tRe52Hm15YJ12o2WjnFKnUxFNoGoD9YCxNJUe4UYYMgKQcXatlq5"
+ "Xb3njVOAiBLmHKDVWM4lY4pXCwk0PxRkTj2LZQVQNAFhxE7KvHFWST0aloO8QK6beyoknW4/Y/dju3ol2f2r5sejQDfaW3gQ2mqGB3lSS+TdB4F2GomldEuOPnloUr0o"
+ "yGXqOQFHBF7PnqVqicgSUQHFAMM5UyqpgxfDdeXq0+aCUE7c67McHtAJEkmkiy7xxNgjdnliwLyBmmiQqMs8Rp5EJGftpBfni2pSAlhjv5JU8ZcHsd/1DhdG5Cs5V7Pe"
+ "QXaar4VjsO62PiiDvEcuBbupXyRTPCjIGawe7IPMQTj8mjd9T+pwHNq2VPu73zActLecDww04V45LyVDBLRhRJmfDLQmCgkY0oszOhmNMmqFrGdtckG5/i4g+CT4nLCb"
+ "+lP5BKTLIpHcAOZhNhY9u4pcdpX7fYY54Sgoju0WfWGDQQfCGEbeyOk677lyEPrtXLHpiPNrkpmiIswSFKO7tRy5FmAv4AMmc8hBJtwwp3lhGPolgzDAt3QpFMy8tAK1"
+ "z5py5BFXe3XmBTqyzNmNApQ5s+IDhwig1gQ50TLhQY9RVeacOMWrlCF+hf3W/lxYKme+TFSs/s1Kr88gAkkQECf61iijsqsgHDf3qWXGoi36DZMCHIp5mE1Bz0G4YlAi"
+ "1kwydn/FUKLvhjQVrJKamyVzEtKffjFQsOZj5E6QnutjK+TlEwcTNQ3phVoiMmEF029BXkylO4FmD06jx2pBq2J2LE0d34xqX2mMGKrGZjEBG+cqaQ65a6H0zAfuKmMT"
+ "eNtnUKjRauck816kVVJuFBXSDVFWrswtepuWP/N0/abmOjy7gqpQjfSx2M7IDVj7FcOdxLXpE/MfvrzZ5kLHVTBKGLVCVikcyqvfMFyZq48iWBWzqXC6YSUvK9bh2foC"
+ "wabiClqKO3AIad8MtLJ9elNEg4LtZ/KY6I+hGLCMpxeDUCQ3kJucVruklimAtN3UJVAQ2HmTir3GtWROgkpJivMcjBoAJQbLDYr5z9vJ4UEWiZz0d1HQAK02UNZZTsPb"
+ "QsGC5oQxgXepT+wEKWetnI1ceXFTpr7uPdZ26SkqORnE6s7+GgNGmxqyYZiIhptJc5qvpybMf1hW0Ow4nqVguIldIgeSFpC7bTV18RSttN0cgdVNfZ9h0SYS5KUh+EwN"
+ "0D7x+lCrnVP9Hg6KJVMjrYv0P1t6DoLlBVZYYLn+nuXrSV0x22hIM7H6N6vchPV9gjCxDMPD5d8m/5ALBNKYTnDm+Y00Dz20RI8BzXOdM5br16YThrviTFGdqzlkTLmg"
+ "f7HWqLWfcuF4D6Qw5VtuViJzWacMNje51MpZmQvaD28kX3YCJ8f9m4GmhhpM2gIKdmjiPH6MHIAJhMTsOcD+wy8ckLhtsiwKvUjzGXwgkdxbH4SToJL5wNouRZ4bpv8w"
+ "lZtUWo6cBJVJAWLUy10fD0wjeBGsq2/dAq2Ueno/rk5OCzCRsr3lrFiqP6ZMWLCHTORQfF1BrXaw1b6CBh3mdUzZ41nb9lw3rHfFqcqZKCtInFq8cto4AnpmWGuCM8OO"
+ "rD/1zLk9rDl9CGVwBXoXjT5I8dTif3kOIO3ssTBGh/QtjdnxcQmIsxOES4jdpQXUSVUwS7UqRguU9FiPmFQY4lCAUc5+OELzs80XLG10S075opL9SSpHYS3kqW/MfkxO"
+ "NMqdsITHRlpMJGc0PhAPYuMRTPWH5AG7qn2nNaiGf+jIHJmz8Qx3xRr7W38uLJXrB3HC/EDmDPpTLwKmBauAc2qNytnIvTgX+CHUpFiZuwuIgF0ExOvPoS8Riio+Z7BA"
+ "i/KNsy/apXBHyFTOhC19X6tQhlNuORcuJZ6MUA3FMtVeTOTQzVKiUl25yXwGMUSZGQJTYMra9MHhiDNoakjC9V3wiUMMTSonE6n/XqT+wyqB80ry1S8YkKGVkxPdXDRw"
+ "gsSfDvfMGqkec4t5q2goZUW5gP4LzpOs4zh0LaBgMynE88bKeSZRfbMhiUhuZFdUbvUp9UbdMcdZGvYatzNxIuNyFs4OpJ8fnFXSxakWjK4cd1wtmjhpnL6pudVo+iCc"
+ "ikbMXAvwKfXrKVDrNuZNYoTcR/p95oVquyySPMZWGZYZ2Kb2nG2ukXDSQhVNgqnCO2Owct0kt4Jm6QaFipwgWsSgJ859yLxh05hH0DYzfsCDqvL1pGaeWkrGIBqLgJJ5"
+ "MaeCgcr2XE9syqTLC0NCuSnnAztBTGYFO4kr2SM2i01s7ff0mxuwtn4uYsGK3PGkPMkWKVyPeMsnZ7tT8jRhS7/XuL3ShZ5s1cRxBidNCfNwyXOTC/M6HqaGUGKwnZAW"
+ "sf1M7oqjy1q3kdpo/RyuPUllPtrqDUacGD3uVQon5j/fhWOg9WAgQppMeDi7KTfmZ4oV1cuZZqyAP/oV46nFb0gXTG/W5p3cpt5keVFzz17FTCfFMDINaSaSBVPPYVLj"
+ "DZrTWGsTK5piBkVZ95xAIEMuignDR9+wfMewBRzS/IPUByHPYdOQ5rSTgnxwxeAnN5f5bmMUQ9VnUXYMJTr0iLGaeio5H9ThKNPCWW+Hc+YGrFXvmE9oiei5aqL6IO+h"
+ "OIQ+uQyTSlkH9IyY415KSSem5m9LH+vbPrQsPkGaaKYFSMQZP/0oggXDbGztV86JVBO9OaXUkRFTwdTwr1QwmUB9kNctmDO7QaG+nhRONAjYAjzE2nJQMDOnwZLUKXMA"
+ "aWOPYEpvw3SeAzKEyJ36As59DJP6zNSdppdaycR8tj1XxSSCNYAc56FRRlFjivaskVoglhFz7H4FDYVeyoEmxoAV6zeaCRT2ZE8pHMgraDyxHuBE+gzuJzixWLnB8XDe"
+ "rphAiGwUDcGZktfKNaT1ASLKJLUXNqfudxn3ZD3TX6VAVC3TTuKwBRDFc/xyvbi+C3gyGtlN0lKgOiZl0BTh8MpBoVrhFBV/trdilmOSXp4plF/1jLjXgkgog0L59gHF"
+ "Ms86pdDYpvsNiv5sxffSgvGCl/5g6YM6iG+OfmbdAq55ZQRFLNl/eZDLhOlt2s86qdjIYtGXx3rtEyn/kDluv+GuKke4f8hwuGdW5BzkXKju1X4uHHFqJXN9XVaOMyhB"
+ "MZFD1yQlbOkPdBKVypmIHRTKXiPopfUsoPD3M5eHp8yw88INBntNzenCG2Do7B+NKQgb6ZsLIlJ/VvKCDuIDFKtxcq49L4C4U4deIDfXB6EOEDed4NKvpItzLhcIhZIT"
+ "g34PMRJTNEIq3QwqlzpJfsI+WvScFKDbSfPQxQeOENnrdw5NmfTF9UH6yufga8aLwZxKt1wEA2lM/dEwkNeA0FN7zfSIsWJw4ZICAwzd3AQtciYFb4IiNtIa6CRqGFPy"
+ "xIFPNkmcFmA5n4oBa/oHC9yAFWwR9xMkei4tsAejjv0XyegwfWBKPd2mTEugkpD/r7czsHIchIFoK7QiG8UoBpOFeHO+/gs5ybkafgW8zWIx0oxmdntBJ6VbK8d1g35B"
+ "OMnh+xqKAZ5vBis0KJOCYaub88RufppvMOSng7q8dPSF27FofRbuMNtBMeXRuWW3oI4xX+dIKqX2gIPuN4z0fNT40qBGN0eOBbXQ8e6LYT4rRebERDybnVNfL6aIZA2C"
+ "BCIR1qoUX/HFIJRjR6oX6EK/qx6co7kOju7fdHB9zNrr2Rbq6hflRhOh68Uy7r1+7NhHXU5QIV1IyWH+CKdf81rFhfR6IeY2gTs3jo5NcQExiL16peKSvKFu2HWULBVb"
+ "RVg4xvNeKuXMtd7ac6c4+LbudlBxfE3+GOZcsMl4mEKUhcMdtQ3qqqs4mFNICCgLhkDiZ5ycVZNffM4+XVvj9PqZ9OyY0k5Q6fLD7cCr9+8LxVRnRzwHxcVk0s3fWzNu"
+ "xyI8pLHDvAgLlnv5BKUFqfSLZOAvLppd/+puVAlZC2ch/X9GDC0YaR2cxdBa1Ysj1OnKAB0ZDv21SpFamjnCImV1KPeBTot4eyys9NkHmFTqfxYm7r13BqGzItltYGU/"
+ "8s/IsDVw4f7et4e05tKWsC4gjkrpbRzJlAq53f8AMw0D8PycFOJR7+CxCU8oXbDfsaIhUKCqJtkOWgQ3q9x3VslN8adw9o1hqbhQZ6XbE4pqZD5k1vEo2FQ/zCLJHYt+"
+ "blWgiKvV0Q6m0egT1Ej7XzU/HUpR/LWxWYUGjqFpx96zb38mDAjJ5Fr6ooe31StTRBqGvVO0ueXAxgU2/YWh/m3bzeZCh92mP5gD7OSiR1LEWGBQ7q9QgtQUGATUyrWe"
+ "FbshjxHpRZNxRfOOmmMGN7kqNnTZBLRPfzgmwGQ14QC7UT4rzdYi1PR786uPDds/pGtvk2pYcYxCLJQ7zvvkhgWO9fOwvDHX8Tu2fRXkmfH+7K2Uua0cb8P2Zf3qr52S"
+ "J9UOCob2DppIP/t8KbUTtnZwwvMJWEAd9gsmJaXizSCG9kFZXijKDqxjSuPMvVLBTIM76nY+wc66pVDYVO7rBVgZdjBCoDAecr9hwYNpPpd+cXZGC+kPUuTiRDxFBmez"
+ "IuPA8vG8NuqC/Yz1zh6hDD6HGIblwjsJo0ciNKNRhb/0hbMzWhXMGfRvGvOwSx/jHDuCYOXMSOppm1GH7XpwIT9PWQ2DqRtZrmJGvNeLqfua24kl4Uwjvdpj6MJlQxrX"
+ "WPgTI9gTs5wk8WO1GjVy/A0vfyq+aBYyxfk6uIUOv40cKV4Ee83ugTRHsOY+JjYwyAJ6uiyjZ6WQo9R+TOg5+wesxl2QvJYIAA==")
g = base64.b64decode(_g)[1:]
for i in range(ord(base64.b64decode(_g)[0])):
g = zlib.decompress(g, 16+zlib.MAX_WBITS)
g=list(map(ord, g))
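# The decompressed program is treated as a grid of character codes, 109
# columns by 5164 rows. gr()/gw() read and write cells with bounds checks,
# while td()/tm() are division and modulo helpers that return 0 on a zero
# divisor instead of raising.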
def gr(x,y):
if(x>=0 and y>=0 and x<109 and y<5164):
return g[y*109 + x];
return 0;
def gw(x,y,v):
if(x>=0 and y>=0 and x<109 and y<5164):
g[y*109 + x]=v;
def td(a,b):
return ((0)if(b==0)else(a//b))
def tm(a,b):
return ((0)if(b==0)else(a%b))
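# A global operand stack for the interpreter: sp() pops (returning 0 when
# empty), sa() pushes, and sr() peeks at the top value.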
s=[]
def sp():
global s
if (len(s) == 0):
return 0
return s.pop()
def sa(v):
global s
s.append(v)
def sr():
global s
if (len(s) == 0):
return 0
return s[-1]
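# Each _N() function below implements one state of the interpreter; the
# integer it returns selects the next state via the dispatch table m defined
# at the bottom of the file.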
def _0():
gw(2,0,5163)
gw(0,0,5163)
return 1
def _1():
gw(1,0,0)
sa(0)
sa(gr(0,gr(0,0))-64)
sa((1)if((gr(0,gr(0,0))-64)>0)else(0))
return 2
def _2():
return (22)if(sp()!=0)else(3)
def _3():
gw(1,0,gr(1,0)*28)
sp();
return 4
def _4():
sa(sp()+1)
return (21)if(sr()!=12)else(5)
def _5():
global t0
gw(0,gr(0,0),gr(1,0))
t0=gr(0,0)-1
gw(0,0,gr(0,0)-1)
sp();
return (1)if((t0)!=0)else(6)
def _6():
gw(3,0,gr(2,0))
return 7
def _7():
gw(4,0,0)
return 8
def _8():
return (17)if(gr(0,gr(4,0)+1)>gr(0,gr(4,0)+2))else(9)
def _9():
global t0
t0=gr(4,0)+1
gw(4,0,gr(4,0)+1)
t0=(1)if(t0<(gr(3,0)-1))else(0)
return (8)if((t0)!=0)else(10)
def _10():
global t0
t0=gr(3,0)-2
gw(3,0,gr(3,0)-1)
return (7)if((t0)!=0)else(11)
def _11():
gw(9,0,0)
gw(5,0,gr(2,0))
return 12
def _12():
sa(1)
sa(gr(1,gr(5,0)))
sa(gr(1,gr(5,0))-32)
return 13
def _13():
return (16)if(sp()!=0)else(14)
def _14():
sp();
sp();
sp();
sa(gr(5,0)-1)
gw(5,0,gr(5,0)-1)
return (12)if(sp()!=0)else(15)
def _15():
sys.stdout.write(str(gr(9,0))+" ")
sys.stdout.flush()
return 23
def _16():
sa(sp()-64)
sa(sp()*gr(5,0))
sa(sp()+gr(9,0))
gw(9,0,sp())
sa(sp()+1)
sa(sr());
sa(gr(5,0))
v0=sp()
sa(gr(sp(),v0))
sa(sr()-32)
return 13
def _17():
global t0
t0=gr(4,0)
gw(6,0,gr(4,0)+1)
gw(7,0,t0)
gw(8,0,12)
sa(gr(12,gr(6,0)+1))
gw(gr(8,0),gr(6,0)+1,gr(12,gr(7,0)+1))
return 18
def _18():
global t0
gw(gr(8,0),gr(7,0)+1,sp())
t0=gr(8,0)
sa(gr(8,0)-1)
gw(8,0,gr(8,0)-1)
return (20)if((t0)!=0)else(19)
def _19():
sp();
return 9
def _20():
global t0
sa(sr());
sa(gr(6,0)+1)
v0=sp()
sa(gr(sp(),v0))
v0=sp()
v1=sp()
sa(v0)
sa(v1)
sa(gr(7,0)+1)
v0=sp()
t0=gr(sp(),v0)
gw(gr(8,0),gr(6,0)+1,t0)
return 18
def _21():
sa(sr());
sa(gr(0,0))
v0=sp()
sa(gr(sp(),v0))
sa(sp()-64)
sa((1)if(sr()>0)else(0))
return 2
def _22():
sa(sp()+(gr(1,0)*28))
gw(1,0,sp())
return 4
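# Dispatch table and driver loop: states run until one returns an index past
# the end of the table (only _15 returns 23, after printing the result).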
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21,_22]
c=0
while c<23:
c=m[c]()
|
|
from __future__ import print_function, unicode_literals
import six
import os, sys
from io import BytesIO
from twisted.python import usage
from twisted.internet import defer
# does "flappserver start" need us to refrain from importing the reactor here?
import foolscap
from foolscap.api import Tub, Referenceable, fireEventually
class BaseOptions(usage.Options):
def opt_h(self):
return self.opt_help()
class UploadFileOptions(BaseOptions):
def getSynopsis(self):
return "Usage: flappclient [--furl=|--furlfile] upload-file SOURCEFILES.."
def parseArgs(self, *sourcefiles):
self.sourcefiles = sourcefiles
longdesc = """This client sends one or more files to the upload-file
service waiting at the given FURL. All files will be placed in the
pre-configured target directory, using the basename of each SOURCEFILE
argument."""
class Uploader(Referenceable):
def run(self, rref, sourcefile, name):
self.f = open(os.path.expanduser(sourcefile), "rb")
return rref.callRemote("putfile", six.ensure_binary(name), self)
def remote_read(self, size):
return self.f.read(size)
class UploadFile(Referenceable):
def run(self, rref, options):
d = defer.succeed(None)
for sf in options.sourcefiles:
name = os.path.basename(sf)
d.addCallback(self._upload, rref, sf, name)
d.addCallback(self._done, options, name)
d.addCallback(lambda _ign: 0)
return d
def _upload(self, _ignored, rref, sf, name):
return Uploader().run(rref, sf, name)
def _done(self, _ignored, options, name):
options.stdout.write(six.ensure_binary("%s: uploaded\n" % name))
class RunCommandOptions(BaseOptions):
def getSynopsis(self):
return "Usage: flappclient [--furl=|--furlfile] run-command"
longdesc = """This client triggers a prearranged command to be executed
by the run-command service waiting at the given FURL. The executable, its
working directory, and all arguments are configured by the server. Unless
the server has overridden the defaults, this client will emit the
command's stdout and stderr as it runs, and will exit with the same
result code as the remote command. If the server desires it, this client
will read data from stdin and send everything (plus a close-stdin event)
to the server.
This client has no control over the command being run or its
arguments."""
from twisted.internet.stdio import StandardIO as TwitchyStandardIO
class StandardIO(TwitchyStandardIO):
def childConnectionLost(self, fd, reason):
        # the usual StandardIO class doesn't seem to handle half-closed stdio
        # well, specifically the case where our stdout is closed and then some
        # data is written to our stdin. The class responds to stdout's closure by
# shutting down everything. I think this is related to
# ProcessWriter.doRead returning CONNECTION_LOST instead of
# CONNECTION_DONE (which ProcessWriter.connectionLost sends along to
# StandardIO.childConnectionLost). There is code in
# StandardIO.childConnectionLost to treat CONNECTION_DONE as a
# half-close, but not CONNECTION_LOST.
#
# so this hack is to make it look more like a half-close
#print >>sys.stderr, "my StandardIO.childConnectionLost", fd, reason.value
from twisted.internet import error, main
from twisted.python import failure
if reason.check(error.ConnectionLost) and fd == "write":
#print >>sys.stderr, " fixing"
reason = failure.Failure(main.CONNECTION_DONE)
return TwitchyStandardIO.childConnectionLost(self, fd, reason)
from twisted.internet.protocol import Protocol
#from zope.interface import implements
#from twisted.internet.interfaces import IHalfCloseableProtocol
def wrap_in_binary_mode(f):
if hasattr(f, "buffer"):
# py3 "text file", as returned by open(), or sys.std(in|out|err)
return f.buffer # _io.BufferedWriter
assert isinstance(f, BytesIO)
return f
class RunCommand(Referenceable, Protocol):
#implements(IHalfCloseableProtocol)
def run(self, rref, options):
self.done = False
self.d = defer.Deferred()
rref.notifyOnDisconnect(self._done, 3)
self.stdin_writer = None
self.stdio = options.stdio
self.stdout = options.stdout
self.stderr = options.stderr
d = rref.callRemote("execute", self)
d.addCallback(self._started)
d.addErrback(self._err)
return self.d
def dataReceived(self, data):
if not isinstance(data, bytes):
raise TypeError("stdin can accept only strings of bytes, not '%s'"
% (type(data),))
# this is from stdin. It shouldn't be called until after _started
# sets up stdio and self.stdin_writer
self.stdin_writer.callRemoteOnly("feed_stdin", data)
def connectionLost(self, reason):
# likewise, this won't be called unless _started wanted stdin
self.stdin_writer.callRemoteOnly("close_stdin")
def _started(self, stdin_writer):
if stdin_writer:
self.stdin_writer = stdin_writer # rref
self.stdio(self) # start accepting stdin
# otherwise they don't want our stdin, so leave stdin_writer=None
def remote_stdout(self, data):
#print(b"remote_stdout", type(data))
assert isinstance(data, bytes)
#print(data)
self.stdout.write(data)
self.stdout.flush()
#print(b"flushed stdout")
def remote_stderr(self, data):
assert isinstance(data, bytes)
self.stderr.write(data)
self.stderr.flush()
def remote_done(self, signal, exitcode):
if signal:
self._done(127)
else:
self._done(exitcode)
def _err(self, f):
self._done(f)
def _done(self, res):
if not self.done:
self.done = True
self.d.callback(res)
class ClientOptions(usage.Options):
synopsis = "Usage: flappclient [--furl=|--furlfile=] COMMAND"
optParameters = [
("furl", None, None, "FURL of the service to contact"),
("furlfile", "f", None, "file containing the FURL of the service"),
]
longdesc = """This client invokes a remote service that is running as
part of a 'flappserver'. Each service lives at a specific secret FURL,
which starts with 'pb://'. This FURL can be passed on the command line
with --furl=FURL, or it can be stored in a file (along with comment lines
that start with '#') and passed with --furlfile=FILE.
Each service has a specific COMMAND type, and the client invocation must
match the service. For more details on a specific command, run
'flappclient COMMAND --help', e.g. 'flappclient upload-file --help'.
"""
subCommands = [
("upload-file", None, UploadFileOptions, "upload a file"),
("run-command", None, RunCommandOptions, "cause a command to be run"),
]
def read_furlfile(self):
ff = os.path.expanduser(self["furlfile"])
for line in open(ff).readlines():
line = line.strip()
if line.startswith("pb://"):
return line
return None
def postOptions(self):
self.furl = self["furl"]
if self["furlfile"]:
self.furl = self.read_furlfile()
if not self.furl:
raise usage.UsageError("must provide --furl or --furlfile")
if not hasattr(self, 'subOptions'):
raise usage.UsageError("must specify a command")
def opt_help(self):
self.stdout.write(six.ensure_binary("%s\n" % (self.synopsis,)))
sys.exit(0)
def opt_version(self):
from twisted import copyright
self.stdout.write(six.ensure_binary("Foolscap version: %s\n" % foolscap.__version__))
self.stdout.write(six.ensure_binary("Twisted version: %s\n" % copyright.version))
sys.exit(0)
dispatch_table = {
"upload-file": UploadFile,
"run-command": RunCommand,
}
def parse_options(command_name, argv, stdio, stdout, stderr):
try:
config = ClientOptions()
config.stdout = stdout
config.stderr = stderr
config.parseOptions(argv)
config.subOptions.stdio = stdio # for streaming input
config.subOptions.stdout = stdout
config.subOptions.stderr = stderr
except usage.error as e:
stderr.write(six.ensure_binary("%s: %s\n" % (command_name, e)))
stderr.write(b"\n")
c = getattr(config, 'subOptions', config)
stderr.write(six.ensure_binary("%s\n" % (c,)))
sys.exit(1)
return config
def run_command(config):
c = dispatch_table[config.subCommand]()
tub = Tub()
try:
from twisted.internet import reactor
from twisted.internet.endpoints import clientFromString
from foolscap.connections import tor
CONTROL = os.environ.get("FOOLSCAP_TOR_CONTROL_PORT", "")
SOCKS = os.environ.get("FOOLSCAP_TOR_SOCKS_PORT", "")
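        # The endpoint strings are parsed by clientFromString, e.g. (illustrative
        # values) FOOLSCAP_TOR_CONTROL_PORT="tcp:127.0.0.1:9051" or
        # FOOLSCAP_TOR_SOCKS_PORT="tcp:127.0.0.1:9050".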
if CONTROL:
h = tor.control_endpoint(clientFromString(reactor, CONTROL))
tub.addConnectionHintHandler("tor", h)
elif SOCKS:
h = tor.socks_endpoint(clientFromString(reactor, SOCKS))
tub.addConnectionHintHandler("tor", h)
#else:
# h = tor.default_socks()
# tub.addConnectionHintHandler("tor", h)
except ImportError:
pass
d = defer.succeed(None)
d.addCallback(lambda _ign: tub.startService())
d.addCallback(lambda _ign: tub.getReference(config.furl))
d.addCallback(c.run, config.subOptions) # might provide tub here
d.addBoth(lambda res: tub.stopService().addCallback(lambda _ign: res))
return d
def run_flappclient(argv=None, run_by_human=True, stdio=StandardIO):
if run_by_human:
stdout = wrap_in_binary_mode(sys.stdout)
stderr = wrap_in_binary_mode(sys.stderr)
else:
stdout = BytesIO()
stderr = BytesIO()
if argv:
command_name,argv = argv[0],argv[1:]
else:
command_name = sys.argv[0]
d = fireEventually()
d.addCallback(lambda _ign: parse_options(command_name, argv,
stdio, stdout, stderr))
d.addCallback(run_command)
if run_by_human:
# we need to spin up our own reactor
from twisted.internet import reactor
stash_rc = []
def good(rc):
stash_rc.append(rc)
reactor.stop()
def oops(f):
if f.check(SystemExit):
stash_rc.append(f.value.args[0])
else:
stderr.write(b"flappclient command failed:\n")
stderr.write(six.ensure_binary("%s\n" % (f,)))
stash_rc.append(-1)
reactor.stop()
d.addCallbacks(good, oops)
reactor.run()
sys.exit(stash_rc[0])
else:
def _convert_system_exit(f):
f.trap(SystemExit)
return f.value.args[0]
d.addErrback(_convert_system_exit)
def done(rc):
return (rc, stdout.getvalue(), stderr.getvalue())
d.addCallback(done)
return d
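# Minimal programmatic sketch (the argv values are hypothetical; when
# run_by_human=False the caller's reactor must already be running):
#
#   d = run_flappclient(argv=["flappclient", "--furlfile=service.furl",
#                             "run-command"], run_by_human=False)
#   # d fires with (rc, stdout_bytes, stderr_bytes)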
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module contains helper functions for MLEngine operators."""
import base64
import json
import os
import re
from typing import Callable, Dict, Iterable, List, Optional, Tuple, TypeVar
from urllib.parse import urlsplit
import dill
from airflow import DAG
from airflow.exceptions import AirflowException
from airflow.operators.python import PythonOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.cloud.operators.dataflow import DataflowCreatePythonJobOperator
from airflow.providers.google.cloud.operators.mlengine import MLEngineStartBatchPredictionJobOperator
T = TypeVar("T", bound=Callable)
def create_evaluate_ops(
task_prefix: str,
data_format: str,
input_paths: List[str],
prediction_path: str,
metric_fn_and_keys: Tuple[T, Iterable[str]],
validate_fn: T,
batch_prediction_job_id: Optional[str] = None,
region: Optional[str] = None,
project_id: Optional[str] = None,
dataflow_options: Optional[Dict] = None,
model_uri: Optional[str] = None,
model_name: Optional[str] = None,
version_name: Optional[str] = None,
dag: Optional[DAG] = None,
py_interpreter="python3",
):
"""
    Creates the operators needed for model evaluation and returns them.
    It gets predictions over the inputs via the Cloud ML Engine BatchPrediction
    API by calling MLEngineBatchPredictionOperator, then summarizes and
    validates the result via Cloud Dataflow using DataFlowPythonOperator.
For details and pricing about Batch prediction, please refer to the website
https://cloud.google.com/ml-engine/docs/how-tos/batch-predict
and for Cloud Dataflow, https://cloud.google.com/dataflow/docs/
It returns three chained operators for prediction, summary, and validation,
named as ``<prefix>-prediction``, ``<prefix>-summary``, and ``<prefix>-validation``,
respectively.
(``<prefix>`` should contain only alphanumeric characters or hyphen.)
    The upstream and downstream dependencies can be set accordingly, for example:
.. code-block:: python
pred, _, val = create_evaluate_ops(...)
pred.set_upstream(upstream_op)
...
downstream_op.set_upstream(val)
Callers will provide two python callables, metric_fn and validate_fn, in
order to customize the evaluation behavior as they wish.
- metric_fn receives a dictionary per instance derived from json in the
batch prediction result. The keys might vary depending on the model.
It should return a tuple of metrics.
    - validate_fn receives a dictionary of the averaged metrics that metric_fn
      generated over all instances.
      The keys of the dictionary match what is given by the
      metric_fn_and_keys arg.
      The dictionary contains an additional metric, 'count', representing the
      total number of instances received for evaluation.
      The function should raise an exception to mark the task as failed when
      the validation result is not good enough to proceed (i.e. to set the
      trained version as the default).
Typical examples are like this:
.. code-block:: python
def get_metric_fn_and_keys():
import math # imports should be outside of the metric_fn below.
def error_and_squared_error(inst):
label = float(inst["input_label"])
classes = float(inst["classes"]) # 0 or 1
err = abs(classes - label)
squared_err = math.pow(classes - label, 2)
return (err, squared_err) # returns a tuple.
return error_and_squared_error, ["err", "mse"] # key order must match.
def validate_err_and_count(summary):
if summary["err"] > 0.2:
raise ValueError("Too high err>0.2; summary=%s" % summary)
if summary["mse"] > 0.05:
raise ValueError("Too high mse>0.05; summary=%s" % summary)
if summary["count"] < 1000:
raise ValueError("Too few instances<1000; summary=%s" % summary)
return summary
For the details on the other BatchPrediction-related arguments (project_id,
job_id, region, data_format, input_paths, prediction_path, model_uri),
please refer to MLEngineBatchPredictionOperator too.
    :param task_prefix: a prefix for the tasks. Only alphanumeric characters and
        hyphens are allowed (no underscores), since this will be used as the
        Dataflow job name, which doesn't allow other characters.
:type task_prefix: str
:param data_format: either of 'TEXT', 'TF_RECORD', 'TF_RECORD_GZIP'
:type data_format: str
:param input_paths: a list of input paths to be sent to BatchPrediction.
:type input_paths: list[str]
:param prediction_path: GCS path to put the prediction results in.
:type prediction_path: str
:param metric_fn_and_keys: a tuple of metric_fn and metric_keys:
- metric_fn is a function that accepts a dictionary (for an instance),
and returns a tuple of metric(s) that it calculates.
- metric_keys is a list of strings to denote the key of each metric.
:type metric_fn_and_keys: tuple of a function and a list[str]
:param validate_fn: a function to validate whether the averaged metric(s) is
good enough to push the model.
:type validate_fn: function
:param batch_prediction_job_id: the id to use for the Cloud ML Batch
prediction job. Passed directly to the MLEngineBatchPredictionOperator as
the job_id argument.
:type batch_prediction_job_id: str
:param project_id: the Google Cloud project id in which to execute
Cloud ML Batch Prediction and Dataflow jobs. If None, then the `dag`'s
`default_args['project_id']` will be used.
:type project_id: str
:param region: the Google Cloud region in which to execute Cloud ML
Batch Prediction and Dataflow jobs. If None, then the `dag`'s
`default_args['region']` will be used.
:type region: str
:param dataflow_options: options to run Dataflow jobs. If None, then the
`dag`'s `default_args['dataflow_default_options']` will be used.
:type dataflow_options: dictionary
:param model_uri: GCS path of the model exported by Tensorflow using
``tensorflow.estimator.export_savedmodel()``. It cannot be used with
model_name or version_name below. See MLEngineBatchPredictionOperator for
more detail.
:type model_uri: str
:param model_name: Used to indicate a model to use for prediction. Can be
used in combination with version_name, but cannot be used together with
model_uri. See MLEngineBatchPredictionOperator for more detail. If None,
then the `dag`'s `default_args['model_name']` will be used.
:type model_name: str
:param version_name: Used to indicate a model version to use for prediction,
in combination with model_name. Cannot be used together with model_uri.
See MLEngineBatchPredictionOperator for more detail. If None, then the
`dag`'s `default_args['version_name']` will be used.
:type version_name: str
:param dag: The `DAG` to use for all Operators.
:type dag: airflow.models.DAG
:param py_interpreter: Python version of the beam pipeline.
        If None, this defaults to python3.
        To track the Python versions supported by Beam and related
        issues, check: https://issues.apache.org/jira/browse/BEAM-1251
:type py_interpreter: str
:returns: a tuple of three operators, (prediction, summary, validation)
:rtype: tuple(DataFlowPythonOperator, DataFlowPythonOperator,
PythonOperator)
"""
batch_prediction_job_id = batch_prediction_job_id or ""
dataflow_options = dataflow_options or {}
region = region or ""
# Verify that task_prefix doesn't have any special characters except hyphen
# '-', which is the only allowed non-alphanumeric character by Dataflow.
if not re.match(r"^[a-zA-Z][-A-Za-z0-9]*$", task_prefix):
raise AirflowException(
"Malformed task_id for DataFlowPythonOperator (only alphanumeric "
"and hyphens are allowed but got: " + task_prefix
)
metric_fn, metric_keys = metric_fn_and_keys
if not callable(metric_fn):
raise AirflowException("`metric_fn` param must be callable.")
if not callable(validate_fn):
raise AirflowException("`validate_fn` param must be callable.")
if dag is not None and dag.default_args is not None:
default_args = dag.default_args
project_id = project_id or default_args.get('project_id')
region = region or default_args['region']
model_name = model_name or default_args.get('model_name')
version_name = version_name or default_args.get('version_name')
dataflow_options = dataflow_options or default_args.get('dataflow_default_options')
evaluate_prediction = MLEngineStartBatchPredictionJobOperator(
task_id=(task_prefix + "-prediction"),
project_id=project_id,
job_id=batch_prediction_job_id,
region=region,
data_format=data_format,
input_paths=input_paths,
output_path=prediction_path,
uri=model_uri,
model_name=model_name,
version_name=version_name,
dag=dag,
)
metric_fn_encoded = base64.b64encode(dill.dumps(metric_fn, recurse=True)).decode()
evaluate_summary = DataflowCreatePythonJobOperator(
task_id=(task_prefix + "-summary"),
py_file=os.path.join(os.path.dirname(__file__), 'mlengine_prediction_summary.py'),
dataflow_default_options=dataflow_options,
options={
"prediction_path": prediction_path,
"metric_fn_encoded": metric_fn_encoded,
"metric_keys": ','.join(metric_keys),
},
py_interpreter=py_interpreter,
py_requirements=['apache-beam[gcp]>=2.14.0'],
dag=dag,
)
evaluate_summary.set_upstream(evaluate_prediction)
def apply_validate_fn(*args, templates_dict, **kwargs):
prediction_path = templates_dict["prediction_path"]
scheme, bucket, obj, _, _ = urlsplit(prediction_path)
if scheme != "gs" or not bucket or not obj:
raise ValueError(f"Wrong format prediction_path: {prediction_path}")
summary = os.path.join(obj.strip("/"), "prediction.summary.json")
gcs_hook = GCSHook()
summary = json.loads(gcs_hook.download(bucket, summary))
return validate_fn(summary)
evaluate_validation = PythonOperator(
task_id=(task_prefix + "-validation"),
python_callable=apply_validate_fn,
templates_dict={"prediction_path": prediction_path},
dag=dag,
)
evaluate_validation.set_upstream(evaluate_summary)
return evaluate_prediction, evaluate_summary, evaluate_validation
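# Illustrative wiring inside a DAG (bucket, model and task names are
# hypothetical; get_metric_fn_and_keys/validate_err_and_count refer to the
# docstring examples above):
#
#   pred, summary, validate = create_evaluate_ops(
#       task_prefix="eval-model",
#       data_format="TEXT",
#       input_paths=["gs://my-bucket/eval-input/*"],
#       prediction_path="gs://my-bucket/eval-output",
#       metric_fn_and_keys=get_metric_fn_and_keys(),
#       validate_fn=validate_err_and_count,
#       model_name="my_model",
#       version_name="v1",
#       dag=dag,
#   )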
|
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import pbm
from oslo_vmware import vim_util as vutil
from nova import exception
from nova.network import model as network_model
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.vmwareapi import fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import vm_util
class partialObject(object):
def __init__(self, path='fake-path'):
self.path = path
self.fault = fake.DataObject()
class VMwareVMUtilTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVMUtilTestCase, self).setUp()
fake.reset()
stubs.set_stubs(self)
vm_util.vm_refs_cache_reset()
self._instance = fake_instance.fake_instance_obj(
None,
**{'id': 7, 'name': 'fake!',
'display_name': 'fake-display-name',
'uuid': uuidutils.generate_uuid(),
'vcpus': 2, 'memory_mb': 2048})
def _test_get_stats_from_cluster(self, connection_state="connected",
maintenance_mode=False):
ManagedObjectRefs = [fake.ManagedObjectReference("host1",
"HostSystem"),
fake.ManagedObjectReference("host2",
"HostSystem")]
hosts = fake._convert_to_array_of_mor(ManagedObjectRefs)
respool = fake.ManagedObjectReference("resgroup-11", "ResourcePool")
prop_dict = {'host': hosts, 'resourcePool': respool}
hardware = fake.DataObject()
hardware.numCpuCores = 8
hardware.numCpuThreads = 16
hardware.vendor = "Intel"
hardware.cpuModel = "Intel(R) Xeon(R)"
hardware.memorySize = 4 * units.Gi
runtime_host_1 = fake.DataObject()
runtime_host_1.connectionState = "connected"
runtime_host_1.inMaintenanceMode = False
quickstats_1 = fake.DataObject()
quickstats_1.overallMemoryUsage = 512
quickstats_2 = fake.DataObject()
quickstats_2.overallMemoryUsage = 512
runtime_host_2 = fake.DataObject()
runtime_host_2.connectionState = connection_state
runtime_host_2.inMaintenanceMode = maintenance_mode
prop_list_host_1 = [fake.Prop(name="summary.hardware", val=hardware),
fake.Prop(name="summary.runtime",
val=runtime_host_1),
fake.Prop(name="summary.quickStats",
val=quickstats_1)]
prop_list_host_2 = [fake.Prop(name="summary.hardware", val=hardware),
fake.Prop(name="summary.runtime",
val=runtime_host_2),
fake.Prop(name="summary.quickStats",
val=quickstats_2)]
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.ObjectContent("prop_list_host1",
prop_list_host_1))
fake_objects.add_object(fake.ObjectContent("prop_list_host1",
prop_list_host_2))
def fake_call_method(*args):
if "get_object_properties_dict" in args:
return prop_dict
elif "get_properties_for_a_collection_of_objects" in args:
return fake_objects
else:
raise Exception('unexpected method call')
session = fake.FakeSession()
with mock.patch.object(session, '_call_method', fake_call_method):
result = vm_util.get_stats_from_cluster(session, "cluster1")
if connection_state == "connected" and not maintenance_mode:
num_hosts = 2
else:
num_hosts = 1
expected_stats = {'cpu': {'vcpus': num_hosts * 16,
'max_vcpus_per_host': 16},
'mem': {'total': num_hosts * 4096,
'free': num_hosts * 4096 -
num_hosts * 512,
'max_mem_mb_per_host': 4096}}
self.assertEqual(expected_stats, result)
def test_get_stats_from_cluster_hosts_connected_and_active(self):
self._test_get_stats_from_cluster()
def test_get_stats_from_cluster_hosts_disconnected_and_active(self):
self._test_get_stats_from_cluster(connection_state="disconnected")
def test_get_stats_from_cluster_hosts_connected_and_maintenance(self):
self._test_get_stats_from_cluster(maintenance_mode=True)
def test_get_host_ref_no_hosts_in_cluster(self):
self.assertRaises(exception.NoValidHost,
vm_util.get_host_ref,
fake.FakeObjectRetrievalSession(""), 'fake_cluster')
def test_get_resize_spec(self):
vcpus = 2
memory_mb = 2048
extra_specs = vm_util.ExtraSpecs()
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_resize_spec(fake_factory,
vcpus, memory_mb, extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.memoryMB = memory_mb
expected.numCPUs = vcpus
cpuAllocation = fake_factory.create('ns0:ResourceAllocationInfo')
cpuAllocation.reservation = 0
cpuAllocation.limit = -1
cpuAllocation.shares = fake_factory.create('ns0:SharesInfo')
cpuAllocation.shares.level = 'normal'
cpuAllocation.shares.shares = 0
expected.cpuAllocation = cpuAllocation
self.assertEqual(expected, result)
def test_get_resize_spec_with_limits(self):
vcpus = 2
memory_mb = 2048
cpu_limits = vm_util.Limits(limit=7,
reservation=6)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_resize_spec(fake_factory,
vcpus, memory_mb, extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.memoryMB = memory_mb
expected.numCPUs = vcpus
cpuAllocation = fake_factory.create('ns0:ResourceAllocationInfo')
cpuAllocation.reservation = 6
cpuAllocation.limit = 7
cpuAllocation.shares = fake_factory.create('ns0:SharesInfo')
cpuAllocation.shares.level = 'normal'
cpuAllocation.shares.shares = 0
expected.cpuAllocation = cpuAllocation
self.assertEqual(expected, result)
def test_get_cdrom_attach_config_spec(self):
fake_factory = fake.FakeFactory()
datastore = fake.Datastore()
result = vm_util.get_cdrom_attach_config_spec(fake_factory,
datastore,
"/tmp/foo.iso",
200, 0)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.deviceChange = []
device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
device_change.operation = 'add'
device_change.device = fake_factory.create('ns0:VirtualCdrom')
device_change.device.controllerKey = 200
device_change.device.unitNumber = 0
device_change.device.key = -1
connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo')
connectable.allowGuestControl = False
connectable.startConnected = True
connectable.connected = True
device_change.device.connectable = connectable
backing = fake_factory.create('ns0:VirtualCdromIsoBackingInfo')
backing.fileName = '/tmp/foo.iso'
backing.datastore = datastore
device_change.device.backing = backing
expected.deviceChange.append(device_change)
self.assertEqual(expected, result)
def test_lsilogic_controller_spec(self):
# Test controller spec returned for lsiLogic sas adapter type
config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
adapter_type=constants.ADAPTER_TYPE_LSILOGICSAS)
self.assertEqual("ns0:VirtualLsiLogicSASController",
config_spec.device.obj_name)
def test_paravirtual_controller_spec(self):
# Test controller spec returned for paraVirtual adapter type
config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
adapter_type=constants.ADAPTER_TYPE_PARAVIRTUAL)
self.assertEqual("ns0:ParaVirtualSCSIController",
config_spec.device.obj_name)
def test_create_controller_spec_with_specific_bus_number(self):
        # Test controller spec with a specific bus number rather than the default 0
config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
adapter_type=constants.ADAPTER_TYPE_LSILOGICSAS,
bus_number=1)
self.assertEqual(1, config_spec.device.busNumber)
def _vmdk_path_and_adapter_type_devices(self, filename, parent=None):
# Test the adapter_type returned for a lsiLogic sas controller
controller_key = 1000
disk = fake.VirtualDisk()
disk.controllerKey = controller_key
disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = filename
disk.capacityInBytes = 1024
if parent:
disk_backing.parent = parent
disk.backing = disk_backing
# Ephemeral disk
e_disk = fake.VirtualDisk()
e_disk.controllerKey = controller_key
disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = '[test_datastore] uuid/ephemeral_0.vmdk'
e_disk.capacityInBytes = 512
e_disk.backing = disk_backing
controller = fake.VirtualLsiLogicSASController()
controller.key = controller_key
devices = [disk, e_disk, controller]
return devices
def test_get_vmdk_path_and_adapter_type(self):
filename = '[test_datastore] uuid/uuid.vmdk'
devices = self._vmdk_path_and_adapter_type_devices(filename)
session = fake.FakeSession()
with mock.patch.object(session, '_call_method', return_value=devices):
vmdk = vm_util.get_vmdk_info(session, None)
self.assertEqual(constants.ADAPTER_TYPE_LSILOGICSAS,
vmdk.adapter_type)
self.assertEqual('[test_datastore] uuid/ephemeral_0.vmdk',
vmdk.path)
self.assertEqual(512, vmdk.capacity_in_bytes)
self.assertEqual(devices[1], vmdk.device)
def test_get_vmdk_path_and_adapter_type_with_match(self):
n_filename = '[test_datastore] uuid/uuid.vmdk'
devices = self._vmdk_path_and_adapter_type_devices(n_filename)
session = fake.FakeSession()
with mock.patch.object(session, '_call_method', return_value=devices):
vmdk = vm_util.get_vmdk_info(session, None, uuid='uuid')
self.assertEqual(constants.ADAPTER_TYPE_LSILOGICSAS,
vmdk.adapter_type)
self.assertEqual(n_filename, vmdk.path)
self.assertEqual(1024, vmdk.capacity_in_bytes)
self.assertEqual(devices[0], vmdk.device)
def test_get_vmdk_path_and_adapter_type_with_nomatch(self):
n_filename = '[test_datastore] diuu/diuu.vmdk'
session = fake.FakeSession()
devices = self._vmdk_path_and_adapter_type_devices(n_filename)
with mock.patch.object(session, '_call_method', return_value=devices):
vmdk = vm_util.get_vmdk_info(session, None, uuid='uuid')
self.assertIsNone(vmdk.adapter_type)
self.assertIsNone(vmdk.path)
self.assertEqual(0, vmdk.capacity_in_bytes)
self.assertIsNone(vmdk.device)
def test_get_vmdk_adapter_type(self):
# Test for the adapter_type to be used in vmdk descriptor
        # Adapter type in vmdk descriptor is the same for LSI-SAS, LSILogic
# and ParaVirtual
vmdk_adapter_type = vm_util.get_vmdk_adapter_type(
constants.DEFAULT_ADAPTER_TYPE)
self.assertEqual(constants.DEFAULT_ADAPTER_TYPE, vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type(
constants.ADAPTER_TYPE_LSILOGICSAS)
self.assertEqual(constants.DEFAULT_ADAPTER_TYPE, vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type(
constants.ADAPTER_TYPE_PARAVIRTUAL)
self.assertEqual(constants.DEFAULT_ADAPTER_TYPE, vmdk_adapter_type)
vmdk_adapter_type = vm_util.get_vmdk_adapter_type("dummyAdapter")
self.assertEqual("dummyAdapter", vmdk_adapter_type)
def test_get_scsi_adapter_type(self):
vm = fake.VirtualMachine()
devices = vm.get("config.hardware.device").VirtualDevice
scsi_controller = fake.VirtualLsiLogicController()
ide_controller = fake.VirtualIDEController()
devices.append(scsi_controller)
devices.append(ide_controller)
fake._update_object("VirtualMachine", vm)
# return the scsi type, not ide
hardware_device = vm.get("config.hardware.device")
self.assertEqual(constants.DEFAULT_ADAPTER_TYPE,
vm_util.get_scsi_adapter_type(hardware_device))
def test_get_scsi_adapter_type_with_error(self):
vm = fake.VirtualMachine()
devices = vm.get("config.hardware.device").VirtualDevice
scsi_controller = fake.VirtualLsiLogicController()
ide_controller = fake.VirtualIDEController()
devices.append(scsi_controller)
devices.append(ide_controller)
fake._update_object("VirtualMachine", vm)
        # the controller is not suitable since the number of devices attached
        # to it has reached SCSI_MAX_CONNECT_NUMBER
for i in range(0, constants.SCSI_MAX_CONNECT_NUMBER):
scsi_controller.device.append('device' + str(i))
hardware_device = vm.get("config.hardware.device")
self.assertRaises(exception.StorageError,
vm_util.get_scsi_adapter_type,
hardware_device)
def test_find_allocated_slots(self):
disk1 = fake.VirtualDisk(200, 0)
disk2 = fake.VirtualDisk(200, 1)
disk3 = fake.VirtualDisk(201, 1)
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
scsi0 = fake.VirtualLsiLogicController(key=1000, scsiCtlrUnitNumber=7)
devices = [disk1, disk2, disk3, ide0, ide1, scsi0]
taken = vm_util._find_allocated_slots(devices)
self.assertEqual([0, 1], sorted(taken[200]))
self.assertEqual([1], taken[201])
self.assertEqual([7], taken[1000])
def test_get_bus_number_for_scsi_controller(self):
devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7,
busNumber=0),
fake.VirtualLsiLogicController(1002, scsiCtlrUnitNumber=7,
busNumber=2)]
bus_number = vm_util._get_bus_number_for_scsi_controller(devices)
self.assertEqual(1, bus_number)
def test_get_bus_number_for_scsi_controller_buses_used_up(self):
devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7,
busNumber=0),
fake.VirtualLsiLogicController(1001, scsiCtlrUnitNumber=7,
busNumber=1),
fake.VirtualLsiLogicController(1002, scsiCtlrUnitNumber=7,
busNumber=2),
fake.VirtualLsiLogicController(1003, scsiCtlrUnitNumber=7,
busNumber=3)]
self.assertRaises(vexc.VMwareDriverException,
vm_util._get_bus_number_for_scsi_controller,
devices)
def test_allocate_controller_key_and_unit_number_ide_default(self):
# Test that default IDE controllers are used when there is a free slot
# on them
disk1 = fake.VirtualDisk(200, 0)
disk2 = fake.VirtualDisk(200, 1)
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
devices = [disk1, disk2, ide0, ide1]
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
None,
devices,
'ide')
self.assertEqual(201, controller_key)
self.assertEqual(0, unit_number)
self.assertIsNone(controller_spec)
def test_allocate_controller_key_and_unit_number_ide(self):
# Test that a new controller is created when there is no free slot on
# the default IDE controllers
ide0 = fake.VirtualIDEController(200)
ide1 = fake.VirtualIDEController(201)
devices = [ide0, ide1]
for controller_key in [200, 201]:
for unit_number in [0, 1]:
disk = fake.VirtualDisk(controller_key, unit_number)
devices.append(disk)
factory = fake.FakeFactory()
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
factory,
devices,
'ide')
self.assertEqual(-101, controller_key)
self.assertEqual(0, unit_number)
self.assertIsNotNone(controller_spec)
def test_allocate_controller_key_and_unit_number_scsi(self):
# Test that we allocate on existing SCSI controller if there is a free
# slot on it
devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7)]
for unit_number in range(7):
disk = fake.VirtualDisk(1000, unit_number)
devices.append(disk)
factory = fake.FakeFactory()
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
factory,
devices,
constants.DEFAULT_ADAPTER_TYPE)
self.assertEqual(1000, controller_key)
self.assertEqual(8, unit_number)
self.assertIsNone(controller_spec)
def test_allocate_controller_key_and_unit_number_scsi_new_controller(self):
        # Test that a new SCSI controller is created when there is no free
        # slot on the existing one
devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=15)]
for unit_number in range(15):
disk = fake.VirtualDisk(1000, unit_number)
devices.append(disk)
factory = fake.FakeFactory()
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
factory,
devices,
constants.DEFAULT_ADAPTER_TYPE)
self.assertEqual(-101, controller_key)
self.assertEqual(0, unit_number)
self.assertEqual(1, controller_spec.device.busNumber)
def _test_get_vnc_config_spec(self, keymap):
fake_factory = fake.FakeFactory()
result = vm_util.get_vnc_config_spec(fake_factory,
7)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.extraConfig = []
remote_display_vnc_enabled = fake_factory.create('ns0:OptionValue')
remote_display_vnc_enabled.value = 'true'
remote_display_vnc_enabled.key = 'RemoteDisplay.vnc.enabled'
expected.extraConfig.append(remote_display_vnc_enabled)
remote_display_vnc_port = fake_factory.create('ns0:OptionValue')
remote_display_vnc_port.value = 7
remote_display_vnc_port.key = 'RemoteDisplay.vnc.port'
expected.extraConfig.append(remote_display_vnc_port)
remote_display_vnc_keymap = fake_factory.create('ns0:OptionValue')
remote_display_vnc_keymap.value = keymap
remote_display_vnc_keymap.key = 'RemoteDisplay.vnc.keyMap'
expected.extraConfig.append(remote_display_vnc_keymap)
self.assertEqual(expected, result)
def test_get_vnc_config_spec(self):
        # TODO(stephenfin): Fold this back in and stop overriding the keymap
        # option once we remove the '[vnc] keymap' option
self.flags(vnc_keymap='en-ie', group='vmware')
self._test_get_vnc_config_spec('en-ie')
def test_get_vnc_config_spec__legacy_keymap(self):
self.flags(keymap='en-uk', group='vnc')
self.flags(vnc_keymap='en-ie', group='vmware')
self._test_get_vnc_config_spec('en-uk')
def _create_fake_vms(self):
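        # Build ten fake VMs whose VNC configuration values occupy ports
        # 5900-5909, so the first free port reported by get_vnc_port is 5910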
fake_vms = fake.FakeRetrieveResult()
OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
for i in range(10):
vm = fake.ManagedObject()
opt_val = OptionValue(key='', value=5900 + i)
vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
fake_vms.add_object(vm)
return fake_vms
def test_get_vnc_port(self):
fake_vms = self._create_fake_vms()
self.flags(vnc_port=5900, group='vmware')
self.flags(vnc_port_total=10000, group='vmware')
actual = vm_util.get_vnc_port(
fake.FakeObjectRetrievalSession(fake_vms))
self.assertEqual(actual, 5910)
def test_get_vnc_port_exhausted(self):
fake_vms = self._create_fake_vms()
self.flags(vnc_port=5900, group='vmware')
self.flags(vnc_port_total=10, group='vmware')
self.assertRaises(exception.ConsolePortRangeExhausted,
vm_util.get_vnc_port,
fake.FakeObjectRetrievalSession(fake_vms))
def test_get_cluster_ref_by_name_none(self):
fake_objects = fake.FakeRetrieveResult()
ref = vm_util.get_cluster_ref_by_name(
fake.FakeObjectRetrievalSession(fake_objects), 'fake_cluster')
self.assertIsNone(ref)
def test_get_cluster_ref_by_name_exists(self):
fake_objects = fake.FakeRetrieveResult()
cluster = fake.ClusterComputeResource(name='cluster')
fake_objects.add_object(cluster)
ref = vm_util.get_cluster_ref_by_name(
fake.FakeObjectRetrievalSession(fake_objects), 'cluster')
self.assertIs(cluster.obj, ref)
def test_get_cluster_ref_by_name_missing(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(partialObject(path='cluster'))
ref = vm_util.get_cluster_ref_by_name(
fake.FakeObjectRetrievalSession(fake_objects), 'cluster')
self.assertIsNone(ref)
def test_propset_dict_simple(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
object = ObjectContent(propSet=[
DynamicProperty(name='foo', val="bar")])
propdict = vm_util.propset_dict(object.propSet)
self.assertEqual("bar", propdict['foo'])
def test_propset_dict_complex(self):
ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
MoRef = collections.namedtuple('Val', ['value'])
object = ObjectContent(propSet=[
DynamicProperty(name='foo', val="bar"),
DynamicProperty(name='some.thing',
val=MoRef(value='else')),
DynamicProperty(name='another.thing', val='value')])
propdict = vm_util.propset_dict(object.propSet)
self.assertEqual("bar", propdict['foo'])
self.assertTrue(hasattr(propdict['some.thing'], 'value'))
self.assertEqual("else", propdict['some.thing'].value)
self.assertEqual("value", propdict['another.thing'])
def _test_detach_virtual_disk_spec(self, destroy_disk=False):
virtual_device_config = vm_util.detach_virtual_disk_spec(
fake.FakeFactory(),
'fake_device',
destroy_disk)
self.assertEqual('remove', virtual_device_config.operation)
self.assertEqual('fake_device', virtual_device_config.device)
self.assertEqual('ns0:VirtualDeviceConfigSpec',
virtual_device_config.obj_name)
if destroy_disk:
self.assertEqual('destroy', virtual_device_config.fileOperation)
else:
self.assertFalse(hasattr(virtual_device_config, 'fileOperation'))
def test_detach_virtual_disk_spec(self):
self._test_detach_virtual_disk_spec(destroy_disk=False)
def test_detach_virtual_disk_destroy_spec(self):
self._test_detach_virtual_disk_spec(destroy_disk=True)
def _create_vm_config_spec(self):
fake_factory = fake.FakeFactory()
spec = fake_factory.create('ns0:VirtualMachineConfigSpec')
spec.name = self._instance.uuid
spec.instanceUuid = self._instance.uuid
spec.deviceChange = []
spec.numCPUs = 2
spec.version = None
spec.memoryMB = 2048
spec.guestId = 'otherGuest'
spec.extraConfig = []
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = self._instance.uuid
extra_config.key = 'nvp.vm-uuid'
spec.extraConfig.append(extra_config)
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = True
extra_config.key = 'disk.EnableUUID'
spec.extraConfig.append(extra_config)
spec.files = fake_factory.create('ns0:VirtualMachineFileInfo')
spec.files.vmPathName = '[fake-datastore]'
spec.managedBy = fake_factory.create('ns0:ManagedByInfo')
spec.managedBy.extensionKey = 'org.openstack.compute'
spec.managedBy.type = 'instance'
spec.tools = fake_factory.create('ns0:ToolsConfigInfo')
spec.tools.afterPowerOn = True
spec.tools.afterResume = True
spec.tools.beforeGuestReboot = True
spec.tools.beforeGuestShutdown = True
spec.tools.beforeGuestStandby = True
return spec
def test_get_vm_extra_config_spec(self):
fake_factory = fake.FakeFactory()
extra_opts = {mock.sentinel.key: mock.sentinel.value}
res = vm_util.get_vm_extra_config_spec(fake_factory, extra_opts)
self.assertEqual(1, len(res.extraConfig))
self.assertEqual(mock.sentinel.key, res.extraConfig[0].key)
self.assertEqual(mock.sentinel.value, res.extraConfig[0].value)
def test_get_vm_create_spec(self):
extra_specs = vm_util.ExtraSpecs()
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
expected = self._create_vm_config_spec()
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_serial_port(self):
extra_specs = vm_util.ExtraSpecs()
fake_factory = fake.FakeFactory()
self.flags(serial_port_service_uri='foobar', group='vmware')
self.flags(serial_port_proxy_uri='telnet://example.com:31337',
group='vmware')
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
serial_port_spec = vm_util.create_serial_port_spec(fake_factory)
expected = self._create_vm_config_spec()
expected.deviceChange = [serial_port_spec]
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_allocations(self):
cpu_limits = vm_util.Limits(limit=7,
reservation=6)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.deviceChange = []
expected.guestId = constants.DEFAULT_OS_TYPE
expected.instanceUuid = self._instance.uuid
expected.memoryMB = self._instance.memory_mb
expected.name = self._instance.uuid
expected.numCPUs = self._instance.vcpus
expected.version = None
expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
expected.files.vmPathName = '[fake-datastore]'
expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
expected.tools.afterPowerOn = True
expected.tools.afterResume = True
expected.tools.beforeGuestReboot = True
expected.tools.beforeGuestShutdown = True
expected.tools.beforeGuestStandby = True
expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
expected.managedBy.extensionKey = 'org.openstack.compute'
expected.managedBy.type = 'instance'
cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
cpu_allocation.limit = 7
cpu_allocation.reservation = 6
cpu_allocation.shares = fake_factory.create('ns0:SharesInfo')
cpu_allocation.shares.level = 'normal'
cpu_allocation.shares.shares = 0
expected.cpuAllocation = cpu_allocation
expected.extraConfig = []
extra_config = fake_factory.create('ns0:OptionValue')
extra_config.key = 'nvp.vm-uuid'
extra_config.value = self._instance.uuid
expected.extraConfig.append(extra_config)
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = True
extra_config.key = 'disk.EnableUUID'
expected.extraConfig.append(extra_config)
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_limit(self):
cpu_limits = vm_util.Limits(limit=7)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
expected.files.vmPathName = '[fake-datastore]'
expected.instanceUuid = self._instance.uuid
expected.name = self._instance.uuid
expected.deviceChange = []
expected.extraConfig = []
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = self._instance.uuid
extra_config.key = 'nvp.vm-uuid'
expected.extraConfig.append(extra_config)
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = True
extra_config.key = 'disk.EnableUUID'
expected.extraConfig.append(extra_config)
expected.memoryMB = 2048
expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
expected.managedBy.extensionKey = 'org.openstack.compute'
expected.managedBy.type = 'instance'
expected.version = None
expected.guestId = constants.DEFAULT_OS_TYPE
expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
expected.tools.afterPowerOn = True
expected.tools.afterResume = True
expected.tools.beforeGuestReboot = True
expected.tools.beforeGuestShutdown = True
expected.tools.beforeGuestStandby = True
cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
cpu_allocation.limit = 7
cpu_allocation.reservation = 0
cpu_allocation.shares = fake_factory.create('ns0:SharesInfo')
cpu_allocation.shares.level = 'normal'
cpu_allocation.shares.shares = 0
expected.cpuAllocation = cpu_allocation
expected.numCPUs = 2
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_share(self):
cpu_limits = vm_util.Limits(shares_level='high')
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
expected.files.vmPathName = '[fake-datastore]'
expected.instanceUuid = self._instance.uuid
expected.name = self._instance.uuid
expected.deviceChange = []
expected.extraConfig = []
extra_config = fake_factory.create('ns0:OptionValue')
extra_config.value = self._instance.uuid
extra_config.key = 'nvp.vm-uuid'
expected.extraConfig.append(extra_config)
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = True
extra_config.key = 'disk.EnableUUID'
expected.extraConfig.append(extra_config)
expected.memoryMB = 2048
expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
expected.managedBy.type = 'instance'
expected.managedBy.extensionKey = 'org.openstack.compute'
expected.version = None
expected.guestId = constants.DEFAULT_OS_TYPE
expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
expected.tools.beforeGuestStandby = True
expected.tools.beforeGuestReboot = True
expected.tools.beforeGuestShutdown = True
expected.tools.afterResume = True
expected.tools.afterPowerOn = True
cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
cpu_allocation.reservation = 0
cpu_allocation.limit = -1
cpu_allocation.shares = fake_factory.create('ns0:SharesInfo')
cpu_allocation.shares.level = 'high'
cpu_allocation.shares.shares = 0
expected.cpuAllocation = cpu_allocation
expected.numCPUs = 2
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_share_custom(self):
cpu_limits = vm_util.Limits(shares_level='custom',
shares_share=1948)
extra_specs = vm_util.ExtraSpecs(cpu_limits=cpu_limits)
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
expected.files.vmPathName = '[fake-datastore]'
expected.instanceUuid = self._instance.uuid
expected.name = self._instance.uuid
expected.deviceChange = []
expected.extraConfig = []
extra_config = fake_factory.create('ns0:OptionValue')
extra_config.key = 'nvp.vm-uuid'
extra_config.value = self._instance.uuid
expected.extraConfig.append(extra_config)
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = True
extra_config.key = 'disk.EnableUUID'
expected.extraConfig.append(extra_config)
expected.memoryMB = 2048
expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
expected.managedBy.extensionKey = 'org.openstack.compute'
expected.managedBy.type = 'instance'
expected.version = None
expected.guestId = constants.DEFAULT_OS_TYPE
expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
expected.tools.beforeGuestStandby = True
expected.tools.beforeGuestReboot = True
expected.tools.beforeGuestShutdown = True
expected.tools.afterResume = True
expected.tools.afterPowerOn = True
cpu_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
cpu_allocation.reservation = 0
cpu_allocation.limit = -1
cpu_allocation.shares = fake_factory.create('ns0:SharesInfo')
cpu_allocation.shares.level = 'custom'
cpu_allocation.shares.shares = 1948
expected.cpuAllocation = cpu_allocation
expected.numCPUs = 2
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_metadata(self):
extra_specs = vm_util.ExtraSpecs()
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs,
metadata='fake-metadata')
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.name = self._instance.uuid
expected.instanceUuid = self._instance.uuid
expected.deviceChange = []
expected.numCPUs = 2
expected.version = None
expected.memoryMB = 2048
expected.guestId = 'otherGuest'
expected.annotation = 'fake-metadata'
expected.extraConfig = []
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = self._instance.uuid
extra_config.key = 'nvp.vm-uuid'
expected.extraConfig.append(extra_config)
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = True
extra_config.key = 'disk.EnableUUID'
expected.extraConfig.append(extra_config)
expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
expected.files.vmPathName = '[fake-datastore]'
expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
expected.managedBy.extensionKey = 'org.openstack.compute'
expected.managedBy.type = 'instance'
expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
expected.tools.afterPowerOn = True
expected.tools.afterResume = True
expected.tools.beforeGuestReboot = True
expected.tools.beforeGuestShutdown = True
expected.tools.beforeGuestStandby = True
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_firmware(self):
extra_specs = vm_util.ExtraSpecs(firmware='efi')
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.name = self._instance.uuid
expected.instanceUuid = self._instance.uuid
expected.deviceChange = []
expected.numCPUs = 2
expected.version = None
expected.memoryMB = 2048
expected.guestId = 'otherGuest'
expected.firmware = 'efi'
expected.extraConfig = []
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = self._instance.uuid
extra_config.key = 'nvp.vm-uuid'
expected.extraConfig.append(extra_config)
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = True
extra_config.key = 'disk.EnableUUID'
expected.extraConfig.append(extra_config)
expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
expected.files.vmPathName = '[fake-datastore]'
expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
expected.managedBy.extensionKey = 'org.openstack.compute'
expected.managedBy.type = 'instance'
expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
expected.tools.afterPowerOn = True
expected.tools.afterResume = True
expected.tools.beforeGuestReboot = True
expected.tools.beforeGuestShutdown = True
expected.tools.beforeGuestStandby = True
self.assertEqual(expected, result)
def test_create_vm(self):
def fake_call_method(module, method, *args, **kwargs):
if (method == 'CreateVM_Task'):
return 'fake_create_vm_task'
else:
self.fail('Should not get here....')
def fake_wait_for_task(self, *args):
task_info = mock.Mock(state="success", result="fake_vm_ref")
return task_info
session = fake.FakeSession()
fake_call_mock = mock.Mock(side_effect=fake_call_method)
fake_wait_mock = mock.Mock(side_effect=fake_wait_for_task)
with test.nested(
mock.patch.object(session, '_wait_for_task',
fake_wait_mock),
mock.patch.object(session, '_call_method',
fake_call_mock)
) as (wait_for_task, call_method):
vm_ref = vm_util.create_vm(
session,
self._instance,
'fake_vm_folder',
'fake_config_spec',
'fake_res_pool_ref')
self.assertEqual('fake_vm_ref', vm_ref)
call_method.assert_called_once_with(mock.ANY, 'CreateVM_Task',
'fake_vm_folder', config='fake_config_spec',
pool='fake_res_pool_ref')
wait_for_task.assert_called_once_with('fake_create_vm_task')
@mock.patch.object(vm_util.LOG, 'warning')
def test_create_vm_invalid_guestid(self, mock_log_warn):
"""Ensure we warn when create_vm() fails after we passed an
unrecognised guestId
"""
found = [False]
def fake_log_warn(msg, values):
if not isinstance(values, dict):
return
if values.get('ostype') == 'invalid_os_type':
found[0] = True
mock_log_warn.side_effect = fake_log_warn
session = driver.VMwareAPISession()
config_spec = vm_util.get_vm_create_spec(
session.vim.client.factory,
self._instance, 'fake-datastore', [],
vm_util.ExtraSpecs(),
os_type='invalid_os_type')
self.assertRaises(vexc.VMwareDriverException,
vm_util.create_vm, session, self._instance,
'folder', config_spec, 'res-pool')
self.assertTrue(found[0])
def test_convert_vif_model(self):
expected = "VirtualE1000"
result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000)
self.assertEqual(expected, result)
expected = "VirtualE1000e"
result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000E)
self.assertEqual(expected, result)
types = ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
"VirtualVmxnet", "VirtualVmxnet3"]
for type in types:
self.assertEqual(type,
vm_util.convert_vif_model(type))
self.assertRaises(exception.Invalid,
vm_util.convert_vif_model,
"InvalidVifModel")
def test_power_on_instance_with_vm_ref(self):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_call_method, fake_wait_for_task):
vm_util.power_on_instance(session, self._instance,
vm_ref='fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_power_on_instance_without_vm_ref(self):
session = fake.FakeSession()
with test.nested(
mock.patch.object(vm_util, "get_vm_ref",
return_value='fake-vm-ref'),
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_get_vm_ref, fake_call_method, fake_wait_for_task):
vm_util.power_on_instance(session, self._instance)
fake_get_vm_ref.assert_called_once_with(session, self._instance)
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_power_on_instance_with_exception(self):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task",
side_effect=exception.NovaException('fake')),
) as (fake_call_method, fake_wait_for_task):
self.assertRaises(exception.NovaException,
vm_util.power_on_instance,
session, self._instance,
vm_ref='fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_power_on_instance_with_power_state_exception(self):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(
session, "_wait_for_task",
side_effect=vexc.InvalidPowerStateException),
) as (fake_call_method, fake_wait_for_task):
vm_util.power_on_instance(session, self._instance,
vm_ref='fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOnVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_create_virtual_disk(self):
session = fake.FakeSession()
dm = session.vim.service_content.virtualDiskManager
with test.nested(
mock.patch.object(vm_util, "get_vmdk_create_spec",
return_value='fake-spec'),
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_get_spec, fake_call_method, fake_wait_for_task):
vm_util.create_virtual_disk(session, 'fake-dc-ref',
'fake-adapter-type', 'fake-disk-type',
'fake-path', 7)
fake_get_spec.assert_called_once_with(
session.vim.client.factory, 7,
'fake-adapter-type',
'fake-disk-type')
fake_call_method.assert_called_once_with(
session.vim,
"CreateVirtualDisk_Task",
dm,
name='fake-path',
datacenter='fake-dc-ref',
spec='fake-spec')
fake_wait_for_task.assert_called_once_with('fake-task')
def test_copy_virtual_disk(self):
session = fake.FakeSession()
dm = session.vim.service_content.virtualDiskManager
with test.nested(
mock.patch.object(session, "_call_method",
return_value='fake-task'),
mock.patch.object(session, "_wait_for_task"),
) as (fake_call_method, fake_wait_for_task):
vm_util.copy_virtual_disk(session, 'fake-dc-ref',
'fake-source', 'fake-dest')
fake_call_method.assert_called_once_with(
session.vim,
"CopyVirtualDisk_Task",
dm,
sourceName='fake-source',
sourceDatacenter='fake-dc-ref',
destName='fake-dest')
fake_wait_for_task.assert_called_once_with('fake-task')
def _create_fake_vm_objects(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.VirtualMachine())
return fake_objects
def test_reconfigure_vm(self):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, '_call_method',
return_value='fake_reconfigure_task'),
mock.patch.object(session, '_wait_for_task')
) as (_call_method, _wait_for_task):
vm_util.reconfigure_vm(session, 'fake-ref', 'fake-spec')
_call_method.assert_called_once_with(mock.ANY,
'ReconfigVM_Task', 'fake-ref', spec='fake-spec')
_wait_for_task.assert_called_once_with(
'fake_reconfigure_task')
def _get_network_attach_config_spec_opaque(self, network_ref,
vc6_onwards=False):
vif_info = {'network_name': 'fake-name',
'mac_address': '00:00:00:ca:fe:01',
'network_ref': network_ref,
'iface_id': 7,
'vif_model': 'VirtualE1000'}
fake_factory = fake.FakeFactory()
result = vm_util.get_network_attach_config_spec(
fake_factory, vif_info, 1)
card = 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo'
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.extraConfig = []
extra_config = fake_factory.create('ns0:OptionValue')
extra_config.value = vif_info['iface_id']
extra_config.key = 'nvp.iface-id.1'
expected.extraConfig.append(extra_config)
expected.deviceChange = []
device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
device_change.operation = 'add'
device = fake_factory.create('ns0:VirtualE1000')
device.macAddress = vif_info['mac_address']
if network_ref['use-external-id']:
if vc6_onwards:
device.externalId = vif_info['iface_id']
else:
dp = fake_factory.create('ns0:DynamicProperty')
dp.name = '__externalId__'
dp.val = vif_info['iface_id']
device.dynamicProperty = [dp]
device.addressType = 'manual'
connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo')
connectable.allowGuestControl = True
connectable.startConnected = True
connectable.connected = True
device.connectable = connectable
backing = fake_factory.create(card)
backing.opaqueNetworkType = vif_info['network_ref']['network-type']
backing.opaqueNetworkId = vif_info['network_ref']['network-id']
device.backing = backing
device.key = -47
device.wakeOnLanEnabled = True
device_change.device = device
expected.deviceChange.append(device_change)
self.assertEqual(expected, result)
def test_get_network_attach_config_spec_opaque_integration_bridge(self):
network_ref = {'type': 'OpaqueNetwork',
'network-id': 'fake-network-id',
'network-type': 'opaque',
'use-external-id': False}
self._get_network_attach_config_spec_opaque(network_ref)
def test_get_network_attach_config_spec_opaque(self):
network_ref = {'type': 'OpaqueNetwork',
'network-id': 'fake-network-id',
'network-type': 'nsx.LogicalSwitch',
'use-external-id': True}
self._get_network_attach_config_spec_opaque(network_ref)
@mock.patch.object(fake, 'DataObject')
def test_get_network_attach_config_spec_opaque_vc6_onwards(self,
mock_object):
        # Add the externalId attribute, which is supported from VC6 onwards
class FakeVirtualE1000(fake.DataObject):
def __init__(self):
super(FakeVirtualE1000, self).__init__()
self.externalId = None
mock_object.return_value = FakeVirtualE1000
network_ref = {'type': 'OpaqueNetwork',
'network-id': 'fake-network-id',
'network-type': 'nsx.LogicalSwitch',
'use-external-id': True}
self._get_network_attach_config_spec_opaque(network_ref,
vc6_onwards=True)
def test_get_network_attach_config_spec_dvs(self):
vif_info = {'network_name': 'br100',
'mac_address': '00:00:00:ca:fe:01',
'network_ref': {'type': 'DistributedVirtualPortgroup',
'dvsw': 'fake-network-id',
'dvpg': 'fake-group'},
'iface_id': 7,
'vif_model': 'VirtualE1000'}
fake_factory = fake.FakeFactory()
result = vm_util.get_network_attach_config_spec(
fake_factory, vif_info, 1)
port = 'ns0:DistributedVirtualSwitchPortConnection'
backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.extraConfig = []
extra_config = fake_factory.create('ns0:OptionValue')
extra_config.value = vif_info['iface_id']
extra_config.key = 'nvp.iface-id.1'
expected.extraConfig.append(extra_config)
expected.deviceChange = []
device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
device_change.operation = 'add'
device = fake_factory.create('ns0:VirtualE1000')
device.macAddress = vif_info['mac_address']
device.key = -47
device.addressType = 'manual'
device.wakeOnLanEnabled = True
device.backing = fake_factory.create(backing)
device.backing.port = fake_factory.create(port)
device.backing.port.portgroupKey = vif_info['network_ref']['dvpg']
device.backing.port.switchUuid = vif_info['network_ref']['dvsw']
connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo')
connectable.allowGuestControl = True
connectable.connected = True
connectable.startConnected = True
device.connectable = connectable
device_change.device = device
expected.deviceChange.append(device_change)
self.assertEqual(expected, result)
def test_get_network_attach_config_spec_dvs_with_limits(self):
vif_info = {'network_name': 'br100',
'mac_address': '00:00:00:ca:fe:01',
'network_ref': {'type': 'DistributedVirtualPortgroup',
'dvsw': 'fake-network-id',
'dvpg': 'fake-group'},
'iface_id': 7,
'vif_model': 'VirtualE1000'}
fake_factory = fake.FakeFactory()
limits = vm_util.Limits()
limits.limit = 10
limits.reservation = 20
limits.shares_level = 'custom'
limits.shares_share = 40
result = vm_util.get_network_attach_config_spec(
fake_factory, vif_info, 1, limits)
port = 'ns0:DistributedVirtualSwitchPortConnection'
backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.extraConfig = []
extra_config = fake_factory.create('ns0:OptionValue')
extra_config.value = vif_info['iface_id']
extra_config.key = 'nvp.iface-id.1'
expected.extraConfig.append(extra_config)
expected.deviceChange = []
device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
device_change.operation = 'add'
device = fake_factory.create('ns0:VirtualE1000')
device.macAddress = vif_info['mac_address']
device.key = -47
device.addressType = 'manual'
device.wakeOnLanEnabled = True
device.backing = fake_factory.create(backing)
device.backing.port = fake_factory.create(port)
device.backing.port.portgroupKey = vif_info['network_ref']['dvpg']
device.backing.port.switchUuid = vif_info['network_ref']['dvsw']
device.resourceAllocation = fake_factory.create(
'ns0:VirtualEthernetCardResourceAllocation')
device.resourceAllocation.limit = 10
device.resourceAllocation.reservation = 20
device.resourceAllocation.share = fake_factory.create(
'ns0:SharesInfo')
device.resourceAllocation.share.level = 'custom'
device.resourceAllocation.share.shares = 40
connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo')
connectable.allowGuestControl = True
connectable.connected = True
connectable.startConnected = True
device.connectable = connectable
device_change.device = device
expected.deviceChange.append(device_change)
self.assertEqual(expected, result)
def _get_create_vif_spec(self, fake_factory, vif_info):
limits = vm_util.Limits()
limits.limit = 10
limits.reservation = 20
limits.shares_level = 'custom'
limits.shares_share = 40
return vm_util._create_vif_spec(fake_factory, vif_info, limits)
def _construct_vif_spec(self, fake_factory, vif_info):
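        # Build the expected VirtualDeviceConfigSpec for an E1000 VIF backed
        # by a distributed virtual switch port, mirroring the spec produced
        # by vm_util._create_vif_spec with the limits set up in
        # _get_create_vif_spec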
port = 'ns0:DistributedVirtualSwitchPortConnection'
backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
device_change.operation = 'add'
device = fake_factory.create('ns0:VirtualE1000')
device.macAddress = vif_info['mac_address']
device.key = -47
device.addressType = 'manual'
device.wakeOnLanEnabled = True
device.backing = fake_factory.create(backing)
device.backing.port = fake_factory.create(port)
device.backing.port.portgroupKey = vif_info['network_ref']['dvpg']
device.backing.port.switchUuid = vif_info['network_ref']['dvsw']
if vif_info['network_ref'].get('dvs_port_key'):
device.backing.port.portKey = (
vif_info['network_ref']['dvs_port_key'])
device.resourceAllocation = fake_factory.create(
'ns0:VirtualEthernetCardResourceAllocation')
device.resourceAllocation.limit = 10
device.resourceAllocation.reservation = 20
device.resourceAllocation.share = fake_factory.create(
'ns0:SharesInfo')
device.resourceAllocation.share.level = 'custom'
device.resourceAllocation.share.shares = 40
connectable = fake_factory.create('ns0:VirtualDeviceConnectInfo')
connectable.allowGuestControl = True
connectable.connected = True
connectable.startConnected = True
device.connectable = connectable
device_change.device = device
return device_change
def test_get_create_vif_spec(self):
vif_info = {'network_name': 'br100',
'mac_address': '00:00:00:ca:fe:01',
'network_ref': {'type': 'DistributedVirtualPortgroup',
'dvsw': 'fake-network-id',
'dvpg': 'fake-group'},
'iface_id': 7,
'vif_model': 'VirtualE1000'}
fake_factory = fake.FakeFactory()
result = self._get_create_vif_spec(fake_factory, vif_info)
device_change = self._construct_vif_spec(fake_factory, vif_info)
self.assertEqual(device_change, result)
def test_get_create_vif_spec_dvs_port_key(self):
vif_info = {'network_name': 'br100',
'mac_address': '00:00:00:ca:fe:01',
'network_ref': {'type': 'DistributedVirtualPortgroup',
'dvsw': 'fake-network-id',
'dvpg': 'fake-group',
'dvs_port_key': 'fake-key'},
'iface_id': 7,
'vif_model': 'VirtualE1000'}
fake_factory = fake.FakeFactory()
result = self._get_create_vif_spec(fake_factory, vif_info)
device_change = self._construct_vif_spec(fake_factory, vif_info)
self.assertEqual(device_change, result)
def test_get_network_detach_config_spec(self):
fake_factory = fake.FakeFactory()
result = vm_util.get_network_detach_config_spec(
fake_factory, 'fake-device', 2)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.extraConfig = []
extra_config = fake_factory.create('ns0:OptionValue')
extra_config.value = 'free'
extra_config.key = 'nvp.iface-id.2'
expected.extraConfig.append(extra_config)
expected.deviceChange = []
device_change = fake_factory.create('ns0:VirtualDeviceConfigSpec')
device_change.device = 'fake-device'
device_change.operation = 'remove'
expected.deviceChange.append(device_change)
self.assertEqual(expected, result)
@mock.patch.object(vm_util, "get_vm_ref")
def test_power_off_instance(self, fake_get_ref):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(session, '_wait_for_task')
) as (fake_call_method, fake_wait_for_task):
vm_util.power_off_instance(session, self._instance, 'fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
self.assertFalse(fake_get_ref.called)
@mock.patch.object(vm_util, "get_vm_ref", return_value="fake-vm-ref")
def test_power_off_instance_no_vm_ref(self, fake_get_ref):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(session, '_wait_for_task')
) as (fake_call_method, fake_wait_for_task):
vm_util.power_off_instance(session, self._instance)
fake_get_ref.assert_called_once_with(session, self._instance)
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
@mock.patch.object(vm_util, "get_vm_ref")
def test_power_off_instance_with_exception(self, fake_get_ref):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(session, '_wait_for_task',
side_effect=exception.NovaException('fake'))
) as (fake_call_method, fake_wait_for_task):
self.assertRaises(exception.NovaException,
vm_util.power_off_instance,
session, self._instance, 'fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
self.assertFalse(fake_get_ref.called)
@mock.patch.object(vm_util, "get_vm_ref")
def test_power_off_instance_power_state_exception(self, fake_get_ref):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, '_call_method',
return_value='fake-task'),
mock.patch.object(
session, '_wait_for_task',
side_effect=vexc.InvalidPowerStateException)
) as (fake_call_method, fake_wait_for_task):
vm_util.power_off_instance(session, self._instance, 'fake-vm-ref')
fake_call_method.assert_called_once_with(session.vim,
"PowerOffVM_Task",
'fake-vm-ref')
fake_wait_for_task.assert_called_once_with('fake-task')
self.assertFalse(fake_get_ref.called)
def test_get_vm_create_spec_updated_hw_version(self):
extra_specs = vm_util.ExtraSpecs(hw_version='vmx-08')
result = vm_util.get_vm_create_spec(fake.FakeFactory(),
self._instance,
'fake-datastore', [],
extra_specs=extra_specs)
self.assertEqual('vmx-08', result.version)
def test_vm_create_spec_with_profile_spec(self):
datastore = ds_obj.Datastore('fake-ds-ref', 'fake-ds-name')
extra_specs = vm_util.ExtraSpecs()
create_spec = vm_util.get_vm_create_spec(fake.FakeFactory(),
self._instance,
datastore.name, [],
extra_specs,
profile_spec='fake_profile_spec')
self.assertEqual(['fake_profile_spec'], create_spec.vmProfile)
@mock.patch.object(pbm, 'get_profile_id_by_name')
def test_get_storage_profile_spec(self, mock_retrieve_profile_id):
fake_profile_id = fake.DataObject()
fake_profile_id.uniqueId = 'fake_unique_id'
mock_retrieve_profile_id.return_value = fake_profile_id
profile_spec = vm_util.get_storage_profile_spec(fake.FakeSession(),
'fake_policy')
self.assertEqual('ns0:VirtualMachineDefinedProfileSpec',
profile_spec.obj_name)
self.assertEqual(fake_profile_id.uniqueId, profile_spec.profileId)
@mock.patch.object(pbm, 'get_profile_id_by_name')
def test_storage_spec_empty_profile(self, mock_retrieve_profile_id):
mock_retrieve_profile_id.return_value = None
profile_spec = vm_util.get_storage_profile_spec(fake.FakeSession(),
'fake_policy')
self.assertIsNone(profile_spec)
def test_get_ephemeral_name(self):
filename = vm_util.get_ephemeral_name(0)
self.assertEqual('ephemeral_0.vmdk', filename)
def test_detach_and_delete_devices_config_spec(self):
fake_devices = ['device1', 'device2']
fake_factory = fake.FakeFactory()
result = vm_util._detach_and_delete_devices_config_spec(fake_factory,
fake_devices)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.deviceChange = []
device1 = fake_factory.create('ns0:VirtualDeviceConfigSpec')
device1.device = 'device1'
device1.operation = 'remove'
device1.fileOperation = 'destroy'
expected.deviceChange.append(device1)
device2 = fake_factory.create('ns0:VirtualDeviceConfigSpec')
device2.device = 'device2'
device2.operation = 'remove'
device2.fileOperation = 'destroy'
expected.deviceChange.append(device2)
self.assertEqual(expected, result)
@mock.patch.object(vm_util, 'reconfigure_vm')
def test_detach_devices_from_vm(self, mock_reconfigure):
fake_devices = ['device1', 'device2']
session = fake.FakeSession()
vm_util.detach_devices_from_vm(session,
'fake-ref',
fake_devices)
mock_reconfigure.assert_called_once_with(session, 'fake-ref', mock.ANY)
def test_get_vm_boot_spec(self):
disk = fake.VirtualDisk()
disk.key = 7
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_boot_spec(fake_factory,
disk)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
boot_disk = fake_factory.create(
'ns0:VirtualMachineBootOptionsBootableDiskDevice')
boot_disk.deviceKey = disk.key
boot_options = fake_factory.create('ns0:VirtualMachineBootOptions')
boot_options.bootOrder = [boot_disk]
expected.bootOptions = boot_options
self.assertEqual(expected, result)
def _get_devices(self, filename):
devices = fake._create_array_of_type('VirtualDevice')
devices.VirtualDevice = self._vmdk_path_and_adapter_type_devices(
filename)
return devices
def test_find_rescue_device(self):
filename = '[test_datastore] uuid/uuid-rescue.vmdk'
devices = self._get_devices(filename)
device = vm_util.find_rescue_device(devices, self._instance)
self.assertEqual(filename, device.backing.fileName)
def test_find_rescue_device_not_found(self):
filename = '[test_datastore] uuid/uuid.vmdk'
devices = self._get_devices(filename)
self.assertRaises(exception.NotFound,
vm_util.find_rescue_device,
devices,
self._instance)
def test_validate_limits(self):
limits = vm_util.Limits(shares_level='high',
shares_share=1948)
self.assertRaises(exception.InvalidInput,
limits.validate)
limits = vm_util.Limits(shares_level='fira')
self.assertRaises(exception.InvalidInput,
limits.validate)
def test_get_vm_create_spec_with_console_delay(self):
extra_specs = vm_util.ExtraSpecs()
self.flags(console_delay_seconds=2, group='vmware')
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.name = self._instance.uuid
expected.instanceUuid = self._instance.uuid
expected.deviceChange = []
expected.numCPUs = 2
expected.version = None
expected.memoryMB = 2048
expected.guestId = constants.DEFAULT_OS_TYPE
expected.extraConfig = []
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = self._instance.uuid
extra_config.key = 'nvp.vm-uuid'
expected.extraConfig.append(extra_config)
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = True
extra_config.key = 'disk.EnableUUID'
expected.extraConfig.append(extra_config)
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = 2000000
extra_config.key = 'keyboard.typematicMinDelay'
expected.extraConfig.append(extra_config)
expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
expected.files.vmPathName = '[fake-datastore]'
expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
expected.managedBy.extensionKey = 'org.openstack.compute'
expected.managedBy.type = 'instance'
expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
expected.tools.afterPowerOn = True
expected.tools.afterResume = True
expected.tools.beforeGuestReboot = True
expected.tools.beforeGuestShutdown = True
expected.tools.beforeGuestStandby = True
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_cores_per_socket(self):
extra_specs = vm_util.ExtraSpecs(cores_per_socket=4)
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.deviceChange = []
expected.guestId = 'otherGuest'
expected.instanceUuid = self._instance.uuid
expected.memoryMB = self._instance.memory_mb
expected.name = self._instance.uuid
expected.numCPUs = self._instance.vcpus
expected.numCoresPerSocket = 4
expected.version = None
expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
expected.files.vmPathName = '[fake-datastore]'
expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
expected.tools.afterPowerOn = True
expected.tools.afterResume = True
expected.tools.beforeGuestReboot = True
expected.tools.beforeGuestShutdown = True
expected.tools.beforeGuestStandby = True
expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
expected.managedBy.extensionKey = 'org.openstack.compute'
expected.managedBy.type = 'instance'
expected.extraConfig = []
extra_config = fake_factory.create('ns0:OptionValue')
extra_config.key = 'nvp.vm-uuid'
extra_config.value = self._instance.uuid
expected.extraConfig.append(extra_config)
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = True
extra_config.key = 'disk.EnableUUID'
expected.extraConfig.append(extra_config)
self.assertEqual(expected, result)
def test_get_vm_create_spec_with_memory_allocations(self):
memory_limits = vm_util.Limits(limit=7,
reservation=6)
extra_specs = vm_util.ExtraSpecs(memory_limits=memory_limits)
fake_factory = fake.FakeFactory()
result = vm_util.get_vm_create_spec(fake_factory,
self._instance,
'fake-datastore', [],
extra_specs)
expected = fake_factory.create('ns0:VirtualMachineConfigSpec')
expected.deviceChange = []
expected.guestId = 'otherGuest'
expected.instanceUuid = self._instance.uuid
expected.memoryMB = self._instance.memory_mb
expected.name = self._instance.uuid
expected.numCPUs = self._instance.vcpus
expected.version = None
expected.files = fake_factory.create('ns0:VirtualMachineFileInfo')
expected.files.vmPathName = '[fake-datastore]'
expected.tools = fake_factory.create('ns0:ToolsConfigInfo')
expected.tools.afterPowerOn = True
expected.tools.afterResume = True
expected.tools.beforeGuestReboot = True
expected.tools.beforeGuestShutdown = True
expected.tools.beforeGuestStandby = True
expected.managedBy = fake_factory.create('ns0:ManagedByInfo')
expected.managedBy.extensionKey = 'org.openstack.compute'
expected.managedBy.type = 'instance'
memory_allocation = fake_factory.create('ns0:ResourceAllocationInfo')
memory_allocation.limit = 7
memory_allocation.reservation = 6
memory_allocation.shares = fake_factory.create('ns0:SharesInfo')
memory_allocation.shares.level = 'normal'
memory_allocation.shares.shares = 0
expected.memoryAllocation = memory_allocation
expected.extraConfig = []
extra_config = fake_factory.create('ns0:OptionValue')
extra_config.key = 'nvp.vm-uuid'
extra_config.value = self._instance.uuid
expected.extraConfig.append(extra_config)
extra_config = fake_factory.create("ns0:OptionValue")
extra_config.value = True
extra_config.key = 'disk.EnableUUID'
expected.extraConfig.append(extra_config)
self.assertEqual(expected, result)
def test_get_swap(self):
vm_ref = 'fake-vm-ref'
# Root disk
controller_key = 1000
root_disk = fake.VirtualDisk()
root_disk.controllerKey = controller_key
disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = '[test_datastore] uuid/uuid.vmdk'
root_disk.capacityInBytes = 1048576
root_disk.backing = disk_backing
# Swap disk
swap_disk = fake.VirtualDisk()
swap_disk.controllerKey = controller_key
disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = "swap"
swap_disk.capacityInBytes = 1024
swap_disk.backing = disk_backing
devices = [root_disk, swap_disk]
session = fake.FakeSession()
with mock.patch.object(session, '_call_method',
return_value=devices) as mock_call:
device = vm_util.get_swap(session, vm_ref)
mock_call.assert_called_once_with(mock.ANY,
"get_object_property", vm_ref, "config.hardware.device")
self.assertEqual(swap_disk, device)
def test_create_folder(self):
"""Test create_folder when the folder doesn't exist"""
child_folder = mock.sentinel.child_folder
session = fake.FakeSession()
with mock.patch.object(session, '_call_method',
side_effect=[child_folder]):
parent_folder = mock.sentinel.parent_folder
parent_folder.value = 'parent-ref'
child_name = 'child_folder'
ret = vm_util.create_folder(session, parent_folder, child_name)
self.assertEqual(child_folder, ret)
session._call_method.assert_called_once_with(session.vim,
'CreateFolder',
parent_folder,
name=child_name)
def test_create_folder_duplicate_name(self):
"""Test create_folder when the folder already exists"""
session = fake.FakeSession()
details = {'object': 'folder-1'}
duplicate_exception = vexc.DuplicateName(details=details)
with mock.patch.object(session, '_call_method',
side_effect=[duplicate_exception]):
parent_folder = mock.sentinel.parent_folder
parent_folder.value = 'parent-ref'
child_name = 'child_folder'
ret = vm_util.create_folder(session, parent_folder, child_name)
self.assertEqual('Folder', ret._type)
self.assertEqual('folder-1', ret.value)
session._call_method.assert_called_once_with(session.vim,
'CreateFolder',
parent_folder,
name=child_name)
def test_get_folder_does_not_exist(self):
session = fake.FakeSession()
with mock.patch.object(session, '_call_method',
return_value=None):
ret = vm_util._get_folder(session, 'fake-parent', 'fake-name')
self.assertIsNone(ret)
expected_invoke_api = [mock.call(vutil, 'get_object_property',
'fake-parent',
'childEntity')]
self.assertEqual(expected_invoke_api,
session._call_method.mock_calls)
def test_get_folder_child_entry_not_folder(self):
child_entity = mock.Mock()
child_entity._type = 'NotFolder'
prop_val = mock.Mock()
prop_val.ManagedObjectReference = [child_entity]
session = fake.FakeSession()
with mock.patch.object(session, '_call_method',
return_value=prop_val):
ret = vm_util._get_folder(session, 'fake-parent', 'fake-name')
self.assertIsNone(ret)
expected_invoke_api = [mock.call(vutil, 'get_object_property',
'fake-parent',
'childEntity')]
self.assertEqual(expected_invoke_api,
session._call_method.mock_calls)
def test_get_folder_child_entry_not_matched(self):
child_entity = mock.Mock()
child_entity._type = 'Folder'
prop_val = mock.Mock()
prop_val.ManagedObjectReference = [child_entity]
session = fake.FakeSession()
with mock.patch.object(session, '_call_method',
side_effect=[prop_val, 'fake-1-name']):
ret = vm_util._get_folder(session, 'fake-parent', 'fake-name')
self.assertIsNone(ret)
expected_invoke_api = [mock.call(vutil, 'get_object_property',
'fake-parent',
'childEntity'),
mock.call(vutil, 'get_object_property',
child_entity, 'name')]
self.assertEqual(expected_invoke_api,
session._call_method.mock_calls)
def test_get_folder_child_entry_matched(self):
child_entity = mock.Mock()
child_entity._type = 'Folder'
prop_val = mock.Mock()
prop_val.ManagedObjectReference = [child_entity]
session = fake.FakeSession()
with mock.patch.object(session, '_call_method',
side_effect=[prop_val, 'fake-name']):
ret = vm_util._get_folder(session, 'fake-parent', 'fake-name')
self.assertEqual(ret, child_entity)
expected_invoke_api = [mock.call(vutil, 'get_object_property',
'fake-parent',
'childEntity'),
mock.call(vutil, 'get_object_property',
child_entity, 'name')]
self.assertEqual(expected_invoke_api,
session._call_method.mock_calls)
def test_folder_path_ref_cache(self):
path = 'OpenStack/Project (e2b86092bf064181ade43deb3188f8e4)'
self.assertIsNone(vm_util.folder_ref_cache_get(path))
vm_util.folder_ref_cache_update(path, 'fake-ref')
self.assertEqual('fake-ref', vm_util.folder_ref_cache_get(path))
def test_get_vm_name(self):
uuid = uuidutils.generate_uuid()
expected = uuid
name = vm_util._get_vm_name(None, uuid)
self.assertEqual(expected, name)
display_name = 'fira'
expected = 'fira (%s)' % uuid
name = vm_util._get_vm_name(display_name, uuid)
self.assertEqual(expected, name)
display_name = 'X' * 255
expected = '%s (%s)' % ('X' * 41, uuid)
name = vm_util._get_vm_name(display_name, uuid)
self.assertEqual(expected, name)
self.assertEqual(len(name), 80)
@mock.patch.object(vm_util, '_get_vm_name', return_value='fake-name')
def test_rename_vm(self, mock_get_name):
session = fake.FakeSession()
with test.nested(
mock.patch.object(session, '_call_method',
return_value='fake_rename_task'),
mock.patch.object(session, '_wait_for_task')
) as (_call_method, _wait_for_task):
vm_util.rename_vm(session, 'fake-ref', self._instance)
_call_method.assert_called_once_with(mock.ANY,
'Rename_Task', 'fake-ref', newName='fake-name')
_wait_for_task.assert_called_once_with(
'fake_rename_task')
mock_get_name.assert_called_once_with(self._instance.display_name,
self._instance.uuid)
@mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
class VMwareVMUtilGetHostRefTestCase(test.NoDBTestCase):
# N.B. Mocking on the class only mocks test_*(), but we need
# VMwareAPISession.vim to be mocked in both setUp and tests. Not mocking in
# setUp causes object initialisation to fail. Not mocking in tests results
# in vim calls not using FakeVim.
@mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
def setUp(self):
super(VMwareVMUtilGetHostRefTestCase, self).setUp()
fake.reset()
vm_util.vm_refs_cache_reset()
self.session = driver.VMwareAPISession()
# Create a fake VirtualMachine running on a known host
self.host_ref = list(fake._db_content['HostSystem'].keys())[0]
self.vm_ref = fake.create_vm(host_ref=self.host_ref)
@mock.patch.object(vm_util, 'get_vm_ref')
def test_get_host_ref_for_vm(self, mock_get_vm_ref):
mock_get_vm_ref.return_value = self.vm_ref
ret = vm_util.get_host_ref_for_vm(self.session, 'fake-instance')
mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
self.assertEqual(self.host_ref, ret)
@mock.patch.object(vm_util, 'get_vm_ref')
def test_get_host_name_for_vm(self, mock_get_vm_ref):
mock_get_vm_ref.return_value = self.vm_ref
host = fake._get_object(self.host_ref)
ret = vm_util.get_host_name_for_vm(self.session, 'fake-instance')
mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
self.assertEqual(host.name, ret)
from django.core.exceptions import ValidationError
import inflection
import shanghai
from shanghai.actions import *
from shanghai.exceptions import ModelValidationError, NotFoundError
from shanghai.inspectors import Inspector, MetaInspector, ModelInspector
from shanghai.mixins import *
from shanghai.serializers import Serializer
class Resource(CollectionMixin, ObjectMixin, LinkedMixin, RelatedMixin,
FetcherMixin, MetaMixin, ResponderMixin, DispatcherMixin,
FilterMixin, SortMixin, PaginationMixin, LinkerMixin,
InclusionMixin, FieldsMixin, object):
"""
A base class for all resources.
"""
inspector = MetaInspector
serializer = Serializer
def __init__(self, api=shanghai.api):
self.api = api
self.name = self.resolve_name()
self.type = self.resolve_type()
self.inspector = self.resolve_inspector()
self.serializer = self.resolve_serializer()
# generic request attributes
self.request = None
self.args = None
self.kwargs = None
# resolved action attributes
self.action = None
self.pk = None
self.link = None
self.related = None
self.input = None
def resolve_name(self):
cls = type(self)
name = getattr(cls, 'name', None)
if name is None:
name = cls.__name__[:-len('Resource')]
name = inflection.underscore(name)
name = inflection.pluralize(name)
return name
def resolve_type(self):
return inflection.dasherize(self.name)
def resolve_inspector(self):
inspector = getattr(type(self), 'inspector', Inspector)
return inspector(self)
def resolve_serializer(self):
serializer = getattr(type(self), 'serializer', Serializer)
return serializer(self)
def url_pattern_name(self, *args):
return '-'.join([self.api.name, self.type] + list(args))
def generate_urls(self):
from django.conf.urls import patterns, url
view = self.dispatch
pk = '\w+([,]?(\w+))*'
link = related = '\w+'
url_patterns = patterns('',
url(r'^{0}/(?P<pk>{1})/links/(?P<link>{2})'.format(self.type, pk, link), view, name=self.url_pattern_name('pk', 'link')),
url(r'^{0}/(?P<pk>{1})/(?P<related>{2})'.format(self.type, pk, related), view, name=self.url_pattern_name('pk', 'related')),
url(r'^{0}/(?P<pk>{1})'.format(self.type, pk), view, name=self.url_pattern_name('pk')),
url(r'^{0}'.format(self.type), view, name=self.url_pattern_name()),
)
return url_patterns
@property
def urls(self):
return self.generate_urls()
def __str__(self):
return self.name
# TODO find a suitable mixin
def process_collection(self, collection):
filters = self.filter_parameters()
sorters = self.sort_parameters()
pagination = self.pagination_parameters()
if len(filters):
collection = self.filter_collection(collection, **filters)
if len(sorters):
collection = self.sort_collection(collection, *sorters)
if pagination:
pagination['total'] = self.count_collection(collection)
collection = self.paginate_collection(collection, pagination)
return collection, dict(filters=filters, sorters=sorters, pagination=pagination)
def meta_for_document(self, object_or_iterable, **kwargs):
# TODO `pagination.total`
return None
def meta_for_object(self, obj, **kwargs):
return None
class ModelResource(ModelCollectionMixin, ModelObjectMixin,
ModelLinkedMixin, ModelRelatedMixin, ModelFetcherMixin,
ModelFilterMixin, ModelSortMixin, ModelPaginationMixin,
Resource):
"""
A model based resource.
"""
model = None
inspector = ModelInspector
def queryset(self):
return self.model.objects.all()
def _get_objects_data(self, pks):
if not pks or not len(pks):
return list()
objects = self.queryset().filter(pk__in=pks)
if len(pks) != len(objects):
raise NotFoundError()
return objects
def create_object(self):
return self.model()
def save_object(self, obj, data):
links = data.get('links', dict())
update_fields = list()
for key, attribute in self.attributes().items():
if key not in data:
continue
attribute.set_to(obj, data.get(key))
update_fields.append(key)
for key, relationship in self.relationships().items():
if key not in links:
continue
linked_resource = self.linked_resource(relationship)
linkage = links[key]
if relationship.is_belongs_to():
linked_obj = None
if linkage:
pk = linkage['id']
linked_obj = linked_resource.fetch_object(pk)
relationship.set_to(obj, linked_obj)
update_fields.append(key)
try:
obj.full_clean()
except ValidationError as error:
raise ModelValidationError(error)
if obj.pk:
obj.save(update_fields=update_fields)
else:
obj.save()
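# Has-many linkages can only be applied once the object has a primary key,
# so related collections are assigned after the initial save above.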
for key, relationship in self.relationships().items():
if key not in links:
continue
linked_resource = self.linked_resource(relationship)
linkage = links[key]
if relationship.is_has_many():
related_manager = relationship.get_from(obj)
pks = [item['id'] for item in linkage]
linked_objects = list()
if len(pks):
linked_objects = linked_resource._get_objects_data(pks)
relationship.set_to(obj, linked_objects)
return obj
def remove_object(self, obj):
obj.delete()
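# Illustrative sketch only (not part of this module): a concrete resource is
# typically declared by subclassing ModelResource and exposing its generated
# URL patterns.  "Article" is a hypothetical Django model used purely for
# illustration; how the resource is registered with the shanghai API registry
# may differ in a real project.
#
# class ArticleResource(ModelResource):
#     model = Article
#
# # e.g. in urls.py, one plausible wiring of the generated patterns:
# # urlpatterns += ArticleResource().urls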
|
|
#!/usr/bin/env python
# Line too long - pylint: disable=C0301
# Invalid name - pylint: disable=C0103
#
# Copyright (c) EMC/Greenplum Inc 2011. All Rights Reserved.
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
"""
Internal Use Function.
"""
# THIS IMPORT MUST COME FIRST
from gppylib.mainUtils import simple_main, addStandardLoggingAndHelpOptions
import os, pickle, base64, time
from gppylib.gpparseopts import OptParser, OptChecker
from gppylib import gparray, gplog
from gppylib.commands import base, gp
from gppylib.utils import parseKeyColonValueLines
logger = gplog.get_default_logger()
DESCRIPTION = """
This utility is NOT SUPPORTED and is for internal-use only.
Starts a set of one or more segment databases.
"""
HELP = ["""
Utility should only be used by other GP utilities.
Return codes:
0 - All segments started successfully
1 - At least one segment didn't start successfully
"""]
class StartResult:
"""
Recorded result information from an attempt to start one segment.
"""
def __init__(self, datadir, started, reason, reasoncode):
"""
@param datadir
@param started
@param reason
@param reasoncode one of the gp.SEGSTART_* values
"""
self.datadir = datadir
self.started = started
self.reason = reason
self.reasoncode = reasoncode
def __str__(self):
return "".join([
"STATUS",
"--DIR:", str(self.datadir),
"--STARTED:", str(self.started),
"--REASONCODE:", str(self.reasoncode),
"--REASON:", str(self.reason)
])
class OverallStatus:
"""
Mapping and segment status information for all segments on this host.
"""
def __init__(self, dblist):
"""
Build the datadir->segment mapping and remember the original size.
Since segments which fail to start will be removed from the mapping,
we later test the size of the map against the original size when
returning the appropriate status code to the caller.
"""
self.dirmap = dict([(seg.getSegmentDataDirectory(), seg) for seg in dblist])
self.original_length = len(self.dirmap)
self.results = []
self.logger = logger
def mark_failed(self, datadir, msg, reasoncode):
"""
Mark a segment as failed during some startup process.
Remove the entry for the segment from dirmap.
@param datadir
@param msg
@param reasoncode one of the gp.SEGSTART_* constant values
"""
self.logger.info("Marking failed %s, %s, %s" % (datadir, msg, reasoncode))
self.results.append( StartResult(datadir=datadir, started=False, reason=msg, reasoncode=reasoncode) )
del self.dirmap[datadir]
def remaining_items_succeeded(self):
"""
Add results for all remaining items in our datadir->segment map.
"""
for datadir in self.dirmap.keys():
self.results.append( StartResult(datadir=datadir, started=True, reason="Start Succeeded", reasoncode=gp.SEGSTART_SUCCESS ) )
def log_results(self):
"""
Log info messages with our results
"""
status = '\nCOMMAND RESULTS\n' + "\n".join([str(result) for result in self.results])
self.logger.info(status)
def exit_code(self):
"""
Return an appropriate exit code: 0 if no failures, 1 if some segments failed to start.
"""
if len(self.dirmap) != self.original_length:
return 1
return 0
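def _overall_status_example():
    """Illustrative sketch only; not used by this utility.

    Shows how OverallStatus is meant to be driven: segments that fail are
    removed from the datadir->segment mapping via mark_failed(), the survivors
    are recorded as successes, and exit_code() compares the remaining mapping
    size against the original size.  _FakeSeg stands in for a gparray.GpDB
    object; only getSegmentDataDirectory() is needed here.
    """
    class _FakeSeg:
        def __init__(self, datadir):
            self._datadir = datadir
        def getSegmentDataDirectory(self):
            return self._datadir

    status = OverallStatus([_FakeSeg('/data/seg0'), _FakeSeg('/data/seg1')])
    status.mark_failed('/data/seg1', 'postmaster did not start',
                       gp.SEGSTART_ERROR_PG_CTL_FAILED)
    status.remaining_items_succeeded()
    status.log_results()
    return status.exit_code()   # 1, since one segment was removed from the map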
class GpSegStart:
"""
Logic to start segment servers on this host.
"""
def __init__(self, dblist, gpversion, mirroringMode, num_cids, era,
timeout, pickledTransitionData, specialMode, wrapper, wrapper_args,
logfileDirectory=False):
# validate/store arguments
#
self.dblist = map(gparray.GpDB.initFromString, dblist)
expected_gpversion = gpversion
actual_gpversion = gp.GpVersion.local('local GP software version check', os.path.abspath(os.pardir))
if actual_gpversion != expected_gpversion:
raise Exception("Local Software Version does not match what is expected.\n"
"The local software version is: '%s'\n"
"But we were expecting it to be: '%s'\n"
"Please review and correct" % (actual_gpversion, expected_gpversion))
self.mirroringMode = mirroringMode
self.num_cids = num_cids
self.era = era
self.timeout = timeout
self.pickledTransitionData = pickledTransitionData
assert(specialMode in [None, 'upgrade', 'maintenance'])
self.specialMode = specialMode
self.wrapper = wrapper
self.wrapper_args = wrapper_args
# initialize state
#
self.pool = base.WorkerPool(numWorkers=len(dblist))
self.logger = logger
self.overall_status = None
self.logfileDirectory = logfileDirectory
def getOverallStatusKeys(self):
return self.overall_status.dirmap.keys()
# return True if all running
# return False if not all running
def checkPostmasters(self, must_be_running):
"""
Check that segment postmasters have been started.
@param must_be_running True if postmasters must be running by now.
"""
self.logger.info("Checking segment postmasters... (must_be_running %s)" % must_be_running)
all_running = True
for datadir in self.getOverallStatusKeys():
pid = gp.read_postmaster_pidfile(datadir)
running = gp.check_pid(pid)
msg = "Postmaster %s %srunning (pid %d)" % (datadir, "is " if running else "NOT ", pid)
self.logger.info(msg)
if not running:
all_running = False
if must_be_running and not running:
reasoncode = gp.SEGSTART_ERROR_PG_CTL_FAILED
self.overall_status.mark_failed(datadir, msg, reasoncode)
return all_running
def __validateDirectoriesAndSetupRecoveryStartup(self):
"""
validate that the directories all exist and run recovery startup if needed
"""
self.logger.info("Validating directories...")
for datadir in self.overall_status.dirmap.keys():
self.logger.info("Validating directory: %s" % datadir)
if os.path.isdir(datadir):
#
# segment datadir exists
#
pg_log = os.path.join(datadir, 'pg_log')
if not os.path.exists(pg_log):
os.mkdir(pg_log)
postmaster_pid = os.path.join(datadir, 'postmaster.pid')
if os.path.exists(postmaster_pid):
self.logger.warning("postmaster.pid file exists, checking if recovery startup required")
msg = gp.recovery_startup(datadir)
if msg:
reasoncode = gp.SEGSTART_ERROR_STOP_RUNNING_SEGMENT_FAILED
self.overall_status.mark_failed(datadir, msg, reasoncode)
else:
#
# segment datadir does not exist
#
msg = "Segment data directory does not exist for: '%s'" % datadir
self.logger.warning(msg)
reasoncode = gp.SEGSTART_ERROR_DATA_DIRECTORY_DOES_NOT_EXIST
self.overall_status.mark_failed(datadir, msg, reasoncode)
def __startSegments(self):
"""
Start the segments themselves
"""
self.logger.info("Starting segments... (mirroringMode %s)" % self.mirroringMode)
for datadir, seg in self.overall_status.dirmap.items():
cmd = gp.SegmentStart("Starting seg at dir %s" % datadir,
seg,
self.num_cids,
self.era,
self.mirroringMode,
timeout=self.timeout,
specialMode=self.specialMode,
wrapper=self.wrapper,
wrapper_args=self.wrapper_args)
self.pool.addCommand(cmd)
self.pool.join()
for cmd in self.pool.getCompletedItems():
res = cmd.get_results()
if res.rc != 0:
# we should also read in last entries in startup.log here
datadir = cmd.segment.getSegmentDataDirectory()
msg = "PG_CTL failed.\nstdout:%s\nstderr:%s\n" % (res.stdout, res.stderr)
reasoncode = gp.SEGSTART_ERROR_PG_CTL_FAILED
self.overall_status.mark_failed(datadir, msg, reasoncode)
self.pool.empty_completed_items()
def __convertSegments(self):
"""
Inform segments of their role
"""
if self.mirroringMode != 'quiescent':
self.logger.info("Not transitioning segments, mirroringMode is %s..." % self.mirroringMode)
return
self.logger.info("Transitioning segments, mirroringMode is %s..." % self.mirroringMode)
transitionData = None
if self.pickledTransitionData is not None:
transitionData = pickle.loads(base64.urlsafe_b64decode(self.pickledTransitionData))
# send transition messages to the segments
#
for datadir, seg in self.overall_status.dirmap.items():
#
# This cmd will deliver a message to the postmaster using gp_primarymirror
# (look for the protocol message type PRIMARY_MIRROR_TRANSITION_REQUEST_CODE )
#
port = seg.getSegmentPort()
cmd = gp.SendFilerepTransitionMessage.buildTransitionMessageCommand(transitionData, datadir, port)
self.pool.addCommand(cmd)
self.pool.join()
# examine the results from the segments
#
segments = self.overall_status.dirmap.values()
dataDirToSeg = gparray.GpArray.getSegmentsGroupedByValue(segments, gparray.GpDB.getSegmentDataDirectory)
toStop = []
cmds = self.pool.getCompletedItems()
for cmd in cmds:
res = cmd.get_results()
if res.rc == 0:
continue
# some form of failure
#
stdoutFromFailure = res.stdout.replace("\n", " ").strip()
stderrFromFailure = res.stderr.replace("\n", " ").strip()
shouldStop = False
if res.rc == gp.SendFilerepTransitionMessage.TRANSITION_ERRCODE_ERROR_SERVER_DID_NOT_RETURN_DATA:
msg = "Segment did not respond to startup request; check segment logfile"
reasoncode = gp.SEGSTART_ERROR_SERVER_DID_NOT_RESPOND
# server crashed when sending response, should ensure it's stopped completely!
shouldStop = True
elif stderrFromFailure.endswith("failure: Error: MirroringFailure"):
msg = "Failure in segment mirroring; check segment logfile"
reasoncode = gp.SEGSTART_ERROR_MIRRORING_FAILURE
elif stderrFromFailure.endswith("failure: Error: PostmasterDied"):
msg = "Segment postmaster has exited; check segment logfile"
reasoncode = gp.SEGSTART_ERROR_POSTMASTER_DIED
elif stderrFromFailure.endswith("failure: Error: InvalidStateTransition"):
msg = "Not a valid operation at this time; check segment logfile"
reasoncode = gp.SEGSTART_ERROR_INVALID_STATE_TRANSITION
# This should never happen, but if it does then we will ensure process is gone
shouldStop = True
elif stderrFromFailure.endswith("failure: Error: ServerIsInShutdown"):
msg = "System is shutting down"
reasoncode = gp.SEGSTART_ERROR_SERVER_IS_IN_SHUTDOWN
else:
if res.rc == gp.SendFilerepTransitionMessage.TRANSITION_ERRCODE_ERROR_SOCKET:
# Couldn't connect to server to do transition or got another problem
# communicating, must make sure it's halted!
shouldStop = True
msg = "Start failed; check segment logfile. \"%s%s\"" % (stdoutFromFailure, stderrFromFailure)
reasoncode = gp.SEGSTART_ERROR_OTHER
self.overall_status.mark_failed(cmd.dataDir, msg, reasoncode)
if shouldStop:
assert len(dataDirToSeg[cmd.dataDir]) == 1, "Multiple segments with dir %s" % cmd.dataDir
toStop.append( dataDirToSeg[cmd.dataDir][0] )
# ensure segments in a bad state are stopped
#
for seg in toStop:
datadir, port = (seg.getSegmentDataDirectory(), seg.getSegmentPort())
msg = "Stopping segment %s, %s because of failure sending transition" % (datadir, port)
self.logger.info(msg)
cmd = gp.SegmentStop('stop segment', datadir, mode="immediate")
cmd.run(validateAfter=False)
res = cmd.get_results()
if res.rc == 0:
self.logger.info("Stop of segment succeeded")
else:
stdoutFromFailure = res.stdout.replace("\n", " ").strip()
stderrFromFailure = res.stderr.replace("\n", " ").strip()
self.logger.info("Stop of segment failed: rc: %s\nstdout:%s\nstderr:%s" % \
(res.rc, stdoutFromFailure, stderrFromFailure))
def run(self):
"""
Logic to start the segments.
"""
# we initialize an overall status object which maintains a mapping
# from each segment's data directory to the segment object as well
# as a list of specific success/failure results.
#
self.overall_status = OverallStatus(self.dblist)
# Each of the following steps may cause segment entries to be removed
# from the mapping and recorded as failures.
#
self.__validateDirectoriesAndSetupRecoveryStartup()
self.__startSegments()
# Being paranoid, we frequently check for postmaster failures.
# The postmasters should be running by now.
#
self.checkPostmasters(must_be_running=True)
self.__convertSegments()
self.checkPostmasters(must_be_running=True)
# At this point any segments remaining in the mapping are assumed to
# have successfully started.
#
self.overall_status.remaining_items_succeeded()
self.overall_status.log_results()
return self.overall_status.exit_code()
def cleanup(self):
"""
Cleanup worker pool resources
"""
if self.pool:
self.pool.haltWork()
@staticmethod
def createParser():
"""
Create parser expected by simple_main
"""
parser = OptParser(option_class=OptChecker,
description=' '.join(DESCRIPTION.split()),
version='%prog version main build dev')
parser.setHelp(HELP)
#
# Note that this mirroringmode parameter should only be either mirrorless or quiescent.
# If quiescent then it is implied that there is pickled transition data that will be
# provided (using -p) to immediately convert to a primary or a mirror.
#
addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=False)
parser.add_option("-D", "--dblist", dest="dblist", action="append", type="string")
parser.add_option("-M", "--mirroringmode", dest="mirroringMode", type="string")
parser.add_option("-p", "--pickledTransitionData", dest="pickledTransitionData", type="string")
parser.add_option("-V", "--gp-version", dest="gpversion", metavar="GP_VERSION", help="expected software version")
parser.add_option("-n", "--numsegments", dest="num_cids", help="number of distinct content ids in cluster")
parser.add_option("", "--era", dest="era", help="master era")
parser.add_option("-t", "--timeout", dest="timeout", type="int", default=gp.SEGMENT_TIMEOUT_DEFAULT,
help="seconds to wait")
parser.add_option('-U', '--specialMode', type='choice', choices=['upgrade', 'maintenance'],
metavar='upgrade|maintenance', action='store', default=None,
help='start the instance in upgrade or maintenance mode')
parser.add_option('', '--wrapper', dest="wrapper", default=None, type='string')
parser.add_option('', '--wrapper-args', dest="wrapper_args", default=None, type='string')
return parser
@staticmethod
def createProgram(options, args):
"""
Create program expected by simple_main
"""
logfileDirectory = options.ensure_value("logfileDirectory", False)
return GpSegStart(options.dblist,
options.gpversion,
options.mirroringMode,
options.num_cids,
options.era,
options.timeout,
options.pickledTransitionData,
options.specialMode,
options.wrapper,
options.wrapper_args,
logfileDirectory=logfileDirectory)
#-------------------------------------------------------------------------
if __name__ == '__main__':
mainOptions = { 'setNonuserOnToolLogger':True}
simple_main( GpSegStart.createParser, GpSegStart.createProgram, mainOptions )
|
|
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import shutil
import unittest
import datetime
import MySQLdb
import environment
import tablet
import utils
from mysql_flavor import mysql_flavor
use_mysqlctld = False
use_xtrabackup = False
stream_mode = 'tar'
tablet_master = None
tablet_replica1 = None
tablet_replica2 = None
xtrabackup_args = []
new_init_db = ''
db_credentials_file = ''
def setUpModule():
global xtrabackup_args
xtrabackup_args = ['-backup_engine_implementation',
'xtrabackup',
'-xtrabackup_stream_mode',
stream_mode,
'-xtrabackup_user=vt_dba',
'-xtrabackup_backup_flags',
'--password=VtDbaPass']
global new_init_db, db_credentials_file
global tablet_master, tablet_replica1, tablet_replica2
tablet_master = tablet.Tablet(use_mysqlctld=use_mysqlctld,
vt_dba_passwd='VtDbaPass')
tablet_replica1 = tablet.Tablet(use_mysqlctld=use_mysqlctld,
vt_dba_passwd='VtDbaPass')
tablet_replica2 = tablet.Tablet(use_mysqlctld=use_mysqlctld,
vt_dba_passwd='VtDbaPass')
try:
environment.topo_server().setup()
credentials = {
'vt_dba': ['VtDbaPass'],
'vt_app': ['VtAppPass'],
'vt_allprivs': ['VtAllprivsPass'],
'vt_repl': ['VtReplPass'],
'vt_filtered': ['VtFilteredPass'],
}
db_credentials_file = environment.tmproot+'/db_credentials.json'
with open(db_credentials_file, 'w') as fd:
fd.write(json.dumps(credentials))
# Determine which column is used for user passwords in this MySQL version.
proc = tablet_master.init_mysql()
if use_mysqlctld:
tablet_master.wait_for_mysqlctl_socket()
else:
utils.wait_procs([proc])
try:
tablet_master.mquery('mysql', 'select password from mysql.user limit 0',
user='root')
password_col = 'password'
except MySQLdb.DatabaseError:
password_col = 'authentication_string'
utils.wait_procs([tablet_master.teardown_mysql()])
tablet_master.remove_tree(ignore_options=True)
# Create a new init_db.sql file that sets up passwords for all users.
# Then we use a db-credentials-file with the passwords.
new_init_db = environment.tmproot + '/init_db_with_passwords.sql'
with open(environment.vttop + '/config/init_db.sql') as fd:
init_db = fd.read()
with open(new_init_db, 'w') as fd:
fd.write(init_db)
fd.write(mysql_flavor().change_passwords(password_col))
# start mysql instance external to the test
setup_procs = [
tablet_master.init_mysql(init_db=new_init_db,
extra_args=['-db-credentials-file',
db_credentials_file]),
tablet_replica1.init_mysql(init_db=new_init_db,
extra_args=['-db-credentials-file',
db_credentials_file]),
tablet_replica2.init_mysql(init_db=new_init_db,
extra_args=['-db-credentials-file',
db_credentials_file]),
]
if use_mysqlctld:
tablet_master.wait_for_mysqlctl_socket()
tablet_replica1.wait_for_mysqlctl_socket()
tablet_replica2.wait_for_mysqlctl_socket()
else:
utils.wait_procs(setup_procs)
logging.debug("done initializing mysql %s",str(datetime.datetime.now()))
except:
tearDownModule()
raise
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
teardown_procs = [
tablet_master.teardown_mysql(extra_args=['-db-credentials-file',
db_credentials_file]),
tablet_replica1.teardown_mysql(extra_args=['-db-credentials-file',
db_credentials_file]),
tablet_replica2.teardown_mysql(extra_args=['-db-credentials-file',
db_credentials_file])
]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
tablet_master.remove_tree()
tablet_replica1.remove_tree()
tablet_replica2.remove_tree()
class TestBackup(unittest.TestCase):
def setUp(self):
for t in tablet_master, tablet_replica1:
t.create_db('vt_test_keyspace')
xtra_args = ['-db-credentials-file', db_credentials_file]
if use_xtrabackup:
xtra_args.extend(xtrabackup_args)
tablet_master.init_tablet('replica', 'test_keyspace', '0', start=True,
supports_backups=True,
extra_args=xtra_args)
tablet_replica1.init_tablet('replica', 'test_keyspace', '0', start=True,
supports_backups=True,
extra_args=xtra_args)
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
tablet_master.tablet_alias])
def tearDown(self):
for t in tablet_master, tablet_replica1, tablet_replica2:
t.kill_vttablet()
tablet.Tablet.check_vttablet_count()
environment.topo_server().wipe()
for t in [tablet_master, tablet_replica1, tablet_replica2]:
t.reset_replication()
t.set_semi_sync_enabled(master=False, slave=False)
t.clean_dbs()
for backup in self._list_backups():
self._remove_backup(backup)
_create_vt_insert_test = '''create table vt_insert_test (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB'''
def _insert_data(self, t, index):
"""Add a single row with value 'index' to the given tablet."""
t.mquery(
'vt_test_keyspace',
"insert into vt_insert_test (msg) values ('test %s')" %
index, write=True)
def _check_data(self, t, count, msg):
"""Check that the specified tablet has the expected number of rows."""
timeout = 10
while True:
try:
result = t.mquery(
'vt_test_keyspace', 'select count(*) from vt_insert_test')
if result[0][0] == count:
break
except MySQLdb.DatabaseError:
# ignore exceptions; we'll just time out (tablet creation can take some
# time to replicate, and in rare cases we get a 'table vt_insert_test
# does not exist' exception)
logging.exception('exception waiting for data to replicate')
timeout = utils.wait_step(msg, timeout)
def _restore(self, t, tablet_type='replica'):
"""Erase mysql/tablet dir, then start tablet with restore enabled."""
logging.debug("restoring tablet %s",str(datetime.datetime.now()))
self._reset_tablet_dir(t)
xtra_args = ['-db-credentials-file', db_credentials_file]
if use_xtrabackup:
xtra_args.extend(xtrabackup_args)
t.start_vttablet(wait_for_state='SERVING',
init_tablet_type=tablet_type,
init_keyspace='test_keyspace',
init_shard='0',
supports_backups=True,
extra_args=xtra_args)
# check semi-sync is enabled for replica, disabled for rdonly.
if tablet_type == 'replica':
t.check_db_var('rpl_semi_sync_slave_enabled', 'ON')
t.check_db_status('rpl_semi_sync_slave_status', 'ON')
else:
t.check_db_var('rpl_semi_sync_slave_enabled', 'OFF')
t.check_db_status('rpl_semi_sync_slave_status', 'OFF')
def _restore_wait_for_backup(self, t, tablet_type='replica'):
"""Erase mysql/tablet dir, then start tablet with wait_for_restore_interval."""
self._reset_tablet_dir(t)
xtra_args = [
'-db-credentials-file', db_credentials_file,
'-wait_for_backup_interval', '1s',
]
if use_xtrabackup:
xtra_args.extend(xtrabackup_args)
t.start_vttablet(wait_for_state=None,
init_tablet_type=tablet_type,
init_keyspace='test_keyspace',
init_shard='0',
supports_backups=True,
extra_args=xtra_args)
def _reset_tablet_dir(self, t):
"""Stop mysql, delete everything including tablet dir, restart mysql."""
extra_args = ['-db-credentials-file', db_credentials_file]
utils.wait_procs([t.teardown_mysql(extra_args=extra_args)])
# Specify ignore_options because we want to delete the tree even
# if the test's -k / --keep-logs was specified on the command line.
t.remove_tree(ignore_options=True)
logging.debug("starting mysql %s",str(datetime.datetime.now()))
proc = t.init_mysql(init_db=new_init_db, extra_args=extra_args)
if use_mysqlctld:
t.wait_for_mysqlctl_socket()
else:
utils.wait_procs([proc])
logging.debug("done starting mysql %s",str(datetime.datetime.now()))
def _list_backups(self):
"""Get a list of backup names for the test shard."""
backups, _ = utils.run_vtctl(tablet.get_backup_storage_flags() +
['ListBackups', 'test_keyspace/0'],
mode=utils.VTCTL_VTCTL, trap_output=True)
return backups.splitlines()
def _remove_backup(self, backup):
"""Remove a named backup from the test shard."""
utils.run_vtctl(
tablet.get_backup_storage_flags() +
['RemoveBackup', 'test_keyspace/0', backup],
auto_log=True, mode=utils.VTCTL_VTCTL)
def test_backup_rdonly(self):
self._test_backup('rdonly', False)
def test_backup_replica(self):
self._test_backup('replica', False)
def test_backup_master(self):
"""Test backup flow.
test_backup will:
- create a shard with master and replica1 only
- run InitShardMaster
- insert some data
- take a backup on master
- insert more data on the master
- bring up tablet_replica2 after the fact, let it restore the backup
- check all data is right (before+after backup data)
- list the backup, remove it
"""
# insert data on master, wait for slave to get it
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_data(tablet_master, 1)
self._check_data(tablet_replica1, 1, 'replica1 tablet getting data')
# This will fail, make sure we get the right error.
_, err = utils.run_vtctl(['Backup', tablet_master.tablet_alias],
auto_log=True, expect_fail=True)
self.assertIn('type MASTER cannot take backup. if you really need to do this, rerun the backup command with -allow_master', err)
# And make sure there is no backup left.
backups = self._list_backups()
self.assertEqual(len(backups), 0, 'invalid backups: %s' % backups)
# backup the master
utils.run_vtctl(['Backup', '-allow_master=true', tablet_master.tablet_alias], auto_log=True)
# check that the backup shows up in the listing
backups = self._list_backups()
logging.debug('list of backups: %s', backups)
self.assertEqual(len(backups), 1)
self.assertTrue(backups[0].endswith(tablet_master.tablet_alias))
# insert more data on the master
self._insert_data(tablet_master, 2)
# now bring up the other slave, letting it restore from backup.
self._restore(tablet_replica2, tablet_type='replica')
# check the new slave has the data
self._check_data(tablet_replica2, 2, 'replica2 tablet getting data')
# check that the restored slave has the right local_metadata
result = tablet_replica2.mquery('_vt', 'select * from local_metadata')
metadata = {}
for row in result:
metadata[row[0]] = row[1]
self.assertEqual(metadata['Alias'], 'test_nj-0000062346')
self.assertEqual(metadata['ClusterAlias'], 'test_keyspace.0')
self.assertEqual(metadata['DataCenter'], 'test_nj')
self.assertEqual(metadata['PromotionRule'], 'neutral')
# remove the backup and check that the list is empty
self._remove_backup(backups[0])
backups = self._list_backups()
logging.debug('list of backups after remove: %s', backups)
self.assertEqual(len(backups), 0)
tablet_replica2.kill_vttablet()
def _test_backup(self, tablet_type, backup_only):
"""Test backup flow.
test_backup will:
- create a shard with master and replica1 only
- run InitShardMaster
- bring up tablet_replica2 concurrently, telling it to wait for a backup
- insert some data
- take a backup
- insert more data on the master
- wait for tablet_replica2 to become SERVING
- check all data is right (before+after backup data)
- list the backup, remove it
Args:
tablet_type: 'replica' or 'rdonly'.
"""
# bring up another replica concurrently, telling it to wait until a backup
# is available instead of starting up empty.
self._restore_wait_for_backup(tablet_replica2, tablet_type=tablet_type)
# insert data on master, wait for slave to get it
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_data(tablet_master, 1)
self._check_data(tablet_replica1, 1, 'replica1 tablet getting data')
# backup the slave
alias = tablet_replica1.tablet_alias
logging.debug("taking backup %s",str(datetime.datetime.now()))
utils.run_vtctl(['Backup', alias], auto_log=True)
logging.debug("done taking backup %s",str(datetime.datetime.now()))
# check that the backup shows up in the listing
backups = self._list_backups()
logging.debug('list of backups: %s', backups)
self.assertEqual(len(backups), 1)
self.assertTrue(backups[0].endswith(alias))
# insert more data on the master
self._insert_data(tablet_master, 2)
# wait for tablet_replica2 to become serving (after restoring)
utils.pause('wait_for_backup')
tablet_replica2.wait_for_vttablet_state('SERVING')
# check the new slave has the data
self._check_data(tablet_replica2, 2, 'replica2 tablet getting data')
# check that the restored slave has the right local_metadata
result = tablet_replica2.mquery('_vt', 'select * from local_metadata')
metadata = {}
for row in result:
metadata[row[0]] = row[1]
self.assertEqual(metadata['Alias'], 'test_nj-0000062346')
self.assertEqual(metadata['ClusterAlias'], 'test_keyspace.0')
self.assertEqual(metadata['DataCenter'], 'test_nj')
if tablet_type == 'replica':
self.assertEqual(metadata['PromotionRule'], 'neutral')
else:
self.assertEqual(metadata['PromotionRule'], 'must_not')
for backup in backups:
self._remove_backup(backup)
backups = self._list_backups()
logging.debug('list of backups after remove: %s', backups)
self.assertEqual(len(backups), 0)
tablet_replica2.kill_vttablet()
def test_master_slave_same_backup(self):
"""Test a master and slave from the same backup.
Check that a slave and master both restored from the same backup
can replicate successfully.
"""
# insert data on master, wait for slave to get it
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_data(tablet_master, 1)
self._check_data(tablet_replica1, 1, 'replica1 tablet getting data')
# backup the slave
utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True)
# insert more data on the master
self._insert_data(tablet_master, 2)
# now bring up the other slave, letting it restore from backup.
self._restore(tablet_replica2)
# check the new slave has the data
self._check_data(tablet_replica2, 2, 'replica2 tablet getting data')
# Promote replica2 to master.
utils.run_vtctl(['PlannedReparentShard',
'-keyspace_shard', 'test_keyspace/0',
'-new_master', tablet_replica2.tablet_alias])
# insert more data on replica2 (current master)
self._insert_data(tablet_replica2, 3)
# Force replica1 to restore from backup.
tablet_replica1.kill_vttablet()
self._restore(tablet_replica1)
# wait for replica1 to catch up.
self._check_data(tablet_replica1, 3,
'replica1 getting data from restored master')
# This is to test that replicationPosition is processed correctly
# while doing backup/restore after a reparent.
# It is written into the MANIFEST and read back from the MANIFEST.
# Take another backup on the slave.
utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True)
# Insert more data on replica2 (current master).
self._insert_data(tablet_replica2, 4)
# Force replica1 to restore from backup.
tablet_replica1.kill_vttablet()
self._restore(tablet_replica1)
# Wait for replica1 to catch up.
self._check_data(tablet_replica1, 4,
'replica1 getting data from master after reparent+backup+restore')
tablet_replica2.kill_vttablet()
def _restore_old_master_test(self, restore_method):
"""Test that a former master replicates correctly after being restored.
- Take a backup.
- Reparent from old master to new master.
- Force old master to restore from a previous backup using restore_method.
Args:
restore_method: function accepting one parameter of type tablet.Tablet,
this function is called to force a restore on the provided tablet
"""
# insert data on master, wait for slave to get it
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_data(tablet_master, 1)
self._check_data(tablet_replica1, 1, 'replica1 tablet getting data')
# backup the slave
utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True)
# insert more data on the master
self._insert_data(tablet_master, 2)
# reparent to replica1
utils.run_vtctl(['PlannedReparentShard',
'-keyspace_shard', 'test_keyspace/0',
'-new_master', tablet_replica1.tablet_alias])
# insert more data on new master
self._insert_data(tablet_replica1, 3)
# force the old master to restore at the latest backup.
restore_method(tablet_master)
# wait for it to catch up.
self._check_data(tablet_master, 3, 'former master catches up after restore')
def test_restore_old_master(self):
def _restore_using_kill(t):
t.kill_vttablet()
self._restore(t)
self._restore_old_master_test(_restore_using_kill)
def test_in_place_restore(self):
def _restore_in_place(t):
utils.run_vtctl(['RestoreFromBackup', t.tablet_alias], auto_log=True)
self._restore_old_master_test(_restore_in_place)
def test_terminated_restore(self):
stop_restore_msg = 'Copying file 10'
if use_xtrabackup:
stop_restore_msg = 'Restore: Preparing the files'
def _terminated_restore(t):
for e in utils.vtctld_connection.execute_vtctl_command(
['RestoreFromBackup', t.tablet_alias]):
logging.info('%s', e.value)
if stop_restore_msg in e.value:
break
utils.Vtctld().start()
# insert data on master, wait for slave to get it
tablet_master.mquery('vt_test_keyspace', self._create_vt_insert_test)
self._insert_data(tablet_master, 1)
self._check_data(tablet_replica1, 1, 'replica1 tablet getting data')
# backup the slave
utils.run_vtctl(['Backup', tablet_replica1.tablet_alias], auto_log=True)
# insert more data on the master
self._insert_data(tablet_master, 2)
# reparent to replica1
utils.run_vtctl(['PlannedReparentShard',
'-keyspace_shard', 'test_keyspace/0',
'-new_master', tablet_replica1.tablet_alias])
# insert more data on new master
self._insert_data(tablet_replica1, 3)
# force the old master to restore at the latest backup, and terminate the restore
# when it is in the middle of copying the files
_terminated_restore(tablet_master)
# check that restore_file has been created but not deleted
restore_file = os.path.join(tablet_master.tablet_dir, 'restore_in_progress')
self.assertTrue(os.path.isfile(restore_file))
# now retry the restore
for e in utils.vtctld_connection.execute_vtctl_command(
['RestoreFromBackup', tablet_master.tablet_alias]):
logging.info('%s', e.value)
logging.info('waiting for restore to finish')
utils.wait_for_tablet_type(tablet_master.tablet_alias, 'replica', timeout=30)
# check that restore_file doesn't exist any more
self.assertFalse(os.path.isfile(restore_file))
# wait for it to catch up.
self._check_data(tablet_master, 3, 'former master catches up after restore')
if __name__ == '__main__':
utils.main()
|
|
# Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD Style.
"""Implementation of Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
import warnings
from ..externals.joblib import Parallel, delayed
from ..base import RegressorMixin
from ..base import ClassifierMixin
from ..feature_selection.selector_mixin import SelectorMixin
from .base import BaseSGD
from ..utils import atleast2d_or_csr, check_arrays
from ..utils.extmath import safe_sparse_dot
from ..utils import safe_asarray
from ..utils import deprecated
from .sgd_fast import plain_sgd as plain_sgd
from .sgd_fast import ArrayDataset, CSRDataset
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
def _make_dataset(X, y_i, sample_weight):
"""Returns Dataset object + intercept_decay"""
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
intercept_decay = 0.01
else:
dataset = ArrayDataset(X, y_i, sample_weight)
intercept_decay = 1.0
return dataset, intercept_decay
def _tocsr(X):
"""Convert X to CSR matrix, preventing a copy if possible"""
if sp.isspmatrix_csr(X) and X.dtype == np.float64:
return X
else:
return sp.csr_matrix(X, dtype=np.float64)
class SGDClassifier(BaseSGD, ClassifierMixin, SelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD.
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays or
scipy.sparse matrices of floating point values for the features.
Parameters
----------
loss : str, 'hinge' or 'log' or 'modified_huber'
The loss function to be used. Defaults to 'hinge'. The hinge loss is
a margin loss used by standard linear SVM models. The 'log' loss is
the loss of logistic regression models and can be used for
probability estimation in binary classifiers. 'modified_huber'
is another smooth loss that brings tolerance to outliers.
penalty : str, 'l2' or 'l1' or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
rho : float
The Elastic Net mixing parameter, with 0 < rho <= 1.
Defaults to 0.85.
fit_intercept: bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter: int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle: bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to False.
seed: int, optional
The seed of the pseudo random number generator to use when
shuffling the data.
verbose: integer, optional
The verbosity level
n_jobs: integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(t+t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
eta0 : double
The initial learning rate [default 0.01].
power_t : double
The exponent for inverse scaling learning rate [default 0.25].
class_weight : dict, {class_label : weight} or "auto" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "auto" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
`coef_` : array, shape = [1, n_features] if n_classes == 2 else [n_classes,
n_features]
Weights assigned to the features.
`intercept_` : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, class_weight=None, eta0=0.0,
fit_intercept=True, learning_rate='optimal', loss='hinge',
n_iter=5, n_jobs=1, penalty='l2', power_t=0.5, rho=0.85, seed=0,
shuffle=False, verbose=0, warm_start=False)
>>> print clf.predict([[-0.8, -1]])
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001,
rho=0.85, fit_intercept=True, n_iter=5, shuffle=False,
verbose=0, n_jobs=1, seed=0, learning_rate="optimal",
eta0=0.0, power_t=0.5, class_weight=None, warm_start=False):
super(SGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, rho=rho,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose, seed=seed,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
@property
@deprecated("to be removed in v0.12; use ``classes_`` instead.")
def classes(self):
return self.classes_
def _set_loss_function(self, loss):
"""Set concrete LossFunction."""
loss_functions = {
"hinge": Hinge(1.0),
"perceptron": Hinge(0.0),
"log": Log(),
"modified_huber": ModifiedHuber(),
}
try:
self.loss_function = loss_functions[loss]
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _set_class_weight(self, class_weight, classes, y):
"""Estimate class weights for unbalanced datasets."""
if class_weight is None:
# keep the old class_weight if none provided
class_weight = self.class_weight
if class_weight is None or len(class_weight) == 0:
# uniform class weights
weight = np.ones(classes.shape[0], dtype=np.float64, order='C')
elif class_weight == 'auto':
# proportional to the number of samples in the class
weight = np.array([1.0 / np.sum(y == i) for i in classes],
dtype=np.float64, order='C')
weight *= classes.shape[0] / np.sum(weight)
else:
# user-defined dictionary
weight = np.ones(classes.shape[0], dtype=np.float64, order='C')
if not isinstance(class_weight, dict):
raise ValueError("class_weight must be dict, 'auto', or None,"
" got: %r" % class_weight)
for c in class_weight:
i = np.searchsorted(classes, c)
if classes[i] != c:
raise ValueError("Class label %d not present." % c)
else:
weight[i] = class_weight[c]
self._expanded_class_weight = weight
def _partial_fit(self, X, y, n_iter, classes=None, sample_weight=None):
X = safe_asarray(X, dtype=np.float64, order="C")
y = np.asarray(y)
n_samples, n_features = X.shape
self._check_fit_data(X, y)
if self.classes_ is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None and self.classes_ is not None:
if not np.all(self.classes_ == np.unique(classes)):
raise ValueError("`classes` is not the same as on last call "
"to partial_fit.")
elif classes is not None:
self.classes_ = classes
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._set_class_weight(self.class_weight, self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init=None, intercept_init=None)
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, sample_weight, n_iter)
elif n_classes == 2:
self._fit_binary(X, y, sample_weight, n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
self.t_ += n_iter * n_samples
return self
def partial_fit(self, X, y, classes=None,
class_weight=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if class_weight is not None:
warnings.warn("Using 'class_weight' as a parameter to the 'fit' "
"method is deprecated. Set it on initialization instead.",
DeprecationWarning)
self.class_weight = class_weight
return self._partial_fit(X, y, n_iter=1, classes=classes,
sample_weight=sample_weight)
def fit(self, X, y, coef_init=None, intercept_init=None,
class_weight=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if class_weight is not None:
warnings.warn("Using 'class_weight' as a parameter to the 'fit' "
"method is deprecated. Set it on initialization instead.",
DeprecationWarning)
self.class_weight = class_weight
X = safe_asarray(X, dtype=np.float64, order="C")
y = np.asarray(y)
n_samples, n_features = X.shape
self._check_fit_data(X, y)
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
n_classes = classes.shape[0]
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
# Allocate datastructures from input arguments.
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
# Need to re-initialize in case of multiple call to fit.
self._init_t()
self._partial_fit(X, y, self.n_iter,
classes=classes,
sample_weight=sample_weight)
# fitting is over, we can now transform coef_ to fortran order
# for faster predictions
self._set_coef(self.coef_)
return self
def decision_function(self, X):
"""Predict signed 'distance' to the hyperplane (aka confidence score)
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
array, shape = [n_samples] if n_classes == 2 else [n_samples,n_classes]
The signed 'distances' to the hyperplane(s).
"""
X = atleast2d_or_csr(X)
scores = safe_sparse_dot(X, self.coef_.T) + self.intercept_
if self.classes_.shape[0] == 2:
return np.ravel(scores)
else:
return scores
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
array, shape = [n_samples]
Array containing the predicted class labels.
"""
scores = self.decision_function(X)
if self.classes_.shape[0] == 2:
indices = np.array(scores > 0, dtype=np.int)
else:
indices = scores.argmax(axis=1)
return self.classes_[np.ravel(indices)]
def predict_proba(self, X):
"""Predict class membership probability
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
array, shape = [n_samples] if n_classes == 2 else [n_samples,
n_classes]
Contains the membership probabilities of the positive class.
"""
if len(self.classes_) != 2:
raise NotImplementedError("predict_(log_)proba only supported"
" for binary classification")
elif not isinstance(self.loss_function, Log):
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' (%s given)" % self.loss)
return 1.0 / (1.0 + np.exp(-self.decision_function(X)))
def _fit_binary(self, X, y, sample_weight, n_iter):
if sp.issparse(X):
X = _tocsr(X)
coef, intercept = fit_binary(self, 1, X, y, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
# need to be 2d
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
if sp.issparse(X):
X = _tocsr(X)
# Use joblib to fit OvA in parallel
result = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, n_iter,
self._expanded_class_weight[i], 1.,
sample_weight)
for i in xrange(len(self.classes_)))
for i, (coef, intercept) in enumerate(result):
self.coef_[i] = coef
self.intercept_[i] = intercept
def _prepare_fit_binary(est, y, i):
"""Common initialization for _fit_binary_{dense,sparse}.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
if len(est.classes_) == 2:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.coef_[i]
intercept = est.intercept_[i]
return y_i, coef, intercept
def fit_binary(est, i, X, y, n_iter, pos_weight, neg_weight,
sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
y_i, coef, intercept = _prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
return plain_sgd(coef, intercept, est.loss_function,
est.penalty_type, est.alpha, est.rho,
dataset, n_iter, est.fit_intercept,
est.verbose, est.shuffle, est.seed,
pos_weight, neg_weight,
est.learning_rate_code, est.eta0,
est.power_t, est.t_, intercept_decay)
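def _example_multiclass_usage():
    """Illustrative sketch only; not part of the public estimator API.

    Fits a three-class toy problem: one binary classifier per class is
    trained in a One-Versus-All fashion (see _fit_multiclass above), so the
    fitted ``coef_`` has shape [n_classes, n_features] and ``intercept_``
    has shape [n_classes].
    """
    X = np.array([[0., 0.], [0., 1.], [2., 2.], [2., 3.], [5., 5.], [5., 6.]])
    y = np.array([0, 0, 1, 1, 2, 2])
    clf = SGDClassifier(loss="hinge", n_iter=20, seed=0)
    clf.fit(X, y)
    assert clf.coef_.shape == (3, 2)
    assert clf.intercept_.shape == (3,)
    return clf.predict([[5.5, 5.5]])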
class SGDRegressor(BaseSGD, RegressorMixin, SelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays or
scipy.sparse matrices of floating point values for the features.
Parameters
----------
loss : str, 'squared_loss' or 'huber'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' is an epsilon insensitive
loss function for robust regression.
penalty : str, 'l2' or 'l1' or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
rho : float
The Elastic Net mixing parameter, with 0 < rho <= 1.
Defaults to 0.85.
fit_intercept: bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter: int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle: bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to False.
seed: int, optional
The seed of the pseudo random number generator to use when
shuffling the data.
verbose: integer, optional
The verbosity level.
p : float
Epsilon in the epsilon-insensitive huber loss function;
only if `loss=='huber'`.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(t+t0)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
`coef_` : array, shape = [n_features]
Weights assigned to the features.
`intercept_` : array, shape = [1]
The intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
SGDRegressor(alpha=0.0001, eta0=0.01, fit_intercept=True,
learning_rate='invscaling', loss='squared_loss', n_iter=5, p=0.1,
penalty='l2', power_t=0.25, rho=0.85, seed=0, shuffle=False,
verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
rho=0.85, fit_intercept=True, n_iter=5, shuffle=False,
verbose=0, p=0.1, seed=0, learning_rate="invscaling",
eta0=0.01, power_t=0.25, warm_start=False):
self.p = float(p)
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, rho=rho,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose, seed=seed,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start)
def _set_loss_function(self, loss):
"""Get concrete LossFunction"""
loss_functions = {
"squared_loss": SquaredLoss(),
"huber": Huber(self.p),
}
try:
self.loss_function = loss_functions[loss]
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _partial_fit(self, X, y, n_iter, sample_weight=None,
coef_init=None, intercept_init=None):
X, y = check_arrays(X, y, sparse_format="csr", copy=False,
check_ccontiguous=True, dtype=np.float64)
y = np.asarray(y, dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._check_fit_data(X, y)
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
self._fit_regressor(X, y, sample_weight, n_iter)
self.t_ += n_iter * n_samples
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, n_iter=1, sample_weight=sample_weight)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
# Need to re-initialize in case of multiple call to fit.
self._init_t()
return self._partial_fit(X, y, self.n_iter, sample_weight,
coef_init, intercept_init)
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
array, shape = [n_samples]
Predicted target values per element in X.
"""
X = atleast2d_or_csr(X)
scores = safe_sparse_dot(X, self.coef_) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
array, shape = [n_samples]
Predicted target values per element in X.
"""
return self.decision_function(X)
def _fit_regressor(self, X, y, sample_weight, n_iter):
dataset, intercept_decay = _make_dataset(X, y, sample_weight)
self.coef_, intercept = plain_sgd(self.coef_,
self.intercept_[0],
self.loss_function,
self.penalty_type,
self.alpha, self.rho,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
self.seed,
1.0, 1.0,
self.learning_rate_code,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.intercept_ = np.atleast_1d(intercept)
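# ---------------------------------------------------------------------------
# Minimal usage sketch for the regressor above: a one-shot ``fit`` and
# incremental ``partial_fit`` calls on mini-batches.  Only default
# constructor arguments are used because keyword names such as ``rho``,
# ``p`` and ``seed`` differ between scikit-learn releases; the guard keeps
# the example from running at import time.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    y = rng.randn(100)

    # One-shot training: fit() runs n_iter epochs of SGD over the full set.
    reg = SGDRegressor()
    reg.fit(X, y)
    print("coef_ after fit:", reg.coef_)

    # Incremental training: each partial_fit() call performs a single epoch
    # over its mini-batch, so the model can be updated as data arrives.
    streaming_reg = SGDRegressor()
    for batch in np.array_split(np.arange(100), 10):
        streaming_reg.partial_fit(X[batch], y[batch])
    print("prediction for the first sample:", streaming_reg.predict(X[:1]))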
# Copyright 2015 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
NSX data models.
This module defines data models used by the VMware NSX plugin family.
"""
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy import sql
from neutron.db import model_base
from neutron.db import models_v2
class TzNetworkBinding(model_base.BASEV2):
"""Represents a binding of a virtual network with a transport zone.
This model class associates a Neutron network with a transport zone;
optionally a vlan ID might be used if the binding type is 'bridge'
"""
__tablename__ = 'tz_network_bindings'
# TODO(arosen) - it might be worthwhile refactoring how this data
# is stored later so every column does not need to be a primary key.
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
# 'flat', 'vlan', 'stt', 'gre' or 'l3_ext'
binding_type = sa.Column(sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
name='tz_network_bindings_binding_type'),
nullable=False, primary_key=True)
phy_uuid = sa.Column(sa.String(36), primary_key=True, default='')
vlan_id = sa.Column(sa.Integer, primary_key=True,
autoincrement=False, default=0)
def __init__(self, network_id, binding_type, phy_uuid, vlan_id):
self.network_id = network_id
self.binding_type = binding_type
self.phy_uuid = phy_uuid
self.vlan_id = vlan_id
def __repr__(self):
return "<NetworkBinding(%s,%s,%s,%s)>" % (self.network_id,
self.binding_type,
self.phy_uuid,
self.vlan_id)
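# ---------------------------------------------------------------------------
# Minimal sketch of how a transport-zone binding row is typically created
# and queried.  The ``session`` argument is assumed to be an ordinary
# SQLAlchemy session supplied by the caller (real plugin code obtains one
# from Neutron's db layer); both helpers below are illustrative only.
def _example_bind_network_to_vlan(session, network_id, phy_uuid, vlan_id):
    """Record a 'vlan' binding for a network and return the new row."""
    binding = TzNetworkBinding(network_id, 'vlan', phy_uuid, vlan_id)
    session.add(binding)
    session.flush()
    return binding


def _example_bindings_for_network(session, network_id):
    """Return every transport-zone binding recorded for a network."""
    return (session.query(TzNetworkBinding).
            filter_by(network_id=network_id).
            all())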
class NeutronNsxNetworkMapping(model_base.BASEV2):
"""Maps neutron network identifiers to NSX identifiers.
Because of chained logical switches more than one mapping might exist
for a single Neutron network.
"""
__tablename__ = 'neutron_nsx_network_mappings'
neutron_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete='CASCADE'),
primary_key=True)
nsx_id = sa.Column(sa.String(36), primary_key=True)
class NeutronNsxSecurityGroupMapping(model_base.BASEV2):
"""Backend mappings for Neutron Security Group identifiers.
This class maps a neutron security group identifier to the corresponding
NSX security profile identifier.
"""
__tablename__ = 'neutron_nsx_security_group_mappings'
neutron_id = sa.Column(sa.String(36),
sa.ForeignKey('securitygroups.id',
ondelete="CASCADE"),
primary_key=True)
nsx_id = sa.Column(sa.String(36), primary_key=True)
class NeutronNsxPortMapping(model_base.BASEV2):
"""Represents the mapping between neutron and nsx port uuids."""
__tablename__ = 'neutron_nsx_port_mappings'
neutron_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
nsx_switch_id = sa.Column(sa.String(36))
nsx_port_id = sa.Column(sa.String(36), nullable=False)
def __init__(self, neutron_id, nsx_switch_id, nsx_port_id):
self.neutron_id = neutron_id
self.nsx_switch_id = nsx_switch_id
self.nsx_port_id = nsx_port_id
class NeutronNsxRouterMapping(model_base.BASEV2):
"""Maps neutron router identifiers to NSX identifiers."""
__tablename__ = 'neutron_nsx_router_mappings'
neutron_id = sa.Column(sa.String(36),
sa.ForeignKey('routers.id', ondelete='CASCADE'),
primary_key=True)
nsx_id = sa.Column(sa.String(36))
class MultiProviderNetworks(model_base.BASEV2):
"""Networks provisioned through multiprovider extension."""
__tablename__ = 'multi_provider_networks'
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
def __init__(self, network_id):
self.network_id = network_id
class NetworkConnection(model_base.BASEV2, models_v2.HasTenant):
"""Defines a connection between a network gateway and a network."""
# We use port_id as the primary key as one can connect a gateway
# to a network in multiple ways (and we cannot use the same port for
# more than a single gateway)
network_gateway_id = sa.Column(sa.String(36),
sa.ForeignKey('networkgateways.id',
ondelete='CASCADE'))
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete='CASCADE'))
segmentation_type = sa.Column(
sa.Enum('flat', 'vlan',
name='networkconnections_segmentation_type'))
segmentation_id = sa.Column(sa.Integer)
__table_args__ = (sa.UniqueConstraint(network_gateway_id,
segmentation_type,
segmentation_id),
model_base.BASEV2.__table_args__)
# Also, storing port id comes back useful when disconnecting a network
# from a gateway
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete='CASCADE'),
primary_key=True)
class NetworkGatewayDeviceReference(model_base.BASEV2):
id = sa.Column(sa.String(36), primary_key=True)
network_gateway_id = sa.Column(sa.String(36),
sa.ForeignKey('networkgateways.id',
ondelete='CASCADE'),
primary_key=True)
interface_name = sa.Column(sa.String(64), primary_key=True)
class NetworkGatewayDevice(model_base.BASEV2, models_v2.HasId,
models_v2.HasTenant):
nsx_id = sa.Column(sa.String(36))
# Optional name for the gateway device
name = sa.Column(sa.String(255))
# Transport connector type. Not using an enum as the range of
# connector types might vary with the backend version
connector_type = sa.Column(sa.String(10))
# Transport connector IP Address
connector_ip = sa.Column(sa.String(64))
# operational status
status = sa.Column(sa.String(16))
class NetworkGateway(model_base.BASEV2, models_v2.HasId,
models_v2.HasTenant):
"""Defines the data model for a network gateway."""
name = sa.Column(sa.String(255))
# Tenant id is nullable for this resource
tenant_id = sa.Column(sa.String(36))
default = sa.Column(sa.Boolean())
devices = orm.relationship(NetworkGatewayDeviceReference,
backref='networkgateways',
cascade='all,delete')
network_connections = orm.relationship(NetworkConnection, lazy='joined')
class MacLearningState(model_base.BASEV2):
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
mac_learning_enabled = sa.Column(sa.Boolean(), nullable=False)
# Add a relationship to the Port model using the backref attribute.
# This will instruct SQLAlchemy to eagerly load this association.
port = orm.relationship(
models_v2.Port,
backref=orm.backref("mac_learning_state", lazy='joined',
uselist=False, cascade='delete'))
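# ---------------------------------------------------------------------------
# Minimal sketch showing the effect of the joined backref above: because the
# relationship is declared with lazy='joined', the mac_learning_state row is
# fetched in the same SELECT that loads the port, so the attribute access
# below does not trigger a second query.  The ``session`` argument is
# assumed to be an ordinary SQLAlchemy session; the helper is illustrative.
def _example_mac_learning_enabled(session, port_id):
    """Return True if MAC learning is enabled for the given port."""
    port = session.query(models_v2.Port).filter_by(id=port_id).one()
    state = port.mac_learning_state
    return bool(state and state.mac_learning_enabled)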
class LsnPort(models_v2.model_base.BASEV2):
__tablename__ = 'lsn_port'
lsn_port_id = sa.Column(sa.String(36), primary_key=True)
lsn_id = sa.Column(sa.String(36),
sa.ForeignKey('lsn.lsn_id', ondelete="CASCADE"),
nullable=False)
sub_id = sa.Column(sa.String(36), nullable=False, unique=True)
mac_addr = sa.Column(sa.String(32), nullable=False, unique=True)
def __init__(self, lsn_port_id, subnet_id, mac_address, lsn_id):
self.lsn_port_id = lsn_port_id
self.lsn_id = lsn_id
self.sub_id = subnet_id
self.mac_addr = mac_address
class Lsn(models_v2.model_base.BASEV2):
__tablename__ = 'lsn'
lsn_id = sa.Column(sa.String(36), primary_key=True)
net_id = sa.Column(sa.String(36), nullable=False)
def __init__(self, net_id, lsn_id):
self.net_id = net_id
self.lsn_id = lsn_id
class QoSQueue(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
name = sa.Column(sa.String(255))
default = sa.Column(sa.Boolean, default=False, server_default=sql.false())
min = sa.Column(sa.Integer, nullable=False)
max = sa.Column(sa.Integer, nullable=True)
qos_marking = sa.Column(sa.Enum('untrusted', 'trusted',
name='qosqueues_qos_marking'))
dscp = sa.Column(sa.Integer)
class PortQueueMapping(model_base.BASEV2):
port_id = sa.Column(sa.String(36),
sa.ForeignKey("ports.id", ondelete="CASCADE"),
primary_key=True)
queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id"),
primary_key=True)
# Add a relationship to the Port model adding a backref which will
# allow SQLAlchemy to eagerly load the queue binding
port = orm.relationship(
models_v2.Port,
backref=orm.backref("qos_queue", uselist=False,
cascade='delete', lazy='joined'))
class NetworkQueueMapping(model_base.BASEV2):
network_id = sa.Column(sa.String(36),
sa.ForeignKey("networks.id", ondelete="CASCADE"),
primary_key=True)
queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id",
ondelete="CASCADE"))
# Add a relationship to the Network model adding a backref which will
# allow SQLAlchemy to eagerly load the queue binding
network = orm.relationship(
models_v2.Network,
backref=orm.backref("qos_queue", uselist=False,
cascade='delete', lazy='joined'))
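# ---------------------------------------------------------------------------
# Minimal sketch of how the Neutron<->NSX port mapping table is typically
# used: one helper records a mapping, the other resolves the NSX switch and
# port identifiers for a Neutron port.  The ``session`` argument is assumed
# to be an ordinary SQLAlchemy session; both helpers are illustrative only.
def _example_record_port_mapping(session, neutron_port_id,
                                 nsx_switch_id, nsx_port_id):
    """Persist a Neutron->NSX port mapping inside the caller's session."""
    session.add(NeutronNsxPortMapping(neutron_port_id,
                                      nsx_switch_id,
                                      nsx_port_id))
    session.flush()


def _example_lookup_port_mapping(session, neutron_port_id):
    """Return the (nsx_switch_id, nsx_port_id) pair for a port, or None."""
    mapping = (session.query(NeutronNsxPortMapping).
               filter_by(neutron_id=neutron_port_id).
               first())
    if mapping is None:
        return None
    return mapping.nsx_switch_id, mapping.nsx_port_id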